Compare commits


30 Commits
dev ... main

Author SHA1 Message Date
Stepan Vladovskiy
fb6e03c1a2 no force, deployd from gitea
Some checks failed
Deploy to discoursio-api / deploy (push) Failing after 46s
2025-04-07 11:14:58 -03:00
Stepan Vladovskiy
46c3345f45 debug: with force
Some checks failed
Deploy to discoursio-api / deploy (push) Failing after 1m42s
2025-04-07 11:11:01 -03:00
Stepan Vladovskiy
1156a32a88 feat: move map from nginx sigil to nginx global config
All checks were successful
Deploy to discoursio-api / deploy (push) Successful in 1m54s
2025-01-30 12:40:24 -03:00
d848af524f runtime-fix 2024-12-21 23:31:19 +03:00
c9f88c36cd trigdeploy 2024-12-21 23:00:30 +03:00
0ad44a944e Revert ".."
Some checks failed
Deploy to discoursio-api / deploy (push) Failing after 1m22s
This reverts commit fbd0e03a33.
2024-08-06 21:01:03 +03:00
fbd0e03a33 .. 2024-02-23 17:08:08 +03:00
Stepan Vladovskii
076828f003 feat: no force any more for CI deploy from Gitea
All checks were successful
Deploy to discoursio-api / deploy (push) Successful in 30s
2024-01-28 18:38:34 -03:00
Stepan Vladovskii
4f6c459532 feat: sigil with other architecture
All checks were successful
Deploy to discoursio-api / deploy (push) Successful in 1m41s
2024-01-24 22:53:16 -03:00
Stepan Vladovskii
11524c17ea feat: yess, it was deploy on staging
All checks were successful
Deploy to discoursio-api / deploy (push) Successful in 1m40s
2024-01-24 21:06:11 -03:00
168f845772 Merge branch 'main' of https://dev.discours.io/discours.io/core
Some checks failed
Deploy to discoursio-api / deploy (push) Failing after 1m31s
2024-01-25 02:59:43 +03:00
657146cdca trig-redeploy 2024-01-25 02:56:30 +03:00
Stepan Vladovskii
86111bc9f5 debug: simplify main.yml for actions
Some checks failed
Deploy to discoursio-api / deploy (push) Failing after 58s
2024-01-24 20:45:34 -03:00
Stepan Vladovskii
a8018a0b2f debug: simplify main.yml for actions
Some checks failed
Deploy to discoursio-api / deploy (push) Failing after 3s
2024-01-24 20:39:58 -03:00
Stepan Vladovskii
9d8bd629ab feat: add CI to main for deploy on discoursio-api 2024-01-24 19:09:54 -03:00
1eddf9cc0b topic-resolvers-fix 2024-01-24 18:21:34 +03:00
6415f86286 redis-log-fix 2024-01-24 15:32:53 +03:00
5d1c4f0084 launch-fix 2024-01-24 11:36:15 +03:00
1dce947db6 db-link-fix 2024-01-24 11:19:42 +03:00
4d9551a93c redis-fix 2024-01-24 11:06:46 +03:00
e6471280d5 dockerfile-fix2 2024-01-24 10:59:34 +03:00
3e062b4346 untransform-dockerfile 2024-01-24 10:55:14 +03:00
5b1a93c781 Merge branch 'main' of github.com:Discours/discours-backend 2024-01-24 10:47:07 +03:00
c30001547a Merge branch 'main' of github.com:Discours/discours-backend 2023-12-24 17:26:17 +03:00
025019b544 feat: add ACME location
Some checks failed
Deploy / push_to_target_repository (push) Failing after 4m25s
2023-11-28 14:20:19 -03:00
a862a11c91 Revert "some-fixes"
Some checks failed
Deploy / push_to_target_repository (push) Failing after 4m33s
This reverts commit f3d86daea7.
2023-11-24 12:54:01 +03:00
f3d86daea7 some-fixes 2023-11-24 05:19:25 +03:00
296716397e Merge branch 'main' of github.com:Discours/discours-backend 2023-11-24 00:38:20 +03:00
22c42839c1 new-sigil 2023-10-09 18:12:47 +03:00
4fd90e305f Merge branch 'main' of https://github.com/Discours/discours-backend 2023-10-09 10:32:33 +03:00
157 changed files with 11751 additions and 11760 deletions


@@ -1 +0,0 @@
# Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv)


@@ -1,5 +1,9 @@
-name: 'Deploy on push'
-on: [push]
+name: 'Deploy to discoursio-api'
+on:
+  push:
+    branches:
+      - main
 jobs:
   deploy:
     runs-on: ubuntu-latest
@@ -17,19 +21,11 @@ jobs:
         id: branch_name
         run: echo "::set-output name=branch::$(echo ${GITHUB_REF##*/})"
-      - name: Push to dokku for main branch
-        if: github.ref == 'refs/heads/main'
+      - name: Push to dokku
         uses: dokku/github-action@master
         with:
           branch: 'main'
           git_remote_url: 'ssh://dokku@v2.discours.io:22/discoursio-api'
           ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}
-      - name: Push to dokku for dev branch
-        if: github.ref == 'refs/heads/dev'
-        uses: dokku/github-action@master
-        with:
-          branch: 'dev'
-          force: true
-          git_remote_url: 'ssh://dokku@v2.discours.io:22/core'
-          ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}

.github/workflows/checks.yml vendored (new file, 16 lines)

@@ -0,0 +1,16 @@
name: Checks
on: [pull_request]
jobs:
build:
runs-on: ubuntu-latest
name: Checks
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.10.6
- run: pip install --upgrade pip
- run: pip install -r requirements.txt
- run: pip install -r requirements-dev.txt
- run: ./checks.sh


@@ -17,11 +17,11 @@ jobs:
       - uses: webfactory/ssh-agent@v0.8.0
         with:
-          ssh-private-key: ${{ github.action.secrets.SSH_PRIVATE_KEY }}
+          ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
       - name: Push to dokku
         env:
-          HOST_KEY: ${{ github.action.secrets.HOST_KEY }}
+          HOST_KEY: ${{ secrets.HOST_KEY }}
         run: |
           echo $HOST_KEY > ~/.ssh/known_hosts
           git remote add dokku dokku@v2.discours.io:discoursio-api

.gitignore vendored (23 lines changed)

@@ -147,20 +147,11 @@ migration/content/**/*.md
 *.csv
 dev-server.pid
 backups/
-poetry.lock
 .ruff_cache
-.jj
-.zed
-.devcontainer/devcontainer.json
-dokku_config
-.gitignore
-*.db
-*.sqlite3
-views.json
-*.pem
-*.key
-*.crt
-*cache.json
-.cursor
-node_modules/
+.venv
+poetry.lock
+localhost-key.pem
+localhost.pem
+discoursio.db


@@ -1,18 +1,44 @@
-exclude: |
-  (?x)(
-  ^tests/unit_tests/resource|
-  _grpc.py|
-  _pb2.py
-  )
-default_language_version:
-  python: python3.10
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v4.5.0
     hooks:
-      - id: check-yaml
+      - id: check-added-large-files
+      - id: check-case-conflict
+      - id: check-docstring-first
+      - id: check-json
+      - id: check-merge-conflict
       - id: check-toml
+      - id: check-yaml
       - id: end-of-file-fixer
       - id: trailing-whitespace
-      - id: check-added-large-files
-      - id: detect-private-key
-      - id: check-ast
-      - id: check-merge-conflict
-  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.4.7
+      - id: requirements-txt-fixer
+  - repo: https://github.com/timothycrosley/isort
+    rev: 5.12.0
     hooks:
-      - id: ruff
-        args: [--fix]
+      - id: isort
+  - repo: https://github.com/ambv/black
+    rev: 23.10.1
+    hooks:
+      - id: black
+  - repo: https://github.com/PyCQA/flake8
+    rev: 6.1.0
+    hooks:
+      - id: flake8
+  # - repo: https://github.com/python/mypy
+  #   rev: v1.6.1
+  #   hooks:
+  #     - id: mypy


@@ -1,340 +0,0 @@
#### [0.4.20] - 2025-05-03
- Fixed a bug in the `CacheRevalidationManager` class: added initialization of the `_redis` attribute
- Improved handling of the Redis connection in the cache revalidation manager:
- Automatic reconnection when the connection is lost
- Connection check before performing cache operations
- Extra logging to simplify diagnostics
- Fixed the `unpublish_shout` resolver:
- Correctly builds the synthetic `publication` field with `published_at: null`
- Returns a complete data dictionary instead of a model object
- Improved loading of related data (authors, topics) to build the response correctly
#### [0.4.19] - 2025-04-14
- dropped `Shout.description` and `Draft.description` to be UX-generated
- use redis to init views counters after migrator
#### [0.4.18] - 2025-04-10
- Fixed `Topic.stat.authors` and `Topic.stat.comments`
- Fixed unique constraint violation for empty slug values:
- Modified `update_draft` resolver to handle empty slug values
- Modified `create_draft` resolver to prevent empty slug values
- Added validation to prevent inserting or updating drafts with empty slug
- Fixed database error "duplicate key value violates unique constraint draft_slug_key"
#### [0.4.17] - 2025-03-26
- Fixed `'Reaction' object is not subscriptable` error in hierarchical comments:
- Modified `get_reactions_with_stat()` to convert Reaction objects to dictionaries
- Added default values for limit/offset parameters
- Fixed `load_first_replies()` implementation with proper parameter passing
- Added doctest with example usage
- Limited child comments to 100 per parent for performance
#### [0.4.16] - 2025-03-22
- Added hierarchical comments pagination:
- Created new GraphQL query `load_comments_branch` for efficient loading of hierarchical comments
- Ability to load root comments with their first N replies
- Added pagination for both root and child comments
- Using existing `comments_count` field in `Stat` type to display number of replies
- Added special `first_replies` field to store first replies to a comment
- Optimized SQL queries for efficient loading of comment hierarchies
- Implemented flexible comment sorting system (by time, rating)
#### [0.4.15] - 2025-03-22
- Upgraded caching system described `docs/caching.md`
- Module `cache/memorycache.py` removed
- Enhanced caching system with backward compatibility:
- Unified cache key generation with support for existing naming patterns
- Improved Redis operation function with better error handling
- Updated precache module to use consistent Redis interface
- Integrated revalidator with the invalidation system for better performance
- Added comprehensive documentation for the caching system
- Enhanced cached_query to support template-based cache keys
- Standardized error handling across all cache operations
- Optimized cache invalidation system:
- Added targeted invalidation for individual entities (authors, topics)
- Improved revalidation manager with individual object processing
- Implemented batched processing for high-volume invalidations
- Reduced Redis operations by using precise key invalidation instead of prefix-based wipes
- Added special handling for slug changes in topics
- Unified caching system for all models:
- Implemented abstract functions `cache_data`, `get_cached_data` and `invalidate_cache_by_prefix`
- Added `cached_query` function for unified approach to query caching
- Updated resolvers `author.py` and `topic.py` to use the new caching API
- Improved logging for cache operations to simplify debugging
- Optimized Redis memory usage through key format unification
- Improved caching and sorting in Topic and Author modules:
- Added support for dictionary sorting parameters in `by` for both modules
- Optimized cache key generation for stable behavior with various parameters
- Enhanced sorting logic with direction support and arbitrary fields
- Added `by` parameter support in the API for getting topics by community
- Performance optimizations for author-related queries:
- Added SQLAlchemy-managed indexes to `Author`, `AuthorFollower`, `AuthorRating` and `AuthorBookmark` models
- Implemented persistent Redis caching for author queries without TTL (invalidated only on changes)
- Optimized author retrieval with separate endpoints:
- `get_authors_all` - returns all non-deleted authors without statistics
- `load_authors_by` - optimized to use caching and efficient sorting and pagination
- Improved SQL queries with optimized JOIN conditions and efficient filtering
- Added pre-aggregation of statistics (shouts count, followers count) in single efficient queries
- Implemented robust cache invalidation on author updates
- Created necessary indexes for author lookups by user ID, slug, and timestamps
#### [0.4.14] - 2025-03-21
- Significant performance improvements for topic queries:
- Added database indexes to optimize JOIN operations
- Implemented persistent Redis caching for topic queries (no TTL, invalidated only on changes)
- Optimized topic retrieval with separate endpoints for different use cases:
- `get_topics_all` - returns all topics without statistics for lightweight listing
- `get_topics_by_community` - adds pagination and optimized filtering by community
- Added SQLAlchemy-managed indexes directly in ORM models for automatic schema maintenance
- Created `sync_indexes()` function for automatic index synchronization during app startup
- Reduced database load by pre-aggregating statistics in optimized SQL queries
- Added robust cache invalidation on topic create/update/delete operations
- Improved query optimization with proper JOIN conditions and specific partial indexes
#### [0.4.13] - 2025-03-20
- Fixed Topic objects serialization error in cache/memorycache.py
- Improved CustomJSONEncoder to support SQLAlchemy models with dict() method
- Enhanced error handling in cache_on_arguments decorator
- Modified `load_reactions_by` to include deleted reactions when `include_deleted=true` for proper comment tree building
- Fixed featured/unfeatured logic in reaction processing:
- Dislike reactions now properly take precedence over likes
- Featured status now requires more than 4 likes from users with featured articles
- Removed unnecessary filters for deleted reactions since rating reactions are physically deleted
- Author's featured status now based on having non-deleted articles with featured_at
#### [0.4.12] - 2025-03-19
- `delete_reaction` detects comments and uses `deleted_at` update
- `check_to_unfeature` etc. update
- dogpile dep in `services/memorycache.py` optimized
#### [0.4.11] - 2025-02-12
- `create_draft` resolver requires draft_id fixed
- `create_draft` resolver defaults body and title fields to empty string
#### [0.4.9] - 2025-02-09
- `Shout.draft` field added
- `Draft` entity added
- `create_draft`, `update_draft`, `delete_draft` mutations and resolvers added
- `create_shout`, `update_shout`, `delete_shout` mutations removed from GraphQL API
- `load_drafts` resolver implemented
- `publish_` and `unpublish_` mutations and resolvers added
- `create_`, `update_`, `delete_` mutations and resolvers added for `Draft` entity
- tests with pytest for original auth, shouts, drafts
- `Dockerfile` and `pyproject.toml` removed for the simplicity: `Procfile` and `requirements.txt`
#### [0.4.8] - 2025-02-03
- `Reaction.deleted_at` filter on `update_reaction` resolver added
- `triggers` module updated with `after_shout_handler`, `after_reaction_handler` for cache revalidation
- `after_shout_handler`, `after_reaction_handler` now also handle `deleted_at` field
- `get_cached_topic_followers` fixed
- `get_my_rates_comments` fixed
#### [0.4.7]
- `get_my_rates_shouts` resolver added with:
- `shout_id` and `my_rate` fields in response
- filters by `Reaction.deleted_at.is_(None)`
- filters by `Reaction.kind.in_([ReactionKind.LIKE.value, ReactionKind.DISLIKE.value])`
- filters by `Reaction.reply_to.is_(None)`
- uses `local_session()` context manager
- returns empty list on errors
- SQLAlchemy syntax updated:
- `select()` statement fixed for newer versions
- `Reaction` model direct selection instead of labeled columns
- proper row access with `row[0].shout` and `row[0].kind`
- GraphQL resolver fixes:
- added root parameter `_` to match schema
- proper async/await handling with `@login_required`
- error logging added via `logger.error()`
#### [0.4.6]
- login_accepted decorator added
- `docs` added
- optimized and unified `load_shouts_*` resolvers with `LoadShoutsOptions`
- `load_shouts_bookmarked` resolver fixed
- resolvers updates:
- new resolvers group `feed`
- `load_shouts_authored_by` resolver added
- `load_shouts_with_topic` resolver added
- `load_shouts_followed` removed
- `load_shouts_random_topic` removed
- `get_topics_random` removed
- model updates:
- `ShoutsOrderBy` enum added
- `Shout.main_topic` from `ShoutTopic.main` as `Topic` type output
- `Shout.created_by` as `Author` type output
#### [0.4.5]
- `bookmark_shout` mutation resolver added
- `load_shouts_bookmarked` resolver added
- `get_communities_by_author` resolver added
- `get_communities_all` resolver fixed
- `Community` stats in orm
- `Community` CUDL resolvers added
- `Reaction` filter by `Reaction.kind`s
- `ReactionSort` enum added
- `CommunityFollowerRole` enum added
- `InviteStatus` enum added
- `Topic.parents` ids added
- `get_shout` resolver accepts slug or shout_id
#### [0.4.4]
- `followers_stat` removed for shout
- sqlite3 support added
- `rating_stat` and `comments_count` fixes
#### [0.4.3]
- cache reimplemented
- load shouts queries unified
- `followers_stat` removed from shout
#### [0.4.2]
- reactions load resolvers separated for ratings (no stats) and comments
- reactions stats improved
- `load_comment_ratings` separate resolver
#### [0.4.1]
- follow/unfollow logic updated and unified with cache
#### [0.4.0]
- chore: version migrator synced
- feat: precache_data on start
- fix: store id list for following cache data
- fix: shouts stat filter out deleted
#### [0.3.5]
- cache isolated to services
- topics followers and authors cached
- redis stores lists of ids
#### [0.3.4]
- `load_authors_by` from cache
#### [0.3.3]
- feat: sentry integration enabled with glitchtip
- fix: reindex on update shout
- packages upgrade, isort
- separated stats queries for author and topic
- fix: feed featured filter
- fts search removed
#### [0.3.2]
- redis cache for what author follows
- redis cache for followers
- graphql add query: get topic followers
#### [0.3.1]
- enabling sentry
- long query log report added
- editor fixes
- authors links cannot be updated by `update_shout` anymore
#### [0.3.0]
- `Shout.featured_at` timestamp of the frontpage featuring event
- added proposal accepting logics
- schema modulized
- Shout.visibility removed
#### [0.2.22]
- added precommit hook
- fmt
- granian asgi
#### [0.2.21]
- fix: rating logix
- fix: `load_top_random_shouts`
- resolvers: `add_stat_*` refactored
- services: use google analytics
- services: minor fixes search
#### [0.2.20]
- services: ackee removed
- services: following manager fixed
- services: import views.json
#### [0.2.19]
- fix: adding `author` role
- fix: stripping `user_id` in auth connector
#### [0.2.18]
- schema: added `Shout.seo` string field
- resolvers: added `/new-author` webhook resolver
- resolvers: added reader.load_shouts_top_random
- resolvers: added reader.load_shouts_unrated
- resolvers: community follower id property name is `.author`
- resolvers: `get_authors_all` and `load_authors_by`
- services: auth connector upgraded
#### [0.2.17]
- schema: enum types workaround, `ReactionKind`, `InviteStatus`, `ShoutVisibility`
- schema: `Shout.created_by`, `Shout.updated_by`
- schema: `Shout.authors` can be empty
- resolvers: optimized `reacted_shouts_updates` query
#### [0.2.16]
- resolvers: collab inviting logics
- resolvers: queries and mutations revision and renaming
- resolvers: `delete_topic(slug)` implemented
- resolvers: added `get_shout_followers`
- resolvers: `load_shouts_by` filters implemented
- orm: invite entity
- schema: `Reaction.range` -> `Reaction.quote`
- filters: `time_ago` -> `after`
- httpx -> aiohttp
#### [0.2.15]
- schema: `Shout.created_by` removed
- schema: `Shout.mainTopic` removed
- services: cached elasticsearch connector
- services: auth is using `user_id` from authorizer
- resolvers: `notify_*` usage fixes
- resolvers: `getAuthor` now accepts slug, `user_id` or `author_id`
- resolvers: login_required usage fixes
#### [0.2.14]
- schema: some fixes from migrator
- schema: `.days` -> `.time_ago`
- schema: `excludeLayout` + `layout` in filters -> `layouts`
- services: db access simpler, no contextmanager
- services: removed Base.create() method
- services: rediscache updated
- resolvers: get_reacted_shouts_updates as followedReactions query
#### [0.2.13]
- services: db context manager
- services: `ViewedStorage` fixes
- services: views are not stored in core db anymore
- schema: snake case in model fields names
- schema: no DateTime scalar
- resolvers: `get_my_feed` comments filter reactions body.is_not('')
- resolvers: `get_my_feed` query fix
- resolvers: `LoadReactionsBy.days` -> `LoadReactionsBy.time_ago`
- resolvers: `LoadShoutsBy.days` -> `LoadShoutsBy.time_ago`
#### [0.2.12]
- `Author.userpic` -> `Author.pic`
- `CommunityFollower.role` is string now
- `Author.user` is string now
#### [0.2.11]
- redis interface updated
- `viewed` interface updated
- `presence` interface updated
- notify on create, update, delete for reaction and shout
- notify on follow / unfollow author
- use pyproject
- devmode fixed
#### [0.2.10]
- community resolvers connected
#### [0.2.9]
- starlette is back, aiohttp removed
- aioredis replaced with aredis
#### [0.2.8]
- refactored
#### [0.2.7]
- `loadFollowedReactions` now with `

CHECKS (new file, 5 lines)

@@ -0,0 +1,5 @@
WAIT=10
TIMEOUT=10
ATTEMPTS=3
/


@@ -1,18 +1,11 @@
-FROM python:slim
+FROM python:3.11-slim
-RUN apt-get update && apt-get install -y \
-    postgresql-client \
-    curl \
-    && rm -rf /var/lib/apt/lists/*
 WORKDIR /app
+EXPOSE 8080
+ADD nginx.conf.sigil ./
 COPY requirements.txt .
+RUN apt update && apt install -y git gcc curl postgresql
 RUN pip install -r requirements.txt
 COPY . .
-EXPOSE 8000
-CMD ["python", "-m", "granian", "main:app", "--interface", "asgi", "--host", "0.0.0.0", "--port", "8000"]
+CMD python server.py

README.md (123 lines changed)

@@ -1,102 +1,47 @@
-# GraphQL API Backend
-Backend service providing GraphQL API for content management system with reactions, ratings and comments.
-## Core Features
-### Shouts (Posts)
-- CRUD operations via GraphQL mutations
-- Rich filtering and sorting options
-- Support for multiple authors and topics
-- Rating system with likes/dislikes
-- Comments and nested replies
-- Bookmarks and following
-### Reactions System
-- `ReactionKind` types: LIKE, DISLIKE, COMMENT
-- Rating calculation for shouts and comments
-- User-specific reaction tracking
-- Reaction stats and aggregations
-- Nested comments support
-### Authors & Topics
-- Author profiles with stats
-- Topic categorization and hierarchy
-- Following system for authors/topics
-- Activity tracking and stats
-- Community features
-## Tech Stack
-- **(Python)[https://www.python.org/]** 3.12+
-- **GraphQL** with [Ariadne](https://ariadnegraphql.org/)
-- **(SQLAlchemy)[https://docs.sqlalchemy.org/en/20/orm/]**
-- **(PostgreSQL)[https://www.postgresql.org/]/(SQLite)[https://www.sqlite.org/]** support
-- **(Starlette)[https://www.starlette.io/]** for ASGI server
-- **(Redis)[https://redis.io/]** for caching
-## Development
-### Prepare environment:
-```shell
-mkdir .venv
-python3.12 -m venv venv
-source venv/bin/activate
-```
-### Run server
-First, certifcates are required to run the server.
-```shell
-mkcert -install
-mkcert localhost
-```
-Then, run the server:
-```shell
-python server.py dev
-```
-### Useful Commands
-```shell
-# Linting and import sorting
-ruff check . --fix --select I
-# Code formatting
-ruff format . --line-length=120
-# Run tests
-pytest
-# Type checking
-mypy .
-```
-### Code Style
-We use:
-- Ruff for linting and import sorting
-- Line length: 120 characters
-- Python type hints
-- Docstrings for public methods
-### GraphQL Development
-Test queries in GraphQL Playground at `http://localhost:8000`:
-```graphql
-# Example query
-query GetShout($slug: String) {
-  get_shout(slug: $slug) {
-    id
-    title
-    main_author {
-      name
-    }
-  }
-}
-```
+# discoursio-api
+- sqlalchemy
+- redis
+- ariadne
+- starlette
+- uvicorn
+on osx
+```
+brew install redis nginx postgres
+brew services start redis
+```
+on debian/ubuntu
+```
+apt install redis nginx
+```
+# Local development
+Install deps first
+```
+pip install -r requirements.txt
+pip install -r requirements-dev.txt
+pre-commit install
+```
+Create database from backup
+```
+./restdb.sh
+```
+Start local server
+```
+python3 server.py dev
+```
+# How to do an authorized request
+Put the header 'Authorization' with token from signIn query or registerUser mutation.
+# How to debug Ackee
+Set ACKEE_TOKEN var
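
The authorized-request note above is easier to follow with a concrete call. A minimal sketch in Python, assuming a local GraphQL endpoint at `http://localhost:8080/`; the query and token value are placeholders, not taken from this diff:

```python
# Hedged illustration of the 'Authorization' header usage described above;
# the endpoint URL, query and token value are placeholders, not from this repo.
import json
import urllib.request

token = "<token from signIn query or registerUser mutation>"
body = json.dumps({"query": "{ me { slug } }"}).encode()  # hypothetical query

req = urllib.request.Request(
    "http://localhost:8080/",  # assumed local endpoint
    data=body,
    headers={"Content-Type": "application/json", "Authorization": token},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))
```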


@@ -1,6 +0,0 @@
import os
import sys
# Get the path to the project root directory
root_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(root_path)

alembic.ini (new file, 110 lines)

@@ -0,0 +1,110 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
# for all available tokens
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python-dateutil library that can be
# installed by adding `alembic[tz]` to the pip requirements
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = %(DB_URL)
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

alembic/README (new file, 3 lines)

@@ -0,0 +1,3 @@
Generic single-database configuration.
https://alembic.sqlalchemy.org/en/latest/tutorial.html


@@ -3,7 +3,7 @@ from logging.config import fileConfig
 from sqlalchemy import engine_from_config, pool
 from alembic import context
-from services.db import Base
+from base.orm import Base
 from settings import DB_URL

 # this is the Alembic Config object, which provides
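
alembic.ini above leaves `sqlalchemy.url = %(DB_URL)`, so env.py has to supply that value. The rest of env.py is not shown in this compare, but a common way to wire in `settings.DB_URL` looks like this (a sketch, not the repo's actual code):

```python
# Sketch only: feeding settings.DB_URL into the %(DB_URL) placeholder of
# alembic.ini; the actual env.py body is not part of this compare.
from alembic import context
from settings import DB_URL

config = context.config
# Registers DB_URL for configparser interpolation in alembic.ini
config.set_section_option(config.config_ini_section, "DB_URL", DB_URL)
```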

alembic/script.py.mako (new file, 26 lines)

@@ -0,0 +1,26 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}


@@ -0,0 +1,26 @@
"""init alembic
Revision ID: fe943b098418
Revises:
Create Date: 2023-08-19 01:37:57.031933
"""
from typing import Sequence, Union
# import sqlalchemy as sa
# from alembic import op
# revision identifiers, used by Alembic.
revision: str = "fe943b098418"
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
pass
def downgrade() -> None:
pass


@@ -1,15 +0,0 @@
{
"healthchecks": {
"web": [
{
"type": "startup",
"name": "web check",
"description": "Checking if the app responds to the GET /",
"path": "/",
"attempts": 3,
"warn": true,
"initialDelay": 1
}
]
}
}


@@ -7,22 +7,26 @@ from starlette.authentication import AuthenticationBackend
 from starlette.requests import HTTPConnection
 from auth.credentials import AuthCredentials, AuthUser
-from auth.exceptions import OperationNotAllowed
 from auth.tokenstorage import SessionToken
-from auth.usermodel import Role, User
-from services.db import local_session
+from base.exceptions import OperationNotAllowed
+from base.orm import local_session
+from orm.user import Role, User
 from settings import SESSION_TOKEN_HEADER

 class JWTAuthenticate(AuthenticationBackend):
-    async def authenticate(self, request: HTTPConnection) -> Optional[Tuple[AuthCredentials, AuthUser]]:
+    async def authenticate(
+        self, request: HTTPConnection
+    ) -> Optional[Tuple[AuthCredentials, AuthUser]]:
         if SESSION_TOKEN_HEADER not in request.headers:
             return AuthCredentials(scopes={}), AuthUser(user_id=None, username="")

         token = request.headers.get(SESSION_TOKEN_HEADER)
         if not token:
             print("[auth.authenticate] no token in header %s" % SESSION_TOKEN_HEADER)
-            return AuthCredentials(scopes={}, error_message=str("no token")), AuthUser(user_id=None, username="")
+            return AuthCredentials(scopes={}, error_message=str("no token")), AuthUser(
+                user_id=None, username=""
+            )

         if len(token.split(".")) > 1:
             payload = await SessionToken.verify(token)
@@ -48,14 +52,20 @@ class JWTAuthenticate(AuthenticationBackend):
         except exc.NoResultFound:
             pass

-        return AuthCredentials(scopes={}, error_message=str("Invalid token")), AuthUser(user_id=None, username="")
+        return AuthCredentials(scopes={}, error_message=str("Invalid token")), AuthUser(
+            user_id=None, username=""
+        )

 def login_required(func):
     @wraps(func)
     async def wrap(parent, info: GraphQLResolveInfo, *args, **kwargs):
+        # debug only
+        # print('[auth.authenticate] login required for %r with info %r' % (func, info))
         auth: AuthCredentials = info.context["request"].auth
+        # print(auth)
         if not auth or not auth.logged_in:
+            # raise Unauthorized(auth.error_message or "Please login")
             return {"error": "Please login first"}
         return await func(parent, info, *args, **kwargs)
@@ -65,7 +75,9 @@ def login_required(func):
 def permission_required(resource, operation, func):
     @wraps(func)
     async def wrap(parent, info: GraphQLResolveInfo, *args, **kwargs):
-        print("[auth.authenticate] permission_required for %r with info %r" % (func, info))  # debug only
+        print(
+            "[auth.authenticate] permission_required for %r with info %r" % (func, info)
+        )  # debug only
         auth: AuthCredentials = info.context["request"].auth
         if not auth.logged_in:
             raise OperationNotAllowed(auth.error_message or "Please login")
@@ -75,22 +87,3 @@ def permission_required(resource, operation, func):
         return await func(parent, info, *args, **kwargs)
     return wrap

-def login_accepted(func):
-    @wraps(func)
-    async def wrap(parent, info: GraphQLResolveInfo, *args, **kwargs):
-        auth: AuthCredentials = info.context["request"].auth
-        # If authorized, add the author's data to the context
-        if auth and auth.logged_in:
-            info.context["author"] = auth.author
-            info.context["user_id"] = auth.author.get("id")
-        else:
-            # Clear author data from the context when not authorized
-            info.context["author"] = None
-            info.context["user_id"] = None
-        return await func(parent, info, *args, **kwargs)
-    return wrap
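
For context, `login_required` is applied to GraphQL resolvers. A hypothetical resolver using it might look like this; the field name and return shape are illustrative, not from the diff:

```python
# Hypothetical resolver showing how login_required above is applied;
# the "myProfile" field is illustrative.
from auth.authenticate import login_required
from base.resolvers import query

@query.field("myProfile")
@login_required
async def my_profile(_, info):
    # AuthCredentials is attached to the request by JWTAuthenticate;
    # its exact fields live in auth/credentials.py, not shown here
    auth = info.context["request"].auth
    return {"logged_in": auth.logged_in}
```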


@@ -1,15 +1,15 @@
 from binascii import hexlify
 from hashlib import sha256
+from jwt import DecodeError, ExpiredSignatureError
 from passlib.hash import bcrypt
-from auth.exceptions import ExpiredToken, InvalidToken
 from auth.jwtcodec import JWTCodec
 from auth.tokenstorage import TokenStorage
+from orm.user import User
 # from base.exceptions import InvalidPassword, InvalidToken
-from services.db import local_session
+from base.orm import local_session
-from orm import User

 class Password:
@@ -33,8 +33,8 @@ class Password:
         Verify that password hash is equal to specified hash. Hash format:
         $2a$10$Ro0CUfOqk6cXEKf3dyaM7OhSCvnwM9s4wIX9JeLapehKK5YdLxKcm
-        \__/\/ \____________________/\_____________________________/ # noqa: W605
-        |  |           Salt                         Hash
+        __ __ ____________________________________________________ # noqa: W605
+        |  |  | Salt (22) |                                   Hash
         |  Cost
         Version
@@ -80,10 +80,10 @@ class Identity:
         if not await TokenStorage.exist(f"{payload.user_id}-{payload.username}-{token}"):
             # raise InvalidToken("Login token has expired, please login again")
             return {"error": "Token has expired"}
-    except ExpiredToken:
+    except ExpiredSignatureError:
         # raise InvalidToken("Login token has expired, please try again")
         return {"error": "Token has expired"}
-    except InvalidToken:
+    except DecodeError:
         # raise InvalidToken("token format error") from e
         return {"error": "Token format error"}
     with local_session() as session:
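
The comment being reworded above documents bcrypt's `$<version>$<cost>$<salt><hash>` layout. A standalone check with passlib (the library this file already imports) makes the split visible; the example password is arbitrary:

```python
# Standalone illustration of the bcrypt hash layout documented above.
from passlib.hash import bcrypt

h = bcrypt.using(rounds=10).hash("example-password")
version, cost, salt_and_hash = h[1:].split("$")
print(version)             # e.g. "2b"
print(cost)                # "10"
print(salt_and_hash[:22])  # 22-char salt
print(salt_and_hash[22:])  # 31-char hash
assert bcrypt.verify("example-password", h)
```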


@@ -1,23 +1,15 @@
 from datetime import datetime, timezone
 import jwt
-from pydantic import BaseModel
-from auth.exceptions import ExpiredToken, InvalidToken
+from base.exceptions import ExpiredToken, InvalidToken
 from settings import JWT_ALGORITHM, JWT_SECRET_KEY
+from validations.auth import AuthInput, TokenPayload

-class TokenPayload(BaseModel):
-    user_id: str
-    username: str
-    exp: datetime
-    iat: datetime
-    iss: str

 class JWTCodec:
     @staticmethod
-    def encode(user, exp: datetime) -> str:
+    def encode(user: AuthInput, exp: datetime) -> str:
         payload = {
             "user_id": user.id,
             "username": user.email or user.phone,
@@ -31,7 +23,7 @@ class JWTCodec:
             print("[auth.jwtcodec] JWT encode error %r" % e)
     @staticmethod
-    def decode(token: str, verify_exp: bool = True):
+    def decode(token: str, verify_exp: bool = True) -> TokenPayload:
         r = None
         payload = None
         try:
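
The codec's `encode`/`decode` wrap PyJWT. A self-contained sketch of the round trip with the claims used above; the secret, algorithm and issuer here are placeholders, not the project's settings:

```python
# Sketch of the JWT round trip that JWTCodec wraps; secret, algorithm
# and issuer are placeholders, not the project's settings.
from datetime import datetime, timedelta, timezone

import jwt

secret, algo = "placeholder-secret", "HS256"
now = datetime.now(tz=timezone.utc)
payload = {
    "user_id": "1",
    "username": "user@example.com",
    "exp": now + timedelta(minutes=30),
    "iat": now,
    "iss": "discours",
}
token = jwt.encode(payload, secret, algorithm=algo)
decoded = jwt.decode(token, key=secret, algorithms=[algo])
print(decoded["user_id"], decoded["iss"])
```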


@@ -1,9 +1,9 @@
 from datetime import datetime, timedelta, timezone
 from auth.jwtcodec import JWTCodec
-from auth.validations import AuthInput
-from services.redis import redis
+from base.redis import redis
 from settings import ONETIME_TOKEN_LIFE_SPAN, SESSION_TOKEN_LIFE_SPAN
+from validations.auth import AuthInput

 async def save(token_key, life_span, auto_delete=True):


@@ -1,116 +0,0 @@
import re
from datetime import datetime
from typing import Dict, List, Optional, Union
from pydantic import BaseModel, Field, field_validator
# RFC 5322 compliant email regex pattern
EMAIL_PATTERN = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$"
class AuthInput(BaseModel):
"""Base model for authentication input validation"""
user_id: str = Field(description="Unique user identifier")
username: str = Field(min_length=2, max_length=50)
token: str = Field(min_length=32)
@field_validator("user_id")
@classmethod
def validate_user_id(cls, v: str) -> str:
if not v.strip():
raise ValueError("user_id cannot be empty")
return v
class UserRegistrationInput(BaseModel):
"""Validation model for user registration"""
email: str = Field(max_length=254) # Max email length per RFC 5321
password: str = Field(min_length=8, max_length=100)
name: str = Field(min_length=2, max_length=50)
@field_validator("email")
@classmethod
def validate_email(cls, v: str) -> str:
"""Validate email format"""
if not re.match(EMAIL_PATTERN, v):
raise ValueError("Invalid email format")
return v.lower()
@field_validator("password")
@classmethod
def validate_password_strength(cls, v: str) -> str:
"""Validate password meets security requirements"""
if not any(c.isupper() for c in v):
raise ValueError("Password must contain at least one uppercase letter")
if not any(c.islower() for c in v):
raise ValueError("Password must contain at least one lowercase letter")
if not any(c.isdigit() for c in v):
raise ValueError("Password must contain at least one number")
if not any(c in "!@#$%^&*()_+-=[]{}|;:,.<>?" for c in v):
raise ValueError("Password must contain at least one special character")
return v
class UserLoginInput(BaseModel):
"""Validation model for user login"""
email: str = Field(max_length=254)
password: str = Field(min_length=8, max_length=100)
@field_validator("email")
@classmethod
def validate_email(cls, v: str) -> str:
if not re.match(EMAIL_PATTERN, v):
raise ValueError("Invalid email format")
return v.lower()
class TokenPayload(BaseModel):
"""Validation model for JWT token payload"""
user_id: str
username: str
exp: datetime
iat: datetime
scopes: Optional[List[str]] = []
class OAuthInput(BaseModel):
"""Validation model for OAuth input"""
provider: str = Field(pattern="^(google|github|facebook)$")
code: str
redirect_uri: Optional[str] = None
@field_validator("provider")
@classmethod
def validate_provider(cls, v: str) -> str:
valid_providers = ["google", "github", "facebook"]
if v.lower() not in valid_providers:
raise ValueError(f"Provider must be one of: {', '.join(valid_providers)}")
return v.lower()
class AuthResponse(BaseModel):
"""Validation model for authentication responses"""
success: bool
token: Optional[str] = None
error: Optional[str] = None
user: Optional[Dict[str, Union[str, int, bool]]] = None
@field_validator("error")
@classmethod
def validate_error_if_not_success(cls, v: Optional[str], info) -> Optional[str]:
if not info.data.get("success") and not v:
raise ValueError("Error message required when success is False")
return v
@field_validator("token")
@classmethod
def validate_token_if_success(cls, v: Optional[str], info) -> Optional[str]:
if info.data.get("success") and not v:
raise ValueError("Token required when success is True")
return v

base/orm.py (new file, 57 lines)

@@ -0,0 +1,57 @@
from typing import Any, Callable, Dict, Generic, TypeVar
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy.sql.schema import Table
from settings import DB_URL
engine = create_engine(DB_URL, echo=False, pool_size=10, max_overflow=20)
T = TypeVar("T")
REGISTRY: Dict[str, type] = {}
def local_session():
return Session(bind=engine, expire_on_commit=False)
DeclarativeBase = declarative_base() # type: Any
class Base(DeclarativeBase):
__table__: Table
__tablename__: str
__new__: Callable
__init__: Callable
__allow_unmapped__ = True
__abstract__ = True
__table_args__ = {"extend_existing": True}
id = Column(Integer, primary_key=True)
def __init_subclass__(cls, **kwargs):
REGISTRY[cls.__name__] = cls
@classmethod
def create(cls: Generic[T], **kwargs) -> Generic[T]:
instance = cls(**kwargs)
return instance.save()
def save(self) -> Generic[T]:
with local_session() as session:
session.add(self)
session.commit()
return self
def update(self, input):
column_names = self.__table__.columns.keys()
for name, value in input.items():
if name in column_names:
setattr(self, name, value)
def dict(self) -> Dict[str, Any]:
column_names = self.__table__.columns.keys()
return {c: getattr(self, c) for c in column_names}
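
`base/orm.py` registers every subclass and gives it `create`/`save`/`update`/`dict` helpers. A hypothetical model shows the intended flow; the `Tag` model below is illustrative, not part of the repo:

```python
# Hypothetical model demonstrating the Base helpers above; Tag is not
# a real model in this repository.
from sqlalchemy import Column, String

from base.orm import Base, local_session

class Tag(Base):
    __tablename__ = "tag"
    name = Column(String, nullable=False)

tag = Tag.create(name="example")  # INSERT + COMMIT via Base.save()
print(tag.dict())                 # e.g. {"id": 1, "name": "example"}

tag.update({"name": "renamed"})   # only sets attributes; persist explicitly
with local_session() as session:
    session.add(tag)
    session.commit()
```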


@@ -1,41 +1,32 @@
-import logging
-from redis.asyncio import Redis
+import redis.asyncio as aredis
 from settings import REDIS_URL
+import logging

-# Set redis logging level to suppress DEBUG messages
-logger = logging.getLogger("redis")
-logger.setLevel(logging.WARNING)
+logger = logging.getLogger("[services.redis] ")
+logger.setLevel(logging.DEBUG)

-class RedisService:
+class RedisCache:
     def __init__(self, uri=REDIS_URL):
         self._uri: str = uri
         self.pubsub_channels = []
         self._client = None

     async def connect(self):
-        if self._uri:
-            self._client = await Redis.from_url(self._uri, decode_responses=True)
-            logger.info("Redis connection was established.")
+        self._client = aredis.Redis.from_url(self._uri, decode_responses=True)

     async def disconnect(self):
-        if isinstance(self._client, Redis):
+        if self._client:
             await self._client.close()
-            logger.info("Redis connection was closed.")

     async def execute(self, command, *args, **kwargs):
         if self._client:
             try:
-                logger.debug(f"{command}")  # {args[0]}") # {args} {kwargs}")
-                for arg in args:
-                    if isinstance(arg, dict):
-                        if arg.get("_sa_instance_state"):
-                            del arg["_sa_instance_state"]
+                logger.debug(f"{command} {args} {kwargs}")
                 r = await self._client.execute_command(command, *args, **kwargs)
-                # logger.debug(type(r))
-                # logger.debug(r)
+                logger.debug(type(r))
+                logger.debug(r)
                 return r
             except Exception as e:
                 logger.error(e)
@@ -60,22 +51,12 @@ class RedisService:
             return
         await self._client.publish(channel, data)

-    async def set(self, key, data, ex=None):
-        # Prepare the command arguments
-        args = [key, data]
-        # If an expiration time is provided, add it to the arguments
-        if ex is not None:
-            args.append("EX")
-            args.append(ex)
-        # Execute the command with the provided arguments
-        await self.execute("set", *args)
+    async def mget(self, *keys):
+        return await self.execute('MGET', *keys)

-    async def get(self, key):
-        return await self.execute("get", key)
+    async def lrange(self, key, start, stop):
+        return await self.execute('LRANGE', key, start, stop)

-redis = RedisService()
+redis = RedisCache()

 __all__ = ["redis"]
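
Everything in the rewritten class funnels through `execute`. A short sketch of how the singleton is meant to be used; the key names are made up:

```python
# Illustrative usage of the RedisCache singleton above; key names are made up.
import asyncio

from base.redis import redis

async def main():
    await redis.connect()
    await redis.execute("SET", "greeting", "hello")
    print(await redis.execute("GET", "greeting"))       # "hello"
    print(await redis.mget("greeting", "missing-key"))  # ["hello", None]
    await redis.disconnect()

asyncio.run(main())
```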

base/resolvers.py (new file, 13 lines)

@@ -0,0 +1,13 @@
from ariadne import MutationType, QueryType, ScalarType
datetime_scalar = ScalarType("DateTime")
@datetime_scalar.serializer
def serialize_datetime(value):
return value.isoformat()
query = QueryType()
mutation = MutationType()
resolvers = [query, mutation, datetime_scalar]
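
Resolver modules bind fields onto these shared `query`/`mutation` objects. A minimal hypothetical binding; the `serverTime` field is illustrative and assumes the schema declares it as `DateTime`:

```python
# Hypothetical field bound to the shared QueryType above; "serverTime"
# is illustrative and assumes the schema declares it as DateTime.
from datetime import datetime, timezone

from base.resolvers import query

@query.field("serverTime")
def resolve_server_time(_, info):
    # serialized by the DateTime scalar's .isoformat() serializer above
    return datetime.now(timezone.utc)
```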

cache/cache.py vendored (deleted, 628 lines)

@@ -1,628 +0,0 @@
"""
Caching system for the Discours platform
----------------------------------------
This module provides a comprehensive caching solution with these key components:
1. KEY NAMING CONVENTIONS:
- Entity-based keys: "entity:property:value" (e.g., "author:id:123")
- Collection keys: "entity:collection:params" (e.g., "authors:stats:limit=10:offset=0")
- Special case keys: Maintained for backwards compatibility (e.g., "topic_shouts_123")
2. CORE FUNCTIONS:
- cached_query(): High-level function for retrieving cached data or executing queries
3. ENTITY-SPECIFIC FUNCTIONS:
- cache_author(), cache_topic(): Cache entity data
- get_cached_author(), get_cached_topic(): Retrieve entity data from cache
- invalidate_cache_by_prefix(): Invalidate all keys with a specific prefix
4. CACHE INVALIDATION STRATEGY:
- Direct invalidation via invalidate_* functions for immediate changes
- Delayed invalidation via revalidation_manager for background processing
- Event-based triggers for automatic cache updates (see triggers.py)
To maintain consistency with the existing codebase, this module preserves
the original key naming patterns while providing a more structured approach
for new cache operations.
"""
import asyncio
import json
from typing import Any, Dict, List, Optional, Union
import orjson
from sqlalchemy import and_, join, select
from orm.author import Author, AuthorFollower
from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic, TopicFollower
from services.db import local_session
from services.redis import redis
from utils.encoders import CustomJSONEncoder
from utils.logger import root_logger as logger
DEFAULT_FOLLOWS = {
"topics": [],
"authors": [],
"shouts": [],
"communities": [{"id": 1, "name": "Дискурс", "slug": "discours", "pic": ""}],
}
CACHE_TTL = 300 # 5 minutes
# Key templates for common entity types
# These are used throughout the codebase and should be maintained for compatibility
CACHE_KEYS = {
"TOPIC_ID": "topic:id:{}",
"TOPIC_SLUG": "topic:slug:{}",
"TOPIC_AUTHORS": "topic:authors:{}",
"TOPIC_FOLLOWERS": "topic:followers:{}",
"TOPIC_SHOUTS": "topic_shouts_{}",
"AUTHOR_ID": "author:id:{}",
"AUTHOR_USER": "author:user:{}",
"SHOUTS": "shouts:{}",
}
# Cache topic data
async def cache_topic(topic: dict):
payload = json.dumps(topic, cls=CustomJSONEncoder)
await asyncio.gather(
redis.execute("SET", f"topic:id:{topic['id']}", payload),
redis.execute("SET", f"topic:slug:{topic['slug']}", payload),
)
# Cache author data
async def cache_author(author: dict):
payload = json.dumps(author, cls=CustomJSONEncoder)
await asyncio.gather(
redis.execute("SET", f"author:user:{author['user'].strip()}", str(author["id"])),
redis.execute("SET", f"author:id:{author['id']}", payload),
)
# Cache follows data
async def cache_follows(follower_id: int, entity_type: str, entity_id: int, is_insert=True):
key = f"author:follows-{entity_type}s:{follower_id}"
follows_str = await redis.execute("GET", key)
follows = orjson.loads(follows_str) if follows_str else DEFAULT_FOLLOWS[entity_type]
if is_insert:
if entity_id not in follows:
follows.append(entity_id)
else:
follows = [eid for eid in follows if eid != entity_id]
await redis.execute("SET", key, json.dumps(follows, cls=CustomJSONEncoder))
await update_follower_stat(follower_id, entity_type, len(follows))
# Update follower statistics
async def update_follower_stat(follower_id, entity_type, count):
follower_key = f"author:id:{follower_id}"
follower_str = await redis.execute("GET", follower_key)
follower = orjson.loads(follower_str) if follower_str else None
if follower:
follower["stat"] = {f"{entity_type}s": count}
await cache_author(follower)
# Get author from cache
async def get_cached_author(author_id: int, get_with_stat):
author_key = f"author:id:{author_id}"
result = await redis.execute("GET", author_key)
if result:
return orjson.loads(result)
# Load from database if not found in cache
q = select(Author).where(Author.id == author_id)
authors = get_with_stat(q)
if authors:
author = authors[0]
await cache_author(author.dict())
return author.dict()
return None
# Function to get cached topic
async def get_cached_topic(topic_id: int):
"""
Fetch topic data from cache or database by id.
Args:
topic_id (int): The identifier for the topic.
Returns:
dict: Topic data or None if not found.
"""
topic_key = f"topic:id:{topic_id}"
cached_topic = await redis.execute("GET", topic_key)
if cached_topic:
return orjson.loads(cached_topic)
# If not in cache, fetch from the database
with local_session() as session:
topic = session.execute(select(Topic).where(Topic.id == topic_id)).scalar_one_or_none()
if topic:
topic_dict = topic.dict()
await redis.execute("SET", topic_key, json.dumps(topic_dict, cls=CustomJSONEncoder))
return topic_dict
return None
# Get topic by slug from cache
async def get_cached_topic_by_slug(slug: str, get_with_stat):
topic_key = f"topic:slug:{slug}"
result = await redis.execute("GET", topic_key)
if result:
return orjson.loads(result)
# Load from database if not found in cache
topic_query = select(Topic).where(Topic.slug == slug)
topics = get_with_stat(topic_query)
if topics:
topic_dict = topics[0].dict()
await cache_topic(topic_dict)
return topic_dict
return None
# Get list of authors by ID from cache
async def get_cached_authors_by_ids(author_ids: List[int]) -> List[dict]:
# Fetch all author data concurrently
keys = [f"author:id:{author_id}" for author_id in author_ids]
results = await asyncio.gather(*(redis.execute("GET", key) for key in keys))
authors = [orjson.loads(result) if result else None for result in results]
# Load missing authors from database and cache
missing_indices = [index for index, author in enumerate(authors) if author is None]
if missing_indices:
missing_ids = [author_ids[index] for index in missing_indices]
with local_session() as session:
query = select(Author).where(Author.id.in_(missing_ids))
missing_authors = session.execute(query).scalars().all()
await asyncio.gather(*(cache_author(author.dict()) for author in missing_authors))
for index, author in zip(missing_indices, missing_authors):
authors[index] = author.dict()
return authors
async def get_cached_topic_followers(topic_id: int):
"""
Get a topic's followers by ID, using the Redis cache.
Args:
topic_id: topic ID
Returns:
List[dict]: list of followers with their data
"""
try:
cache_key = CACHE_KEYS["TOPIC_FOLLOWERS"].format(topic_id)
cached = await redis.execute("GET", cache_key)
if cached:
followers_ids = orjson.loads(cached)
logger.debug(f"Found {len(followers_ids)} cached followers for topic #{topic_id}")
return await get_cached_authors_by_ids(followers_ids)
with local_session() as session:
followers_ids = [
f[0]
for f in session.query(Author.id)
.join(TopicFollower, TopicFollower.follower == Author.id)
.filter(TopicFollower.topic == topic_id)
.all()
]
await redis.execute("SETEX", cache_key, CACHE_TTL, orjson.dumps(followers_ids))
followers = await get_cached_authors_by_ids(followers_ids)
logger.debug(f"Cached {len(followers)} followers for topic #{topic_id}")
return followers
except Exception as e:
logger.error(f"Error getting followers for topic #{topic_id}: {str(e)}")
return []
# Get cached author followers
async def get_cached_author_followers(author_id: int):
# Check cache for data
cached = await redis.execute("GET", f"author:followers:{author_id}")
if cached:
followers_ids = orjson.loads(cached)
followers = await get_cached_authors_by_ids(followers_ids)
logger.debug(f"Cached followers for author #{author_id}: {len(followers)}")
return followers
# Query database if cache is empty
with local_session() as session:
followers_ids = [
f[0]
for f in session.query(Author.id)
.join(AuthorFollower, AuthorFollower.follower == Author.id)
.filter(AuthorFollower.author == author_id, Author.id != author_id)
.all()
]
await redis.execute("SET", f"author:followers:{author_id}", orjson.dumps(followers_ids))
followers = await get_cached_authors_by_ids(followers_ids)
return followers
# Get cached follower authors
async def get_cached_follower_authors(author_id: int):
# Attempt to retrieve authors from cache
cached = await redis.execute("GET", f"author:follows-authors:{author_id}")
if cached:
authors_ids = orjson.loads(cached)
else:
# Query authors from database
with local_session() as session:
authors_ids = [
a[0]
for a in session.execute(
select(Author.id)
.select_from(join(Author, AuthorFollower, Author.id == AuthorFollower.author))
.where(AuthorFollower.follower == author_id)
).all()
]
await redis.execute("SET", f"author:follows-authors:{author_id}", orjson.dumps(authors_ids))
authors = await get_cached_authors_by_ids(authors_ids)
return authors
# Get cached follower topics
async def get_cached_follower_topics(author_id: int):
# Attempt to retrieve topics from cache
cached = await redis.execute("GET", f"author:follows-topics:{author_id}")
if cached:
topics_ids = orjson.loads(cached)
else:
# Load topics from database and cache them
with local_session() as session:
topics_ids = [
t[0]
for t in session.query(Topic.id)
.join(TopicFollower, TopicFollower.topic == Topic.id)
.where(TopicFollower.follower == author_id)
.all()
]
await redis.execute("SET", f"author:follows-topics:{author_id}", orjson.dumps(topics_ids))
topics = []
for topic_id in topics_ids:
topic_str = await redis.execute("GET", f"topic:id:{topic_id}")
if topic_str:
topic = orjson.loads(topic_str)
if topic and topic not in topics:
topics.append(topic)
logger.debug(f"Cached topics for author#{author_id}: {len(topics)}")
return topics
# Get author by user ID from cache
async def get_cached_author_by_user_id(user_id: str, get_with_stat):
"""
Retrieve author information by user_id, checking the cache first, then the database.
Args:
user_id (str): The user identifier for which to retrieve the author.
Returns:
dict: Dictionary with author data or None if not found.
"""
# Attempt to find author ID by user_id in Redis cache
author_id = await redis.execute("GET", f"author:user:{user_id.strip()}")
if author_id:
# If ID is found, get full author data by ID
author_data = await redis.execute("GET", f"author:id:{author_id}")
if author_data:
return orjson.loads(author_data)
# If data is not found in cache, query the database
author_query = select(Author).where(Author.user == user_id)
authors = get_with_stat(author_query)
if authors:
# Cache the retrieved author data
author = authors[0]
author_dict = author.dict()
await asyncio.gather(
redis.execute("SET", f"author:user:{user_id.strip()}", str(author.id)),
redis.execute("SET", f"author:id:{author.id}", orjson.dumps(author_dict)),
)
return author_dict
# Return None if author is not found
return None
# Get cached topic authors
async def get_cached_topic_authors(topic_id: int):
"""
Retrieve a list of authors for a given topic, using cache or database.
Args:
topic_id (int): The identifier of the topic for which to retrieve authors.
Returns:
List[dict]: A list of dictionaries containing author data.
"""
# Attempt to get a list of author IDs from cache
rkey = f"topic:authors:{topic_id}"
cached_authors_ids = await redis.execute("GET", rkey)
if cached_authors_ids:
authors_ids = orjson.loads(cached_authors_ids)
else:
# If cache is empty, get data from the database
with local_session() as session:
query = (
select(ShoutAuthor.author)
.select_from(join(ShoutTopic, Shout, ShoutTopic.shout == Shout.id))
.join(ShoutAuthor, ShoutAuthor.shout == Shout.id)
.where(and_(ShoutTopic.topic == topic_id, Shout.published_at.is_not(None), Shout.deleted_at.is_(None)))
)
authors_ids = [author_id for (author_id,) in session.execute(query).all()]
# Cache the retrieved author IDs
await redis.execute("SET", rkey, orjson.dumps(authors_ids))
# Retrieve full author details from cached IDs
if authors_ids:
authors = await get_cached_authors_by_ids(authors_ids)
logger.debug(f"Topic#{topic_id} authors fetched and cached: {len(authors)} authors found.")
return authors
return []
async def invalidate_shouts_cache(cache_keys: List[str]):
"""
    Invalidate cached shout selections for the given keys.
    """
    for key in cache_keys:
        try:
            # Build the full cache key
            cache_key = f"shouts:{key}"
            # Delete the primary cache entry
            await redis.execute("DEL", cache_key)
            logger.debug(f"Invalidated cache key: {cache_key}")
            # Mark the key as invalidated, with a TTL
            await redis.execute("SETEX", f"{cache_key}:invalidated", CACHE_TTL, "1")
            # If this is a topic cache, also invalidate the related keys
if key.startswith("topic_"):
topic_id = key.split("_")[1]
related_keys = [
f"topic:id:{topic_id}",
f"topic:authors:{topic_id}",
f"topic:followers:{topic_id}",
f"topic:stats:{topic_id}",
]
for related_key in related_keys:
await redis.execute("DEL", related_key)
logger.debug(f"Invalidated related key: {related_key}")
except Exception as e:
logger.error(f"Error invalidating cache key {key}: {e}")
async def cache_topic_shouts(topic_id: int, shouts: List[dict]):
    """Cache the list of shouts for a topic."""
    key = f"topic_shouts_{topic_id}"
    payload = json.dumps(shouts, cls=CustomJSONEncoder)
    await redis.execute("SETEX", key, CACHE_TTL, payload)
async def get_cached_topic_shouts(topic_id: int) -> Optional[List[dict]]:
    """Return the cached list of shouts for a topic, or None on a cache miss."""
    key = f"topic_shouts_{topic_id}"
cached = await redis.execute("GET", key)
if cached:
return orjson.loads(cached)
return None
async def cache_related_entities(shout: Shout):
"""
    Cache all entities related to a shout (its authors and topics)
"""
tasks = []
for author in shout.authors:
tasks.append(cache_by_id(Author, author.id, cache_author))
for topic in shout.topics:
tasks.append(cache_by_id(Topic, topic.id, cache_topic))
await asyncio.gather(*tasks)
async def invalidate_shout_related_cache(shout: Shout, author_id: int):
"""
    Invalidate every cache entry related to a shout and its associations
    Args:
        shout: the shout object
        author_id: the author's ID
"""
    cache_keys = {
        "feed",  # the main feed
        f"author_{author_id}",  # the author's publications
        "random_top",  # random top posts
        "unrated",  # unrated posts
        "recent",  # recent posts
        "coauthored",  # co-authored posts
    }
    # Add author keys
    cache_keys.update(f"author_{a.id}" for a in shout.authors)
    cache_keys.update(f"authored_{a.id}" for a in shout.authors)
    # Add topic keys
    cache_keys.update(f"topic_{t.id}" for t in shout.topics)
    cache_keys.update(f"topic_shouts_{t.id}" for t in shout.topics)
await invalidate_shouts_cache(list(cache_keys))
# Function removed - direct Redis calls used throughout the module instead
async def get_cached_entity(entity_type: str, entity_id: int, get_method, cache_method):
"""
    Generic helper for fetching a cached entity
    Args:
        entity_type: 'author' or 'topic'
        entity_id: entity ID
        get_method: function that loads the entity from the database
        cache_method: function that caches the entity
"""
key = f"{entity_type}:id:{entity_id}"
cached = await redis.execute("GET", key)
if cached:
return orjson.loads(cached)
entity = await get_method(entity_id)
if entity:
await cache_method(entity)
return entity
return None
async def cache_by_id(entity, entity_id: int, cache_method):
"""
    Cache an entity by ID using the given caching function
    Args:
        entity: the entity class (Author/Topic)
        entity_id: entity ID
        cache_method: the caching function
"""
from resolvers.stat import get_with_stat
caching_query = select(entity).filter(entity.id == entity_id)
result = get_with_stat(caching_query)
if not result or not result[0]:
logger.warning(f"{entity.__name__} with id {entity_id} not found")
return
x = result[0]
d = x.dict()
await cache_method(d)
return d
# Generic helper for storing data in the cache
async def cache_data(key: str, data: Any, ttl: Optional[int] = None) -> None:
    """
    Store data in the cache under the given key.
    Args:
        key: Cache key
        data: Data to store
        ttl: Cache TTL in seconds (None - indefinite)
    """
    try:
        payload = json.dumps(data, cls=CustomJSONEncoder)
        if ttl:
            await redis.execute("SETEX", key, ttl, payload)
        else:
            await redis.execute("SET", key, payload)
        logger.debug(f"Data cached under key {key}")
    except Exception as e:
        logger.error(f"Error caching data: {e}")
# Generic helper for reading data from the cache
async def get_cached_data(key: str) -> Optional[Any]:
    """
    Read data from the cache by key.
    Args:
        key: Cache key
    Returns:
        Any: Cached data, or None if nothing is stored
    """
    try:
        cached_data = await redis.execute("GET", key)
        if cached_data:
            loaded = orjson.loads(cached_data)
            logger.debug(f"Data read from cache for key {key}: {len(loaded)}")
            return loaded
        return None
    except Exception as e:
        logger.error(f"Error reading data from cache: {e}")
        return None
# Generic helper for invalidating cache keys by prefix
async def invalidate_cache_by_prefix(prefix: str) -> None:
    """
    Invalidate all cache keys that start with the given prefix.
    Args:
        prefix: Cache key prefix to invalidate
    """
    try:
        keys = await redis.execute("KEYS", f"{prefix}:*")
        if keys:
            await redis.execute("DEL", *keys)
            logger.debug(f"Removed {len(keys)} cache keys with prefix {prefix}")
    except Exception as e:
        logger.error(f"Error invalidating cache: {e}")
# Generic helper that reads from the cache or executes the query and caches the result
async def cached_query(
cache_key: str,
query_func: callable,
ttl: Optional[int] = None,
force_refresh: bool = False,
use_key_format: bool = True,
**query_params,
) -> Any:
"""
Gets data from cache or executes query and saves result to cache.
Supports existing key formats for compatibility.
Args:
cache_key: Cache key or key template from CACHE_KEYS
query_func: Function to execute the query
ttl: Cache TTL in seconds (None - indefinite)
force_refresh: Force cache refresh
use_key_format: Whether to check if cache_key matches a key template in CACHE_KEYS
**query_params: Parameters to pass to the query function
Returns:
Any: Data from cache or query result
"""
# Check if cache_key matches a pattern in CACHE_KEYS
actual_key = cache_key
if use_key_format and "{}" in cache_key:
# Look for a template match in CACHE_KEYS
for key_name, key_format in CACHE_KEYS.items():
if cache_key == key_format:
# We have a match, now look for the id or value to format with
for param_name, param_value in query_params.items():
if param_name in ["id", "slug", "user", "topic_id", "author_id"]:
actual_key = cache_key.format(param_value)
break
# If not forcing refresh, try to get data from cache
if not force_refresh:
cached_result = await get_cached_data(actual_key)
if cached_result is not None:
return cached_result
# If data not in cache or refresh required, execute query
try:
result = await query_func(**query_params)
if result is not None:
# Save result to cache
await cache_data(actual_key, result, ttl)
return result
except Exception as e:
logger.error(f"Error executing query for caching: {e}")
# In case of error, return data from cache if not forcing refresh
if not force_refresh:
return await get_cached_data(actual_key)
raise
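For reference, a hedged usage sketch of `cached_query` as a resolver might call it; `fetch_topic_authors` and the 300-second TTL are illustrative assumptions, not repo code:

```python
# Illustrative only: a resolver-side wrapper around cached_query,
# assuming cached_query is imported from this module (cache.cache).
async def fetch_topic_authors(topic_id: int) -> list:
    # ... run the real database query here ...
    return []

async def get_topic_authors_cached(topic_id: int) -> list:
    # Key follows the entity:collection convention; topic_id is passed
    # through to the query function by cached_query.
    return await cached_query(
        f"topic:authors:{topic_id}",
        fetch_topic_authors,
        ttl=300,  # assumed TTL
        topic_id=topic_id,
    )
```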

cache/precache.py vendored
@@ -1,133 +0,0 @@
import asyncio
import json
from sqlalchemy import and_, join, select
from cache.cache import cache_author, cache_topic
from orm.author import Author, AuthorFollower
from orm.shout import Shout, ShoutAuthor, ShoutReactionsFollower, ShoutTopic
from orm.topic import Topic, TopicFollower
from resolvers.stat import get_with_stat
from services.db import local_session
from services.redis import redis
from utils.encoders import CustomJSONEncoder
from utils.logger import root_logger as logger
# Precache an author's followers
async def precache_authors_followers(author_id, session):
authors_followers = set()
followers_query = select(AuthorFollower.follower).where(AuthorFollower.author == author_id)
result = session.execute(followers_query)
authors_followers.update(row[0] for row in result if row[0])
followers_payload = json.dumps(list(authors_followers), cls=CustomJSONEncoder)
await redis.execute("SET", f"author:followers:{author_id}", followers_payload)
# Precache an author's follows
async def precache_authors_follows(author_id, session):
follows_topics_query = select(TopicFollower.topic).where(TopicFollower.follower == author_id)
follows_authors_query = select(AuthorFollower.author).where(AuthorFollower.follower == author_id)
follows_shouts_query = select(ShoutReactionsFollower.shout).where(ShoutReactionsFollower.follower == author_id)
follows_topics = {row[0] for row in session.execute(follows_topics_query) if row[0]}
follows_authors = {row[0] for row in session.execute(follows_authors_query) if row[0]}
follows_shouts = {row[0] for row in session.execute(follows_shouts_query) if row[0]}
topics_payload = json.dumps(list(follows_topics), cls=CustomJSONEncoder)
authors_payload = json.dumps(list(follows_authors), cls=CustomJSONEncoder)
shouts_payload = json.dumps(list(follows_shouts), cls=CustomJSONEncoder)
await asyncio.gather(
redis.execute("SET", f"author:follows-topics:{author_id}", topics_payload),
redis.execute("SET", f"author:follows-authors:{author_id}", authors_payload),
redis.execute("SET", f"author:follows-shouts:{author_id}", shouts_payload),
)
# Precache a topic's authors
async def precache_topics_authors(topic_id: int, session):
topic_authors_query = (
select(ShoutAuthor.author)
.select_from(join(ShoutTopic, Shout, ShoutTopic.shout == Shout.id))
.join(ShoutAuthor, ShoutAuthor.shout == Shout.id)
.filter(
and_(
ShoutTopic.topic == topic_id,
Shout.published_at.is_not(None),
Shout.deleted_at.is_(None),
)
)
)
topic_authors = {row[0] for row in session.execute(topic_authors_query) if row[0]}
authors_payload = json.dumps(list(topic_authors), cls=CustomJSONEncoder)
await redis.execute("SET", f"topic:authors:{topic_id}", authors_payload)
# Precache a topic's followers
async def precache_topics_followers(topic_id: int, session):
followers_query = select(TopicFollower.follower).where(TopicFollower.topic == topic_id)
topic_followers = {row[0] for row in session.execute(followers_query) if row[0]}
followers_payload = json.dumps(list(topic_followers), cls=CustomJSONEncoder)
await redis.execute("SET", f"topic:followers:{topic_id}", followers_payload)
async def precache_data():
logger.info("precaching...")
try:
key = "authorizer_env"
# cache reset
value = await redis.execute("HGETALL", key)
await redis.execute("FLUSHDB")
logger.info("redis: FLUSHDB")
        # Convert the dict into a flat argument list for HSET
        if value:
            # If the value is a dict, flatten it into a list for HSET
            if isinstance(value, dict):
                flattened = []
                for field, val in value.items():
                    flattened.extend([field, val])
                await redis.execute("HSET", key, *flattened)
            else:
                # Assume the value is already a flat list
                await redis.execute("HSET", key, *value)
logger.info(f"redis hash '{key}' was restored")
with local_session() as session:
# topics
q = select(Topic).where(Topic.community == 1)
topics = get_with_stat(q)
for topic in topics:
topic_dict = topic.dict() if hasattr(topic, "dict") else topic
await cache_topic(topic_dict)
await asyncio.gather(
precache_topics_followers(topic_dict["id"], session),
precache_topics_authors(topic_dict["id"], session),
)
logger.info(f"{len(topics)} topics and their followings precached")
# authors
authors = get_with_stat(select(Author).where(Author.user.is_not(None)))
logger.info(f"{len(authors)} authors found in database")
for author in authors:
if isinstance(author, Author):
profile = author.dict()
author_id = profile.get("id")
user_id = profile.get("user", "").strip()
if author_id and user_id:
await cache_author(profile)
await asyncio.gather(
precache_authors_followers(author_id, session), precache_authors_follows(author_id, session)
)
else:
logger.error(f"fail caching {author}")
logger.info(f"{len(authors)} authors and their followings precached")
except Exception as exc:
import traceback
traceback.print_exc()
logger.error(f"Error in precache_data: {exc}")

cache/revalidator.py vendored
@@ -1,172 +0,0 @@
import asyncio
from cache.cache import (
cache_author,
cache_topic,
get_cached_author,
get_cached_topic,
invalidate_cache_by_prefix,
)
from resolvers.stat import get_with_stat
from services.redis import redis
from utils.logger import root_logger as logger
CACHE_REVALIDATION_INTERVAL = 300 # 5 minutes
class CacheRevalidationManager:
    def __init__(self, interval=CACHE_REVALIDATION_INTERVAL):
        """Initialize the manager with the given check interval (in seconds)."""
        self.interval = interval
        self.items_to_revalidate = {"authors": set(), "topics": set(), "shouts": set(), "reactions": set()}
        self.lock = asyncio.Lock()
        self.running = True
        self.MAX_BATCH_SIZE = 10  # maximum number of items to process one by one
        self._redis = redis  # direct reference to the Redis service client

    async def start(self):
        """Start the background worker that revalidates the cache."""
        # Make sure we have a Redis connection
        if not self._redis._client:
            logger.warning("Redis connection not established. Waiting for connection...")
            try:
                await self._redis.connect()
                logger.info("Redis connection established for revalidation manager")
            except Exception as e:
                logger.error(f"Failed to connect to Redis: {e}")
        self.task = asyncio.create_task(self.revalidate_cache())

    async def revalidate_cache(self):
        """Periodically check and revalidate the cache every self.interval seconds."""
        try:
            while self.running:
                await asyncio.sleep(self.interval)
                await self.process_revalidation()
        except asyncio.CancelledError:
            logger.info("Revalidation worker was stopped.")
        except Exception as e:
            logger.error(f"An error occurred in the revalidation worker: {e}")

    async def process_revalidation(self):
        """Refresh the cache for every entity queued for revalidation."""
        # Check the Redis connection
        if not self._redis._client:
            return  # bail out if Redis is not available
        async with self.lock:
            # Revalidate the author cache
            if self.items_to_revalidate["authors"]:
                logger.debug(f"Revalidating {len(self.items_to_revalidate['authors'])} authors")
                for author_id in self.items_to_revalidate["authors"]:
                    if author_id == "all":
                        await invalidate_cache_by_prefix("authors")
                        break
                    author = await get_cached_author(author_id, get_with_stat)
                    if author:
                        await cache_author(author)
                self.items_to_revalidate["authors"].clear()
            # Revalidate the topic cache
            if self.items_to_revalidate["topics"]:
                logger.debug(f"Revalidating {len(self.items_to_revalidate['topics'])} topics")
                for topic_id in self.items_to_revalidate["topics"]:
                    if topic_id == "all":
                        await invalidate_cache_by_prefix("topics")
                        break
                    topic = await get_cached_topic(topic_id)
                    if topic:
                        await cache_topic(topic)
                self.items_to_revalidate["topics"].clear()
            # Revalidate shouts (publications)
            if self.items_to_revalidate["shouts"]:
                shouts_count = len(self.items_to_revalidate["shouts"])
                logger.debug(f"Revalidating {shouts_count} shouts")
                # Check for the special 'all' flag
                if "all" in self.items_to_revalidate["shouts"]:
                    await invalidate_cache_by_prefix("shouts")
                # For large batches (but not 'all'), use a collection-level strategy
                elif shouts_count > self.MAX_BATCH_SIZE:
                    # Invalidate only the collection keys, which touch many entities at once
                    collection_keys = await self._redis.execute("KEYS", "shouts:*")
                    if collection_keys:
                        await self._redis.execute("DEL", *collection_keys)
                        logger.debug(f"Removed {len(collection_keys)} shout collection keys")
                    # Refresh the cache of each individual shout
                    for shout_id in self.items_to_revalidate["shouts"]:
                        if shout_id != "all":
                            # Targeted invalidation for each shout_id
                            specific_keys = [f"shout:id:{shout_id}"]
                            for key in specific_keys:
                                await self._redis.execute("DEL", key)
                                logger.debug(f"Removed cache key {key}")
                else:
                    # For small batches, handle each item individually
                    for shout_id in self.items_to_revalidate["shouts"]:
                        if shout_id != "all":
                            # Targeted invalidation for each shout_id
                            specific_keys = [f"shout:id:{shout_id}"]
                            for key in specific_keys:
                                await self._redis.execute("DEL", key)
                                logger.debug(f"Removed cache key {key}")
                self.items_to_revalidate["shouts"].clear()
            # Reactions get the same targeted invalidation
            if self.items_to_revalidate["reactions"]:
                reactions_count = len(self.items_to_revalidate["reactions"])
                logger.debug(f"Revalidating {reactions_count} reactions")
                if "all" in self.items_to_revalidate["reactions"]:
                    await invalidate_cache_by_prefix("reactions")
                elif reactions_count > self.MAX_BATCH_SIZE:
                    # Invalidate only the reaction collection keys
                    collection_keys = await self._redis.execute("KEYS", "reactions:*")
                    if collection_keys:
                        await self._redis.execute("DEL", *collection_keys)
                        logger.debug(f"Removed {len(collection_keys)} reaction collection keys")
                    # Targeted invalidation for each reaction
                    for reaction_id in self.items_to_revalidate["reactions"]:
                        if reaction_id != "all":
                            specific_keys = [f"reaction:id:{reaction_id}"]
                            for key in specific_keys:
                                await self._redis.execute("DEL", key)
                                logger.debug(f"Removed cache key {key}")
                else:
                    # Targeted invalidation for each reaction
                    for reaction_id in self.items_to_revalidate["reactions"]:
                        if reaction_id != "all":
                            specific_keys = [f"reaction:id:{reaction_id}"]
                            for key in specific_keys:
                                await self._redis.execute("DEL", key)
                                logger.debug(f"Removed cache key {key}")
                self.items_to_revalidate["reactions"].clear()

    def mark_for_revalidation(self, entity_id, entity_type):
        """Queue an entity for revalidation."""
        if entity_id and entity_type:
            self.items_to_revalidate[entity_type].add(entity_id)

    def invalidate_all(self, entity_type):
        """Mark every item of the given type for invalidation."""
        logger.debug(f"Marking all {entity_type} for invalidation")
        # Special flag for full invalidation
        self.items_to_revalidate[entity_type].add("all")

    async def stop(self):
        """Stop the background worker."""
        self.running = False
        if hasattr(self, "task"):
            self.task.cancel()
            try:
                await self.task
            except asyncio.CancelledError:
                pass

revalidation_manager = CacheRevalidationManager()

cache/triggers.py vendored
@@ -1,147 +0,0 @@
from sqlalchemy import event
from cache.revalidator import revalidation_manager
from orm.author import Author, AuthorFollower
from orm.reaction import Reaction, ReactionKind
from orm.shout import Shout, ShoutAuthor, ShoutReactionsFollower
from orm.topic import Topic, TopicFollower
from services.db import local_session
from utils.logger import root_logger as logger
def mark_for_revalidation(entity, *args):
"""Отметка сущности для ревалидации."""
entity_type = (
"authors"
if isinstance(entity, Author)
else "topics"
if isinstance(entity, Topic)
else "reactions"
if isinstance(entity, Reaction)
else "shouts"
if isinstance(entity, Shout)
else None
)
if entity_type:
revalidation_manager.mark_for_revalidation(entity.id, entity_type)
def after_follower_handler(mapper, connection, target, is_delete=False):
"""Обработчик добавления, обновления или удаления подписки."""
entity_type = None
if isinstance(target, AuthorFollower):
entity_type = "authors"
elif isinstance(target, TopicFollower):
entity_type = "topics"
elif isinstance(target, ShoutReactionsFollower):
entity_type = "shouts"
if entity_type:
revalidation_manager.mark_for_revalidation(
target.author if entity_type == "authors" else target.topic, entity_type
)
if not is_delete:
revalidation_manager.mark_for_revalidation(target.follower, "authors")
def after_shout_handler(mapper, connection, target):
"""Обработчик изменения статуса публикации"""
if not isinstance(target, Shout):
return
    # Check whether the publication status changed
    # was_published = target.published_at is not None and target.deleted_at is None
    # Always refresh author and topic counters on any change to the post
for author in target.authors:
revalidation_manager.mark_for_revalidation(author.id, "authors")
for topic in target.topics:
revalidation_manager.mark_for_revalidation(topic.id, "topics")
    # Refresh the post itself
revalidation_manager.mark_for_revalidation(target.id, "shouts")
def after_reaction_handler(mapper, connection, target):
"""Обработчик для комментариев"""
if not isinstance(target, Reaction):
return
    # Check whether the reaction is a comment
is_comment = target.kind == ReactionKind.COMMENT.value
    # Get the related post
shout_id = target.shout if isinstance(target.shout, int) else target.shout.id
if not shout_id:
return
    # Refresh counters for the comment's author
if target.created_by:
revalidation_manager.mark_for_revalidation(target.created_by, "authors")
    # Refresh counters for the post
revalidation_manager.mark_for_revalidation(shout_id, "shouts")
if is_comment:
        # For comments, also refresh the authors and topics
with local_session() as session:
shout = (
session.query(Shout)
.filter(
Shout.id == shout_id,
Shout.published_at.is_not(None),
Shout.deleted_at.is_(None),
)
.first()
)
if shout:
for author in shout.authors:
revalidation_manager.mark_for_revalidation(author.id, "authors")
for topic in shout.topics:
revalidation_manager.mark_for_revalidation(topic.id, "topics")
def events_register():
"""Регистрация обработчиков событий для всех сущностей."""
event.listen(ShoutAuthor, "after_insert", mark_for_revalidation)
event.listen(ShoutAuthor, "after_update", mark_for_revalidation)
event.listen(ShoutAuthor, "after_delete", mark_for_revalidation)
event.listen(AuthorFollower, "after_insert", after_follower_handler)
event.listen(AuthorFollower, "after_update", after_follower_handler)
event.listen(
AuthorFollower,
"after_delete",
lambda *args: after_follower_handler(*args, is_delete=True),
)
event.listen(TopicFollower, "after_insert", after_follower_handler)
event.listen(TopicFollower, "after_update", after_follower_handler)
event.listen(
TopicFollower,
"after_delete",
lambda *args: after_follower_handler(*args, is_delete=True),
)
event.listen(ShoutReactionsFollower, "after_insert", after_follower_handler)
event.listen(ShoutReactionsFollower, "after_update", after_follower_handler)
event.listen(
ShoutReactionsFollower,
"after_delete",
lambda *args: after_follower_handler(*args, is_delete=True),
)
event.listen(Reaction, "after_update", mark_for_revalidation)
event.listen(Author, "after_update", mark_for_revalidation)
event.listen(Topic, "after_update", mark_for_revalidation)
event.listen(Shout, "after_update", after_shout_handler)
event.listen(Shout, "after_delete", after_shout_handler)
event.listen(Reaction, "after_insert", after_reaction_handler)
event.listen(Reaction, "after_update", after_reaction_handler)
event.listen(Reaction, "after_delete", after_reaction_handler)
logger.info("Event handlers registered successfully.")

checks.sh Executable file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
echo "> isort"
isort .
echo "> black"
black .
echo "> flake8"
flake8 .
# echo "> mypy"
# mypy .

@@ -1,295 +0,0 @@
# Discours Caching System
## Overview
The Discours caching system is a comprehensive solution for improving platform performance. It uses Redis to store frequently requested data and to reduce the load on the primary database.
Caching is implemented as a multi-level system made up of several modules:
- `cache.py` - the core module with the caching functions
- `revalidator.py` - the asynchronous cache revalidation manager
- `triggers.py` - SQLAlchemy event triggers for automatic revalidation
- `precache.py` - precaching of data at application startup
## Key Components
### 1. Cache Key Formats
The system supports several key formats for compatibility and convenience:
- **Entity keys**: `entity:property:value` (e.g. `author:id:123`)
- **Collection keys**: `entity:collection:params` (e.g. `authors:stats:limit=10:offset=0`)
- **Special keys**: for backwards compatibility (e.g. `topic_shouts_123`)
All standard key formats live in the `CACHE_KEYS` dictionary:
```python
CACHE_KEYS = {
    "TOPIC_ID": "topic:id:{}",
    "TOPIC_SLUG": "topic:slug:{}",
    "AUTHOR_ID": "author:id:{}",
    # and others...
}
```
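Since the templates are plain `str.format` patterns, turning one into a concrete key is a one-liner. A minimal, self-contained illustration (the dictionary is abridged to the entries shown above):

```python
# Abridged copy of the templates above, for illustration only
CACHE_KEYS = {
    "TOPIC_ID": "topic:id:{}",
    "AUTHOR_ID": "author:id:{}",
}

topic_key = CACHE_KEYS["TOPIC_ID"].format(123)
author_key = CACHE_KEYS["AUTHOR_ID"].format(42)
assert topic_key == "topic:id:123"
assert author_key == "author:id:42"
```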
### 2. Core Caching Functions
#### Key Structure
Instead of generating keys through helper functions, the system follows strict key-building conventions:
1. **Keys for individual entities** follow the template:
```
entity:property:value
```
For example:
- `topic:id:123` - the topic with ID 123
- `author:slug:john-doe` - the author with slug "john-doe"
- `shout:id:456` - the publication with ID 456
2. **Keys for collections** follow the template:
```
entity:collection[:filter1=value1:filter2=value2:...]
```
For example:
- `topics:all:basic` - the basic list of all topics
- `authors:stats:limit=10:offset=0:sort=name` - a sorted, paginated list of authors
- `shouts:feed:limit=20:community=1` - a publication feed filtered by community
3. **Special key formats** for backwards compatibility:
```
entity_action_id
```
For example:
- `topic_shouts_123` - publications for the topic with ID 123
In every module, developers must build keys explicitly according to these conventions (a sketch of such a builder follows below); this keeps caching uniform and predictable.
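A minimal sketch of what an explicit builder for collection keys can look like; this helper is hypothetical (modules typically build keys inline with f-strings):

```python
# Hypothetical helper demonstrating the collection-key convention
def collection_key(entity: str, collection: str, **filters) -> str:
    """Build a key like 'authors:stats:limit=10:offset=0:sort=name'."""
    parts = [entity, collection]
    # Sort the filters so identical parameters always produce identical keys
    parts.extend(f"{name}={value}" for name, value in sorted(filters.items()))
    return ":".join(parts)

assert (
    collection_key("authors", "stats", limit=10, offset=0, sort="name")
    == "authors:stats:limit=10:offset=0:sort=name"
)
```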
#### Working with Cached Data
```python
async def cache_data(key, data, ttl=None)
async def get_cached_data(key)
```
These functions provide a universal interface for storing data in the cache and reading it back. They use Redis directly via `redis.execute()` calls.
#### High-level Query Caching
```python
async def cached_query(cache_key, query_func, ttl=None, force_refresh=False, **query_params)
```
The `cached_query` function combines reading from the cache with executing the query on a cache miss. It is the main function resolvers should use for caching query results.
### 3. Entity Caching
Dedicated functions exist for the main entity types:
```python
async def cache_topic(topic: dict)
async def cache_author(author: dict)
async def get_cached_topic(topic_id: int)
async def get_cached_author(author_id: int, get_with_stat)
```
These functions simplify working with the most common data types and ensure a uniform approach to caching them; a usage sketch follows below.
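A hedged usage sketch of these functions inside a resolver; error handling is omitted and the import paths follow this repo's layout (`cache.cache`, `resolvers.stat`):

```python
from cache.cache import get_cached_author, get_cached_topic
from resolvers.stat import get_with_stat

async def resolve_author_and_topic(author_id: int, topic_id: int):
    # Both helpers return a dict on success or None on a miss
    author = await get_cached_author(author_id, get_with_stat)
    topic = await get_cached_topic(topic_id)
    return author, topic
```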
### 4. Working with Relations
The following functions handle relations between entities:
```python
async def cache_follows(follower_id, entity_type, entity_id, is_insert=True)
async def get_cached_topic_followers(topic_id)
async def get_cached_author_followers(author_id)
async def get_cached_follower_topics(author_id)
```
They make it efficient to cache and retrieve information about follows and about the relations between authors, topics, and publications.
## Cache Invalidation System
### 1. Direct Invalidation
The system supports two kinds of cache invalidation:
#### 1.1. Invalidation by prefix
```python
async def invalidate_cache_by_prefix(prefix)
```
Invalidates every cache key that starts with the given prefix. Resolvers use it to invalidate whole groups of caches on bulk changes.
#### 1.2. Targeted invalidation
```python
async def invalidate_authors_cache(author_id=None)
async def invalidate_topics_cache(topic_id=None)
```
These functions invalidate the cache for a single entity only, which reduces Redis load and avoids discarding cached data needlessly. If no entity ID is given, they fall back to prefix invalidation.
Examples of targeted invalidation:
```python
# Invalidate the cache only for the author with ID 123
await invalidate_authors_cache(123)
# Invalidate the cache only for the topic with ID 456
await invalidate_topics_cache(456)
```
### 2. Deferred Invalidation
The `revalidator.py` module implements deferred cache invalidation through the `CacheRevalidationManager` class:
```python
class CacheRevalidationManager:
    def __init__(self, interval=CACHE_REVALIDATION_INTERVAL):
        # ...
        self._redis = redis  # direct reference to the Redis service

    async def start(self):
        # Check and establish the Redis connection
        # ...

    async def process_revalidation(self):
        # Process the items queued for revalidation
        # ...

    def mark_for_revalidation(self, entity_id, entity_type):
        # Add an entity to the revalidation queue
        # ...
```
The revalidation manager runs as an asynchronous background worker that periodically (every 5 minutes by default) checks whether any entities need revalidation.
**Interaction with Redis:**
- CacheRevalidationManager keeps a direct reference to the Redis service in its `_redis` attribute
- On start it verifies the Redis connection and establishes a new one if necessary
- The connection is checked automatically before every revalidation pass
- The system restores the connection on its own if it is lost
**Implementation details** (see the sketch after this list):
- Authors and topics are revalidated record by record
- Shouts and reactions are processed in batches, with a threshold of 10 items
- Above that threshold the system switches to invalidating collections instead of individual records
- The special `all` flag triggers full invalidation of all records of a given type
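A short sketch of how application code hands entities to the manager (both methods appear in `revalidator.py`; the IDs are made up):

```python
from cache.revalidator import revalidation_manager

# Queue individual entities; the background worker refreshes them on its
# next pass (every CACHE_REVALIDATION_INTERVAL = 300 seconds by default).
revalidation_manager.mark_for_revalidation(123, "authors")
revalidation_manager.mark_for_revalidation(456, "topics")

# The special "all" flag switches the worker to prefix invalidation
revalidation_manager.invalidate_all("topics")
```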
### 3. Automatic Invalidation via Triggers
The `triggers.py` module registers SQLAlchemy event handlers that automatically mark entities for revalidation whenever the underlying data changes:
```python
def events_register():
    event.listen(Author, "after_update", mark_for_revalidation)
    event.listen(Topic, "after_update", mark_for_revalidation)
    # and others...
```
The triggers have the following properties:
- They react to insert, update, and delete events
- They mark the affected entities for deferred revalidation
- They account for relations between entities (e.g. changing a topic refreshes the related shouts)
## Precaching
The `precache.py` module precaches frequently used data at application startup:
```python
async def precache_data():
    # ...
```
This function runs when the application starts and fills the cache with data that users will request often.
## Usage Examples
### Simple caching of a query result
```python
async def get_topics_with_stats(limit=10, offset=0, by="title"):
    # Build the cache key according to the conventions
    cache_key = f"topics:stats:limit={limit}:offset={offset}:sort={by}"
    cached_data = await get_cached_data(cache_key)
    if cached_data:
        return cached_data
    # Query the database
    result = ...  # data-loading logic
    await cache_data(cache_key, result, ttl=300)
    return result
```
### Using the generic cached_query function
```python
async def get_topics_with_stats(limit=10, offset=0, by="title"):
    async def fetch_data(limit, offset, by):
        # Data-loading logic
        return result
    # Build the cache key according to the conventions
    cache_key = f"topics:stats:limit={limit}:offset={offset}:sort={by}"
    return await cached_query(
        cache_key,
        fetch_data,
        ttl=300,
        limit=limit,
        offset=offset,
        by=by
    )
```
### Targeted cache invalidation on data changes
```python
async def update_topic(topic_id, new_data):
    # Update the data in the database
    # ...
    # Targeted invalidation of the cache for the changed topic only
    await invalidate_topics_cache(topic_id)
    return updated_topic
```
## Debugging and Monitoring
The caching system uses the logger to track its operations:
```python
logger.debug(f"Data read from cache for key {key}")
logger.debug(f"Removed {len(keys)} cache keys with prefix {prefix}")
logger.error(f"Error invalidating cache: {e}")
```
This makes it possible to monitor the cache and spot potential problems early.
## Usage Recommendations
1. **Follow the key-building conventions** - this is critical for cache consistency and predictability.
2. **Do not invent your own key formats** - use the existing templates to keep things uniform.
3. **Do not forget invalidation** - always invalidate the cache when data changes.
4. **Prefer targeted invalidation** over prefix invalidation to reduce Redis load.
5. **Set sensible TTLs** - use different TTL values depending on how often the data changes.
6. **Do not cache large volumes of data** - cache only what actually improves performance.
## Implementation Details
- **Serialization**: `orjson` is used for efficient serialization and deserialization.
- **Date and time formatting**: `CustomJSONEncoder` is used for correct handling of dates.
- **Asynchrony**: all caching operations are asynchronous to minimize the impact on API performance.
- **Direct Redis access**: all operations go through direct `redis.execute()` calls with error handling.
- **Batch processing**: bulk operations use a threshold above which optimized strategies kick in.
## Known Limitations
1. **Data consistency** - the system does not guarantee absolute consistency between the cache and the database.
2. **Memory** - the volume of cached data must be watched to avoid Redis memory issues.
3. **Redis performance** - Redis can become a bottleneck under a very high volume of cache operations.

@@ -1,165 +0,0 @@
# Comment Pagination
## Overview
A branch-based comment pagination system has been implemented, which makes it possible to load and display nested discussion threads efficiently. Its main advantages:
1. Only the comments that are needed are loaded, not the whole tree
2. Reduced load on both server and client
3. Efficient navigation through large discussions
4. Preloading of the first N replies for better UX
## API for Hierarchical Comment Loading
### GraphQL query `load_comments_branch`
```graphql
query LoadCommentsBranch(
  $shout: Int!,
  $parentId: Int,
  $limit: Int,
  $offset: Int,
  $sort: ReactionSort,
  $childrenLimit: Int,
  $childrenOffset: Int
) {
  load_comments_branch(
    shout: $shout,
    parent_id: $parentId,
    limit: $limit,
    offset: $offset,
    sort: $sort,
    children_limit: $childrenLimit,
    children_offset: $childrenOffset
  ) {
    id
    body
    created_at
    created_by {
      id
      name
      slug
      pic
    }
    kind
    reply_to
    stat {
      rating
      comments_count
    }
    first_replies {
      id
      body
      created_at
      created_by {
        id
        name
        slug
        pic
      }
      kind
      reply_to
      stat {
        rating
        comments_count
      }
    }
  }
}
```
### Query Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| shout | Int! | - | ID of the article the comments belong to |
| parent_id | Int | null | ID of the parent comment. If null, root comments are loaded |
| limit | Int | 10 | Maximum number of comments to load |
| offset | Int | 0 | Offset for pagination |
| sort | ReactionSort | newest | Sort order: newest, oldest, like |
| children_limit | Int | 3 | Maximum number of child comments per parent comment |
| children_offset | Int | 0 | Offset for child-comment pagination |
### Response Fields
Each comment contains the following main fields:
- `id`: Comment ID
- `body`: Comment text
- `created_at`: Creation time
- `created_by`: Author information
- `kind`: Reaction kind (COMMENT)
- `reply_to`: Parent comment ID (null for root comments)
- `first_replies`: The first N child comments
- `stat`: Comment statistics, including:
  - `comments_count`: Number of replies to the comment
  - `rating`: Comment rating
## Usage Examples
### Loading root comments with their first replies
```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    limit: 10,
    offset: 0,
    sort: "newest",
    childrenLimit: 3
  }
});
```
### Loading replies to a specific comment
```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    parentId: 123, // ID of the comment whose replies we are loading
    limit: 10,
    offset: 0,
    sort: "oldest" // sort replies oldest first
  }
});
```
### Paginating child comments
To load additional replies to a comment:
```javascript
const { data } = await client.query({
  query: LOAD_COMMENTS_BRANCH,
  variables: {
    shout: 222,
    parentId: 123,
    limit: 10,
    offset: 0,
    childrenLimit: 5,
    childrenOffset: 3 // skip the first 3 comments (already loaded)
  }
});
```
## Client Implementation Recommendations
1. For complex discussion threads it is recommended to:
   - First load only the root comments with their first N replies
   - When more replies exist (`stat.comments_count > first_replies.length`),
     show a "Show all replies" button
   - On click, load the remaining replies with a query that sets `parentId`
2. For sorting:
   - Use `newest` by default to surface fresh discussions
   - Provide a sort toggle for the whole comment tree
   - Reload the data with the new `sort` parameter when the order changes
3. For better performance (see the sketch after this list):
   - Cache query results on the client
   - Use optimistic updates when adding or editing comments
   - Load comments in chunks when needed (lazy loading)
@@ -1,48 +0,0 @@
## Publication Views
- Google Analytics integration for tracking publication views
- Counting of unique users as well as total views
- Statistics update automatically when publication data is requested
## Multi-domain Authorization
- Authorization support for multiple domains
- Automatic detection of the authorization server
- Correct CORS handling for all supported domains
## Caching System
- Redis is used as the primary caching mechanism
- The cache_on_arguments decorator supports both sync and async functions
- Automatic JSON serialization/deserialization via CustomJSONEncoder
- Fallback pickle serialization for complex objects
- Unique cache keys generated from the function signature and its arguments
- Configurable cache TTL
- Manual cache invalidation for specific functions and arguments
## Webhooks
- Automatic webhook registration for the user.login event
- Prevention of duplicate webhooks
- Automatic cleanup of stale webhooks
- Webhook authorization via WEBHOOK_SECRET
- Error handling for webhook operations
- Dynamic endpoint selection based on the environment
## CORS Configuration
- Supported methods: GET, POST, OPTIONS
- Credentials support enabled
- Allowed headers: Authorization, Content-Type, X-Requested-With, DNT, Cache-Control
- Preflight responses cached for 20 days (1728000 seconds); a sketch follows below
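A minimal sketch of the equivalent Starlette configuration; the origin list is a placeholder and the real values live in the application config:

```python
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware

middleware = [
    Middleware(
        CORSMiddleware,
        allow_origins=["https://example.org"],  # placeholder origins
        allow_methods=["GET", "POST", "OPTIONS"],
        allow_headers=[
            "Authorization", "Content-Type", "X-Requested-With", "DNT", "Cache-Control",
        ],
        allow_credentials=True,
        max_age=1728000,  # cache preflight responses for 20 days
    )
]
app = Starlette(middleware=middleware)
```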
## Branch-based Comment Pagination
- Efficient comment loading that respects the hierarchical structure
- A dedicated `load_comments_branch` query for optimized loading of a comment branch
- Ability to load an article's root comments together with their first replies
- Flexible pagination for both root and child comments
- The `stat.comments_count` field reports the number of replies to a comment
- A dedicated `first_replies` field holds the first replies to a comment
- Support for several sort orders (newest, oldest, popular)
- Optimized SQL queries to minimize database load

View File

@ -1,94 +0,0 @@
# Following System
## Overview
The system supports following these entity types:
- Authors
- Topics
- Communities
- Shouts (Posts)
## GraphQL API
### Mutations
#### follow
Follow an entity (author/topic/community/shout).
**Parameters:**
- `what: String!` - Entity type (`AUTHOR`, `TOPIC`, `COMMUNITY`, `SHOUT`)
- `slug: String` - Entity slug
- `entity_id: Int` - Optional entity ID
**Returns:**
```typescript
{
authors?: Author[] // For AUTHOR type
topics?: Topic[] // For TOPIC type
communities?: Community[] // For COMMUNITY type
shouts?: Shout[] // For SHOUT type
error?: String // Error message if any
}
```
#### unfollow
Unfollow an entity.
**Parameters:** Same as `follow`
**Returns:** Same as `follow`
### Queries
#### get_shout_followers
Get list of users who reacted to a shout.
**Parameters:**
- `slug: String` - Shout slug
- `shout_id: Int` - Optional shout ID
**Returns:**
```typescript
Author[] // List of authors who reacted
```
## Caching System
### Supported Entity Types
- Authors: `cache_author`, `get_cached_follower_authors`
- Topics: `cache_topic`, `get_cached_follower_topics`
- Communities: No cache
- Shouts: No cache
### Cache Flow
1. On follow/unfollow:
- Update entity in cache
- Update follower's following list
2. Cache is updated before notifications
## Notifications
- Sent when author is followed/unfollowed
- Contains:
- Follower info
- Author ID
- Action type ("follow"/"unfollow")
## Error Handling
- Unauthorized access check
- Entity existence validation
- Duplicate follow prevention
- Full error logging
- Transaction safety with `local_session()`
## Database Schema
### Follower Tables
- `AuthorFollower`
- `TopicFollower`
- `CommunityFollower`
- `ShoutReactionsFollower`
Each table contains (see the sketch below):
- `follower` - ID of following user
- `{entity_type}` - ID of followed entity
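A hedged sketch of that follower-table shape using SQLAlchemy; the table name and column types are assumptions for illustration, not the actual ORM definitions:

```python
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class AuthorFollower(Base):
    __tablename__ = "author_follower"  # assumed name
    id = Column(Integer, primary_key=True)
    follower = Column(ForeignKey("author.id"))  # ID of the following user
    author = Column(ForeignKey("author.id"))    # ID of the followed entity
```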

@@ -1,80 +0,0 @@
# Publication Loading System
## Implementation Details
### Base Query
- Automatically loads the primary author
- Attaches the publication's main topic
- Supports a flexible filtering system
- Optimizes queries based on the requested fields
### Statistics
- Like/dislike counts
- Number of comments
- Date of the last reaction
- Statistics are loaded only when the `stat` field is requested
### Performance Optimizations
- Lazy loading of related data
- Results cached for 5 minutes
- Batch loading of authors and topics
- Subqueries for complex selections
## Feed Types
### Random top posts (load_shouts_random_top)
**Advantages:**
- Diverse content
- Fast selection from a cached pool of top posts
- Configurable pool size
**Limitations:**
- Refreshed once every 5 minutes
- Maximum pool size: 100 posts
- Only likes/dislikes are counted (comments are ignored)
### Unrated posts (load_shouts_unrated)
**Advantages:**
- Helps surface new content
- Evens out the distribution of ratings
- Randomized output order
**Limitations:**
- Only posts with fewer than 3 reactions
- Comments are not counted
- No rating-based sorting
### Bookmarks (load_shouts_bookmarked)
**Advantages:**
- Personalized selection
- Quick access to saved posts
- Supports all filters
**Limitations:**
- Requires authorization
- Limit on the number of bookmarks
- Caching disabled
## Important Notes
### Pagination
- Default page size: 10
- Maximum page size: 100
- Cursor-based pagination supported (a query sketch follows at the end of this document)
### Caching
- TTL: 5 minutes
- Invalidated when a post changes
- Separate cache per sort order
### Sorting
- By rating (likes minus dislikes)
- By number of comments
- By date of the last reaction
- By publication date (default)
### Security
- Access-rights checks
- Deleted content is filtered out
- Protection against SQL injection
- Input validation
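To tie the pagination, filtering, and sorting rules together, here is a hedged sketch of a base query; it reuses field names from `orm.shout` seen elsewhere in this diff, while the ordering choice is illustrative:

```python
from sqlalchemy import select
from orm.shout import Shout

def shouts_page_query(limit: int = 10, offset: int = 0):
    limit = min(limit, 100)  # enforce the maximum page size
    return (
        select(Shout)
        # filter out unpublished and deleted content
        .where(Shout.published_at.is_not(None), Shout.deleted_at.is_(None))
        .order_by(Shout.published_at.desc())  # default: by publication date
        .limit(limit)
        .offset(offset)
    )
```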

@@ -1,82 +0,0 @@
# Rating System
## GraphQL Resolvers
### Queries
#### get_my_rates_shouts
Get user's reactions (LIKE/DISLIKE) for specified posts.
**Parameters:**
- `shouts: [Int!]!` - array of shout IDs
**Returns:**
```typescript
[{
shout_id: Int
my_rate: ReactionKind // LIKE or DISLIKE
}]
```
#### get_my_rates_comments
Get user's reactions (LIKE/DISLIKE) for specified comments.
**Parameters:**
- `comments: [Int!]!` - array of comment IDs
**Returns:**
```typescript
[{
comment_id: Int
my_rate: ReactionKind // LIKE or DISLIKE
}]
```
### Mutations
#### rate_author
Rate another author (karma system).
**Parameters:**
- `rated_slug: String!` - author's slug
- `value: Int!` - rating value (positive/negative)
## Rating Calculation
### Author Rating Components
#### Shouts Rating
- Calculated from LIKE/DISLIKE reactions on author's posts
- Each LIKE: +1
- Each DISLIKE: -1
- Excludes deleted reactions
- Excludes comment reactions
#### Comments Rating
- Calculated from LIKE/DISLIKE reactions on author's comments
- Each LIKE: +1
- Each DISLIKE: -1
- Only counts reactions to COMMENT type reactions
- Excludes deleted reactions
#### Legacy Karma
- Based on direct author ratings via `rate_author` mutation
- Stored in `AuthorRating` table
- Each positive rating: +1
- Each negative rating: -1
### Helper Functions
- `count_author_comments_rating()` - Calculate comment rating
- `count_author_shouts_rating()` - Calculate posts rating
- `get_author_rating_old()` - Get legacy karma rating
- `get_author_rating_shouts()` - Get posts rating (optimized)
- `get_author_rating_comments()` - Get comments rating (optimized)
- `add_author_rating_columns()` - Add rating columns to author query
## Notes
- All ratings exclude deleted content
- Reactions are unique per user/content
- Rating calculations are optimized with SQLAlchemy
- System supports both direct author rating and content-based rating (a query sketch follows below)
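A hedged sketch of the shouts-rating aggregation described above; `ReactionKind.LIKE`/`DISLIKE` follow the kinds named in this document, while `Reaction.deleted_at` and the join columns are assumptions for illustration:

```python
from sqlalchemy import case, func, select
from orm.reaction import Reaction, ReactionKind
from orm.shout import Shout, ShoutAuthor

def author_shouts_rating_query(author_id: int):
    # +1 per LIKE and -1 per DISLIKE on the author's posts,
    # excluding deleted reactions (assumed deleted_at column)
    rating = func.sum(
        case(
            (Reaction.kind == ReactionKind.LIKE.value, 1),
            (Reaction.kind == ReactionKind.DISLIKE.value, -1),
            else_=0,
        )
    )
    return (
        select(rating)
        .select_from(Reaction)
        .join(Shout, Shout.id == Reaction.shout)
        .join(ShoutAuthor, ShoutAuthor.shout == Shout.id)
        .where(ShoutAuthor.author == author_id, Reaction.deleted_at.is_(None))
    )
```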

generate_gql_types.sh Executable file
@@ -0,0 +1 @@
python -m gql_schema_codegen -p ./schema.graphql -t ./schema_types.py

main.py
@@ -1,6 +1,5 @@
 import asyncio
 import os
-import sys
 from importlib import import_module
 from os.path import exists
@@ -8,108 +7,88 @@ from ariadne import load_schema_from_path, make_executable_schema
 from ariadne.asgi import GraphQL
 from starlette.applications import Starlette
 from starlette.middleware import Middleware
-from starlette.middleware.cors import CORSMiddleware
-from starlette.requests import Request
-from starlette.responses import JSONResponse, Response
+from starlette.middleware.authentication import AuthenticationMiddleware
+from starlette.middleware.sessions import SessionMiddleware
 from starlette.routing import Route
-from cache.precache import precache_data
-from cache.revalidator import revalidation_manager
-from services.exception import ExceptionHandlerMiddleware
-from services.redis import redis
-from services.schema import create_all_tables, resolvers
-from services.search import search_service
-from services.viewed import ViewedStorage
-from services.webhook import WebhookEndpoint, create_webhook_endpoint
-from settings import DEV_SERVER_PID_FILE_NAME, MODE
+from auth.authenticate import JWTAuthenticate
+from auth.oauth import oauth_authorize, oauth_login
+from base.redis import redis
+from base.resolvers import resolvers
+from orm import init_tables
+from resolvers.upload import upload_handler
+from services.main import storages_init
+from services.notifications.notification_service import notification_service
+from services.notifications.sse import sse_subscribe_handler
+from services.stat.viewed import ViewedStorage
+# from services.zine.gittask import GitTask
+from settings import DEV_SERVER_PID_FILE_NAME, SENTRY_DSN, SESSION_SECRET_KEY
 import_module("resolvers")
-schema = make_executable_schema(load_schema_from_path("schema/"), resolvers)
+schema = make_executable_schema(load_schema_from_path("schema.graphql"), resolvers)
-async def start():
-    if MODE == "development":
-        if not exists(DEV_SERVER_PID_FILE_NAME):
-            # pid file management
-            with open(DEV_SERVER_PID_FILE_NAME, "w", encoding="utf-8") as f:
-                f.write(str(os.getpid()))
-    print(f"[main] process started in {MODE} mode")
-async def lifespan(_app):
-    try:
-        create_all_tables()
-        await asyncio.gather(
-            redis.connect(),
-            precache_data(),
-            ViewedStorage.init(),
-            create_webhook_endpoint(),
-            search_service.info(),
-            start(),
-            revalidation_manager.start(),
-        )
-        yield
-    finally:
-        tasks = [redis.disconnect(), ViewedStorage.stop(), revalidation_manager.stop()]
-        await asyncio.gather(*tasks, return_exceptions=True)
-# Create the GraphQL instance
-graphql_app = GraphQL(schema, debug=True)
-# Wrap the GraphQL handler for better error handling
-async def graphql_handler(request: Request):
-    if request.method not in ["GET", "POST"]:
-        return JSONResponse({"error": "Method Not Allowed"}, status_code=405)
-    try:
-        result = await graphql_app.handle_request(request)
-        if isinstance(result, Response):
-            return result
-        return JSONResponse(result)
-    except asyncio.CancelledError:
-        return JSONResponse({"error": "Request cancelled"}, status_code=499)
-    except Exception as e:
-        print(f"GraphQL error: {str(e)}")
-        return JSONResponse({"error": str(e)}, status_code=500)
 middleware = [
-    # Start with error handling
-    Middleware(ExceptionHandlerMiddleware),
-    # CORS must come before the other middleware to handle preflight requests correctly
-    Middleware(
-        CORSMiddleware,
-        allow_origins=[
-            "https://localhost:3000",
-            "https://testing.discours.io",
-            "https://testing3.discours.io",
-            "https://discours.io",
-            "https://new.discours.io"
-        ],
-        allow_methods=["GET", "POST", "OPTIONS"],  # explicitly include OPTIONS
-        allow_headers=["*"],
-        allow_credentials=True,
-    ),
+    Middleware(AuthenticationMiddleware, backend=JWTAuthenticate()),
+    Middleware(SessionMiddleware, secret_key=SESSION_SECRET_KEY),
 ]
-# Update the route in Starlette
-app = Starlette(
-    routes=[
-        Route("/", graphql_handler, methods=["GET", "POST"]),
-        Route("/new-author", WebhookEndpoint),
-    ],
-    middleware=middleware,
-    lifespan=lifespan,
-    debug=True,
-)
-app.add_middleware(ExceptionHandlerMiddleware)
-if "dev" in sys.argv:
-    app.add_middleware(
-        CORSMiddleware,
-        allow_origins=["https://localhost:3000"],
-        allow_credentials=True,
-        allow_methods=["*"],
-        allow_headers=["*"],
-    )
+async def start_up():
+    init_tables()
+    await redis.connect()
+    await storages_init()
+    views_stat_task = asyncio.create_task(ViewedStorage().worker())
+    print(views_stat_task)
+    # git_task = asyncio.create_task(GitTask.git_task_worker())
+    # print(git_task)
+    notification_service_task = asyncio.create_task(notification_service.worker())
+    print(notification_service_task)
+    try:
+        import sentry_sdk
+        sentry_sdk.init(SENTRY_DSN)
+    except Exception as e:
+        print("[sentry] init error")
+        print(e)
+async def dev_start_up():
+    if exists(DEV_SERVER_PID_FILE_NAME):
+        await redis.connect()
+        return
+    else:
+        with open(DEV_SERVER_PID_FILE_NAME, "w", encoding="utf-8") as f:
+            f.write(str(os.getpid()))
+        await start_up()
+async def shutdown():
+    await redis.disconnect()
+routes = [
+    Route("/oauth/{provider}", endpoint=oauth_login),
+    Route("/oauth-authorize", endpoint=oauth_authorize),
+    Route("/upload", endpoint=upload_handler, methods=["POST"]),
+    Route("/subscribe/{user_id}", endpoint=sse_subscribe_handler),
+]
+app = Starlette(
+    on_startup=[start_up],
+    on_shutdown=[shutdown],
+    middleware=middleware,
+    routes=routes,
+)
+app.mount("/", GraphQL(schema))
+dev_app = Starlette(
+    debug=True,
+    on_startup=[dev_start_up],
+    on_shutdown=[shutdown],
+    middleware=middleware,
+    routes=routes,
+)
+dev_app.mount("/", GraphQL(schema, debug=True))

migrate.sh Normal file
@@ -0,0 +1,18 @@
database_name="discoursio"
echo "DATABASE MIGRATION STARTED"
echo "Dropping database $database_name"
dropdb $database_name --force
if [ $? -ne 0 ]; then { echo "Failed to drop database, aborting." ; exit 1; } fi
echo "Database $database_name dropped"
echo "Creating database $database_name"
createdb $database_name
if [ $? -ne 0 ]; then { echo "Failed to create database, aborting." ; exit 1; } fi
echo "Database $database_name successfully created"
echo "Start migration"
python3 server.py migrate
if [ $? -ne 0 ]; then { echo "Migration failed, aborting." ; exit 1; } fi
echo 'Done!'

migration/__init__.py Normal file
@@ -0,0 +1,279 @@
""" cmd managed migration """
import asyncio
import gc
import json
import sys
from datetime import datetime, timezone
import bs4
from migration.export import export_mdx
from migration.tables.comments import migrate as migrateComment
from migration.tables.comments import migrate_2stage as migrateComment_2stage
from migration.tables.content_items import get_shout_slug
from migration.tables.content_items import migrate as migrateShout
# from migration.tables.remarks import migrate as migrateRemark
from migration.tables.topics import migrate as migrateTopic
from migration.tables.users import migrate as migrateUser
from migration.tables.users import migrate_2stage as migrateUser_2stage
from migration.tables.users import post_migrate as users_post_migrate
from orm import init_tables
from orm.reaction import Reaction
TODAY = datetime.strftime(datetime.now(tz=timezone.utc), "%Y%m%d")
OLD_DATE = "2016-03-05 22:22:00.350000"
async def users_handle(storage):
"""migrating users first"""
counter = 0
id_map = {}
print("[migration] migrating %d users" % (len(storage["users"]["data"])))
for entry in storage["users"]["data"]:
oid = entry["_id"]
user = migrateUser(entry)
storage["users"]["by_oid"][oid] = user # full
del user["password"]
del user["emailConfirmed"]
del user["username"]
del user["email"]
storage["users"]["by_slug"][user["slug"]] = user # public
id_map[user["oid"]] = user["slug"]
counter += 1
ce = 0
for entry in storage["users"]["data"]:
ce += migrateUser_2stage(entry, id_map)
users_post_migrate()
async def topics_handle(storage):
"""topics from categories and tags"""
counter = 0
for t in storage["topics"]["tags"] + storage["topics"]["cats"]:
if t["slug"] in storage["replacements"]:
t["slug"] = storage["replacements"][t["slug"]]
topic = migrateTopic(t)
storage["topics"]["by_oid"][t["_id"]] = topic
storage["topics"]["by_slug"][t["slug"]] = topic
counter += 1
else:
print("[migration] topic " + t["slug"] + " ignored")
for oldslug, newslug in storage["replacements"].items():
if oldslug != newslug and oldslug in storage["topics"]["by_slug"]:
oid = storage["topics"]["by_slug"][oldslug]["_id"]
del storage["topics"]["by_slug"][oldslug]
storage["topics"]["by_oid"][oid] = storage["topics"]["by_slug"][newslug]
print("[migration] " + str(counter) + " topics migrated")
print("[migration] " + str(len(storage["topics"]["by_oid"].values())) + " topics by oid")
print("[migration] " + str(len(storage["topics"]["by_slug"].values())) + " topics by slug")
async def shouts_handle(storage, args):
"""migrating content items one by one"""
counter = 0
discours_author = 0
anonymous_author = 0
pub_counter = 0
ignored = 0
topics_dataset_bodies = []
topics_dataset_tlist = []
for entry in storage["shouts"]["data"]:
gc.collect()
# slug
slug = get_shout_slug(entry)
# single slug mode
if "-" in args and slug not in args:
continue
# migrate
shout_dict = await migrateShout(entry, storage)
if shout_dict:
storage["shouts"]["by_oid"][entry["_id"]] = shout_dict
storage["shouts"]["by_slug"][shout_dict["slug"]] = shout_dict
# shouts.topics
if not shout_dict["topics"]:
print("[migration] no topics!")
# with author
author = shout_dict["authors"][0]
if author["slug"] == "discours":
discours_author += 1
if author["slug"] == "anonymous":
anonymous_author += 1
# print('[migration] ' + shout['slug'] + ' with author ' + author)
if entry.get("published"):
if "mdx" in args:
export_mdx(shout_dict)
pub_counter += 1
            # print main counter
            counter += 1
            print(
                "[migration] shouts_handle %d: %s @%s"
                % (counter, shout_dict["slug"], author["slug"])
            )
b = bs4.BeautifulSoup(shout_dict["body"], "html.parser")
texts = [shout_dict["title"].lower().replace(r"[^а-яА-Яa-zA-Z]", "")]
texts = texts + b.findAll(text=True)
topics_dataset_bodies.append(" ".join([x.strip().lower() for x in texts]))
topics_dataset_tlist.append(shout_dict["topics"])
else:
ignored += 1
# np.savetxt('topics_dataset.csv', (topics_dataset_bodies, topics_dataset_tlist), delimiter=',
# ', fmt='%s')
print("[migration] " + str(counter) + " content items were migrated")
print("[migration] " + str(pub_counter) + " have been published")
print("[migration] " + str(discours_author) + " authored by @discours")
print("[migration] " + str(anonymous_author) + " authored by @anonymous")
# async def remarks_handle(storage):
# print("[migration] comments")
# c = 0
# for entry_remark in storage["remarks"]["data"]:
# remark = await migrateRemark(entry_remark, storage)
# c += 1
# print("[migration] " + str(c) + " remarks migrated")
async def comments_handle(storage):
print("[migration] comments")
id_map = {}
ignored_counter = 0
missed_shouts = {}
for oldcomment in storage["reactions"]["data"]:
if not oldcomment.get("deleted"):
reaction = await migrateComment(oldcomment, storage)
if isinstance(reaction, str):
missed_shouts[reaction] = oldcomment
elif isinstance(reaction, Reaction):
reaction = reaction.dict()
rid = reaction["id"]
oid = reaction["oid"]
id_map[oid] = rid
else:
ignored_counter += 1
for reaction in storage["reactions"]["data"]:
migrateComment_2stage(reaction, id_map)
print("[migration] " + str(len(id_map)) + " comments migrated")
print("[migration] " + str(ignored_counter) + " comments ignored")
print("[migration] " + str(len(missed_shouts.keys())) + " commented shouts missed")
# missed_shouts maps each missing shout to the comment that could not be attached
missed_counter = len(missed_shouts)
print("[migration] " + str(missed_counter) + " comments dropped")
async def all_handle(storage, args):
print("[migration] handle everything")
await users_handle(storage)
await topics_handle(storage)
print("[migration] users and topics are migrated")
await shouts_handle(storage, args)
# print("[migration] remarks...")
# await remarks_handle(storage)
print("[migration] migrating comments")
await comments_handle(storage)
# export_email_subscriptions()
print("[migration] done!")
def data_load():
storage = {
"content_items": {
"by_oid": {},
"by_slug": {},
},
"shouts": {"by_oid": {}, "by_slug": {}, "data": []},
"reactions": {"by_oid": {}, "by_slug": {}, "by_content": {}, "data": []},
"topics": {
"by_oid": {},
"by_slug": {},
"cats": [],
"tags": [],
},
"remarks": {"data": []},
"users": {"by_oid": {}, "by_slug": {}, "data": []},
"replacements": json.loads(open("migration/tables/replacements.json").read()),
}
try:
users_data = json.loads(open("migration/data/users.json").read())
print("[migration.load] " + str(len(users_data)) + " users ")
tags_data = json.loads(open("migration/data/tags.json").read())
storage["topics"]["tags"] = tags_data
print("[migration.load] " + str(len(tags_data)) + " tags ")
cats_data = json.loads(open("migration/data/content_item_categories.json").read())
storage["topics"]["cats"] = cats_data
print("[migration.load] " + str(len(cats_data)) + " cats ")
comments_data = json.loads(open("migration/data/comments.json").read())
storage["reactions"]["data"] = comments_data
print("[migration.load] " + str(len(comments_data)) + " comments ")
content_data = json.loads(open("migration/data/content_items.json").read())
storage["shouts"]["data"] = content_data
print("[migration.load] " + str(len(content_data)) + " content items ")
remarks_data = json.loads(open("migration/data/remarks.json").read())
storage["remarks"]["data"] = remarks_data
print("[migration.load] " + str(len(remarks_data)) + " remarks data ")
# fill out storage
for x in users_data:
storage["users"]["by_oid"][x["_id"]] = x
# storage['users']['by_slug'][x['slug']] = x
# no user.slug yet
print("[migration.load] " + str(len(storage["users"]["by_oid"].keys())) + " users by oid")
for x in tags_data:
storage["topics"]["by_oid"][x["_id"]] = x
storage["topics"]["by_slug"][x["slug"]] = x
for x in cats_data:
storage["topics"]["by_oid"][x["_id"]] = x
storage["topics"]["by_slug"][x["slug"]] = x
print(
"[migration.load] " + str(len(storage["topics"]["by_slug"].keys())) + " topics by slug"
)
for item in content_data:
slug = get_shout_slug(item)
storage["content_items"]["by_slug"][slug] = item
storage["content_items"]["by_oid"][item["_id"]] = item
print("[migration.load] " + str(len(content_data)) + " content items")
for x in comments_data:
storage["reactions"]["by_oid"][x["_id"]] = x
cid = x["contentItem"]
storage["reactions"]["by_content"][cid] = x
ci = storage["content_items"]["by_oid"].get(cid, {})
if "slug" in ci:
storage["reactions"]["by_slug"][ci["slug"]] = x
print(
"[migration.load] "
+ str(len(storage["reactions"]["by_content"].keys()))
+ " with comments"
)
storage["users"]["data"] = users_data
storage["topics"]["tags"] = tags_data
storage["topics"]["cats"] = cats_data
storage["shouts"]["data"] = content_data
storage["reactions"]["data"] = comments_data
except Exception as e:
raise e
return storage
async def handling_migration():
init_tables()
await all_handle(data_load(), sys.argv)
def process():
loop = asyncio.get_event_loop()
loop.run_until_complete(handling_migration())
if __name__ == "__main__":
process()
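A quick usage sketch for the entry point above (not part of the diff; the slug is a made-up example). Single-slug mode is triggered by a "-" argument, which shouts_handle checks against each entry's slug:

import sys

# migrate everything:
sys.argv = ["migrate"]
# ...or only one shout by slug ("-" switches shouts_handle to single-slug mode):
# sys.argv = ["migrate", "-", "my-shout"]
process()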

33
migration/bson2json.py Normal file
View File

@ -0,0 +1,33 @@
import gc
import json
import os
import bson
from .utils import DateTimeEncoder
def json_tables():
print("[migration] unpack dump/discours/*.bson to migration/data/*.json")
data = {
"content_items": [],
"content_item_categories": [],
"tags": [],
"email_subscriptions": [],
"users": [],
"comments": [],
"remarks": [],
}
for table in data.keys():
print("[migration] bson2json for " + table)
gc.collect()
lc = []
bs = open("dump/discours/" + table + ".bson", "rb").read()
base = 0
while base < len(bs):
base, d = bson.decode_document(bs, base)
lc.append(d)
data[table] = lc
open(os.getcwd() + "/migration/data/" + table + ".json", "w").write(
json.dumps(lc, cls=DateTimeEncoder)
)
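A usage sketch for the converter above; it assumes mongodump output already sits in dump/discours/ and that migration/data/ exists:

from migration.bson2json import json_tables

# reads dump/discours/<table>.bson and writes migration/data/<table>.json
# for every table named in the data dict above
json_tables()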

0
migration/data/.gitkeep Normal file
View File

137
migration/export.py Normal file
View File

@ -0,0 +1,137 @@
import json
import os
from datetime import datetime, timezone
import frontmatter
from .extract import extract_html, extract_media
from .utils import DateTimeEncoder
OLD_DATE = "2016-03-05 22:22:00.350000"
EXPORT_DEST = "../discoursio-web/data/"
parentDir = "/".join(os.getcwd().split("/")[:-1])
contentDir = parentDir + "/discoursio-web/content/"
ts = datetime.now(tz=timezone.utc)
def get_metadata(r):
authors = []
for a in r["authors"]:
authors.append(
{ # a short version for public listings
"slug": a.slug or "discours",
"name": a.name or "Дискурс",
"userpic": a.userpic or "https://discours.io/static/img/discours.png",
}
)
metadata = {}
metadata["title"] = r.get("title", "").replace("{", "(").replace("}", ")")
metadata["authors"] = authors
metadata["createdAt"] = r.get("createdAt", ts)
metadata["layout"] = r["layout"]
metadata["topics"] = [topic for topic in r["topics"]]
metadata["topics"].sort()
if r.get("cover", False):
metadata["cover"] = r.get("cover")
return metadata
def export_mdx(r):
# print('[export] mdx %s' % r['slug'])
content = ""
metadata = get_metadata(r)
content = frontmatter.dumps(frontmatter.Post(r["body"], **metadata))
ext = "mdx"
filepath = contentDir + r["slug"]
bc = bytes(content, "utf-8").decode("utf-8", "ignore")
open(filepath + "." + ext, "w").write(bc)
def export_body(shout, storage):
entry = storage["content_items"]["by_oid"][shout["oid"]]
if entry:
body = extract_html(entry)
media = extract_media(entry)
shout["body"] = body # prepare_html_body(entry) # prepare_md_body(entry)
shout["media"] = media
export_mdx(shout)
print("[export] html for %s" % shout["slug"])
open(contentDir + shout["slug"] + ".html", "w").write(body)
else:
raise Exception("no content_items entry found")
def export_slug(slug, storage):
shout = storage["shouts"]["by_slug"][slug]
shout = storage["shouts"]["by_slug"].get(slug)
assert shout, "[export] no shout found by slug: %s " % slug
author = shout["authors"][0]
assert author, "[export] no author error"
export_body(shout, storage)
def export_email_subscriptions():
email_subscriptions_data = json.loads(open("migration/data/email_subscriptions.json").read())
for data in email_subscriptions_data:
# TODO: migrate to mailgun list manually
# migrate_email_subscription(data)
pass
print("[migration] " + str(len(email_subscriptions_data)) + " email subscriptions exported")
def export_shouts(storage):
# update what was just migrated or load json again
if len(storage["users"]["by_slugs"].keys()) == 0:
storage["users"]["by_slugs"] = json.loads(open(EXPORT_DEST + "authors.json").read())
print("[migration] " + str(len(storage["users"]["by_slugs"].keys())) + " exported authors ")
if len(storage["shouts"]["by_slugs"].keys()) == 0:
storage["shouts"]["by_slugs"] = json.loads(open(EXPORT_DEST + "articles.json").read())
print(
"[migration] " + str(len(storage["shouts"]["by_slugs"].keys())) + " exported articles "
)
for slug in storage["shouts"]["by_slugs"].keys():
export_slug(slug, storage)
def export_json(export_articles={}, export_authors={}, export_topics={}, export_comments={}):
open(EXPORT_DEST + "authors.json", "w").write(
json.dumps(
export_authors,
cls=DateTimeEncoder,
indent=4,
sort_keys=True,
ensure_ascii=False,
)
)
print("[migration] " + str(len(export_authors.items())) + " authors exported")
open(EXPORT_DEST + "topics.json", "w").write(
json.dumps(
export_topics,
cls=DateTimeEncoder,
indent=4,
sort_keys=True,
ensure_ascii=False,
)
)
print("[migration] " + str(len(export_topics.keys())) + " topics exported")
open(EXPORT_DEST + "articles.json", "w").write(
json.dumps(
export_articles,
cls=DateTimeEncoder,
indent=4,
sort_keys=True,
ensure_ascii=False,
)
)
print("[migration] " + str(len(export_articles.items())) + " articles exported")
open(EXPORT_DEST + "comments.json", "w").write(
json.dumps(
export_comments,
cls=DateTimeEncoder,
indent=4,
sort_keys=True,
ensure_ascii=False,
)
)
print("[migration] " + str(len(export_comments.items())) + " exported articles with comments")

276
migration/extract.py Normal file
View File

@ -0,0 +1,276 @@
import os
import re
from bs4 import BeautifulSoup
TOOLTIP_REGEX = r"(\/\/\/(.+)\/\/\/)"
contentDir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "..", "discoursio-web", "content"
)
cdn = "https://images.discours.io"
def replace_tooltips(body):
# change if you prefer regexp
newbody = body
matches = list(re.finditer(TOOLTIP_REGEX, body, re.IGNORECASE | re.MULTILINE))[1:]
for match in matches:
newbody = newbody.replace(
match.group(1), '<Tooltip text="' + match.group(2) + '" />'
)  # accumulate on newbody; replacing on body discarded earlier substitutions
if len(matches) > 0:
print("[extract] found %d tooltips" % len(matches))
return newbody
# def extract_footnotes(body, shout_dict):
# parts = body.split("&&&")
# lll = len(parts)
# newparts = list(parts)
# placed = False
# if lll & 1:
# if lll > 1:
# i = 1
# print("[extract] found %d footnotes in body" % (lll - 1))
# for part in parts[1:]:
# if i & 1:
# placed = True
# if 'a class="footnote-url" href=' in part:
# print("[extract] footnote: " + part)
# fn = 'a class="footnote-url" href="'
# extracted_link = part.split(fn, 1)[1].split('"', 1)[0]
# extracted_body = part.split(fn, 1)[1].split(">", 1)[1].split("</a>", 1)[0]
# print("[extract] footnote link: " + extracted_link)
# with local_session() as session:
# Reaction.create(
# {
# "shout": shout_dict["id"],
# "kind": ReactionKind.FOOTNOTE,
# "body": extracted_body,
# "range": str(body.index(fn + link) - len("<"))
# + ":"
# + str(body.index(extracted_body) + len("</a>")),
# }
# )
# newparts[i] = "<a href='#'></a>"
# else:
# newparts[i] = part
# i += 1
# return ("".join(newparts), placed)
# def place_tooltips(body):
# parts = body.split("&&&")
# lll = len(parts)
# newparts = list(parts)
# placed = False
# if lll & 1:
# if lll > 1:
# i = 1
# print("[extract] found %d tooltips" % (lll - 1))
# for part in parts[1:]:
# if i & 1:
# placed = True
# if 'a class="footnote-url" href=' in part:
# print("[extract] footnote: " + part)
# fn = 'a class="footnote-url" href="'
# link = part.split(fn, 1)[1].split('"', 1)[0]
# extracted_part = part.split(fn, 1)[0] + " " + part.split("/", 1)[-1]
# newparts[i] = (
# "<Tooltip"
# + (' link="' + link + '" ' if link else "")
# + ">"
# + extracted_part
# + "</Tooltip>"
# )
# else:
# newparts[i] = "<Tooltip>%s</Tooltip>" % part
# # print('[extract] ' + newparts[i])
# else:
# # print('[extract] ' + part[:10] + '..')
# newparts[i] = part
# i += 1
# return ("".join(newparts), placed)
IMG_REGEX = (
r"\!\[(.*?)\]\((data\:image\/(png|jpeg|jpg);base64\,((?:[A-Za-z\d+\/]{4})*(?:[A-Za-z\d+\/]{3}="
)
IMG_REGEX += r"|[A-Za-z\d+\/]{2}==)))\)"
parentDir = "/".join(os.getcwd().split("/")[:-1])
public = parentDir + "/discoursio-web/public"
cache = {}
# def reextract_images(body, oid):
# # change if you prefer regexp
# matches = list(re.finditer(IMG_REGEX, body, re.IGNORECASE | re.MULTILINE))[1:]
# i = 0
# for match in matches:
# print("[extract] image " + match.group(1))
# ext = match.group(3)
# name = oid + str(i)
# link = public + "/upload/image-" + name + "." + ext
# img = match.group(4)
# title = match.group(1) # NOTE: this is not the title
# if img not in cache:
# content = base64.b64decode(img + "==")
# print(str(len(img)) + " image bytes been written")
# open("../" + link, "wb").write(content)
# cache[img] = name
# i += 1
# else:
# print("[extract] image cached " + cache[img])
# body.replace(
# str(match), "![" + title + "](" + cdn + link + ")"
# ) # WARNING: this does not work
# return body
IMAGES = {
"data:image/png": "png",
"data:image/jpg": "jpg",
"data:image/jpeg": "jpg",
}
b64 = ";base64,"
di = "data:image"
def extract_media(entry):
"""normalized media extraction method"""
# media: [ { title, pic, url, body } ]
kind = entry.get("type")
if not kind:
print(entry)
raise Exception("shout no layout")
media = []
for m in entry.get("media") or []:
# title
title = m.get("title", "").replace("\n", " ").replace("&nbsp;", " ")
artist = m.get("performer") or m.get("artist")
if artist:
title = artist + " - " + title
# pic
url = m.get("fileUrl") or m.get("url", "")
pic = ""
if m.get("thumborId"):
pic = cdn + "/unsafe/" + m["thumborId"]
# url
if not url:
if kind == "Image":
url = pic
elif "youtubeId" in m:
url = "https://youtube.com/?watch=" + m["youtubeId"]
elif "vimeoId" in m:
url = "https://vimeo.com/" + m["vimeoId"]
# body
body = m.get("body") or m.get("literatureBody") or ""
media.append({"url": url, "pic": pic, "title": title, "body": body})
return media
def prepare_html_body(entry):
# body modifications
body = ""
kind = entry.get("type")
addon = ""
if kind == "Video":
addon = ""
for m in entry.get("media") or []:
if "youtubeId" in m:
addon += '<iframe width="420" height="345" src="http://www.youtube.com/embed/'
addon += m["youtubeId"]
addon += '?autoplay=1" frameborder="0" allowfullscreen></iframe>\n'
elif "vimeoId" in m:
addon += '<iframe src="https://player.vimeo.com/video/'
addon += m["vimeoId"]
addon += ' width="420" height="345" frameborder="0" allow="autoplay; fullscreen"'
addon += " allowfullscreen></iframe>"
else:
print("[extract] media is not supported")
print(m)
body += addon
elif kind == "Music":
addon = ""
for m in entry.get("media") or []:
artist = m.get("performer")
trackname = ""
if artist:
trackname += artist + " - "
if "title" in m:
trackname += m.get("title", "")
addon += "<figure><figcaption>"
addon += trackname
addon += '</figcaption><audio controls src="'
addon += m.get("fileUrl", "")
addon += '"></audio></figure>'
body += addon
body += extract_html(entry)  # append, so the media embeds built above are kept
# if body_orig: body += extract_md(html2text(body_orig), entry['_id'])
return body
def cleanup_html(body: str) -> str:
new_body = body
regex_remove = [
r"style=\"width:\s*\d+px;height:\s*\d+px;\"",
r"style=\"width:\s*\d+px;\"",
r"style=\"color: #000000;\"",
r"style=\"float: none;\"",
r"style=\"background: white;\"",
r"class=\"Apple-interchange-newline\"",
r"class=\"MsoNormalCxSpMiddle\"",
r"class=\"MsoNormal\"",
r"lang=\"EN-US\"",
r"id=\"docs-internal-guid-[\w-]+\"",
r"<p>\s*</p>",
r"<span></span>",
r"<i>\s*</i>",
r"<b>\s*</b>",
r"<h1>\s*</h1>",
r"<h2>\s*</h2>",
r"<h3>\s*</h3>",
r"<h4>\s*</h4>",
r"<div>\s*</div>",
]
regex_replace = {r"<br>\s*</p>": "</p>"}
changed = True
while changed:
# we need several iterations to clean nested tags this way
changed = False
new_body_iteration = new_body
for regex in regex_remove:
new_body = re.sub(regex, "", new_body)
for regex, replace in regex_replace.items():
new_body = re.sub(regex, replace, new_body)
if new_body_iteration != new_body:
changed = True
return new_body
def extract_html(entry, shout_id=None, cleanup=False):
body_orig = (entry.get("body") or "").replace(r"\(", "(").replace(r"\)", ")")
if cleanup:
# we do that before bs parsing to catch the invalid html
body_clean = cleanup_html(body_orig)
if body_clean != body_orig:
print(f"[migration] html cleaned for slug {entry.get('slug', None)}")
body_orig = body_clean
# if shout_id:
# extract_footnotes(body_orig, shout_id)
body_html = str(BeautifulSoup(body_orig, features="html.parser"))
if cleanup:
# we do that after bs parsing because it can add dummy tags
body_clean_html = cleanup_html(body_html)
if body_clean_html != body_html:
print(f"[migration] html cleaned after bs4 for slug {entry.get('slug', None)}")
body_html = body_clean_html
return body_html
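To make the cleanup contract concrete, a small sketch on fabricated input (the html snippet is invented):

from migration.extract import cleanup_html, extract_html

dirty = '<p class="MsoNormal" lang="EN-US">text<br>\n</p><span></span>'
print(cleanup_html(dirty))  # junk attributes and empty tags are stripped iteratively

# extract_html() unescapes \( \) and runs the same cleanup before and after bs4 parsing
print(extract_html({"body": r"\(hello\)", "slug": "demo"}, cleanup=True))  # -> (hello)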

File diff suppressed because it is too large

View File

@ -0,0 +1,3 @@
from .cli import main
main()

318
migration/html2text/cli.py Normal file
View File

@ -0,0 +1,318 @@
import argparse
import sys
from . import HTML2Text, __version__, config
# noinspection DuplicatedCode
def main() -> None:
baseurl = ""
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
p = argparse.ArgumentParser()
p.add_argument(
"--default-image-alt",
dest="default_image_alt",
default=config.DEFAULT_IMAGE_ALT,
help="The default alt string for images with missing ones",
)
p.add_argument(
"--pad-tables",
dest="pad_tables",
action="store_true",
default=config.PAD_TABLES,
help="pad the cells to equal column width in tables",
)
p.add_argument(
"--no-wrap-links",
dest="wrap_links",
action="store_false",
default=config.WRAP_LINKS,
help="don't wrap links during conversion",
)
p.add_argument(
"--wrap-list-items",
dest="wrap_list_items",
action="store_true",
default=config.WRAP_LIST_ITEMS,
help="wrap list items during conversion",
)
p.add_argument(
"--wrap-tables",
dest="wrap_tables",
action="store_true",
default=config.WRAP_TABLES,
help="wrap tables",
)
p.add_argument(
"--ignore-emphasis",
dest="ignore_emphasis",
action="store_true",
default=config.IGNORE_EMPHASIS,
help="don't include any formatting for emphasis",
)
p.add_argument(
"--reference-links",
dest="inline_links",
action="store_false",
default=config.INLINE_LINKS,
help="use reference style links instead of inline links",
)
p.add_argument(
"--ignore-links",
dest="ignore_links",
action="store_true",
default=config.IGNORE_ANCHORS,
help="don't include any formatting for links",
)
p.add_argument(
"--ignore-mailto-links",
action="store_true",
dest="ignore_mailto_links",
default=config.IGNORE_MAILTO_LINKS,
help="don't include mailto: links",
)
p.add_argument(
"--protect-links",
dest="protect_links",
action="store_true",
default=config.PROTECT_LINKS,
help="protect links from line breaks surrounding them with angle brackets",
)
p.add_argument(
"--ignore-images",
dest="ignore_images",
action="store_true",
default=config.IGNORE_IMAGES,
help="don't include any formatting for images",
)
p.add_argument(
"--images-as-html",
dest="images_as_html",
action="store_true",
default=config.IMAGES_AS_HTML,
help=(
"Always write image tags as raw html; preserves `height`, `width` and "
"`alt` if possible."
),
)
p.add_argument(
"--images-to-alt",
dest="images_to_alt",
action="store_true",
default=config.IMAGES_TO_ALT,
help="Discard image data, only keep alt text",
)
p.add_argument(
"--images-with-size",
dest="images_with_size",
action="store_true",
default=config.IMAGES_WITH_SIZE,
help=("Write image tags with height and width attrs as raw html to retain " "dimensions"),
)
p.add_argument(
"-g",
"--google-doc",
action="store_true",
dest="google_doc",
default=False,
help="convert an html-exported Google Document",
)
p.add_argument(
"-d",
"--dash-unordered-list",
action="store_true",
dest="ul_style_dash",
default=False,
help="use a dash rather than a star for unordered list items",
)
p.add_argument(
"-e",
"--asterisk-emphasis",
action="store_true",
dest="em_style_asterisk",
default=False,
help="use an asterisk rather than an underscore for emphasized text",
)
p.add_argument(
"-b",
"--body-width",
dest="body_width",
type=int,
default=config.BODY_WIDTH,
help="number of characters per output line, 0 for no wrap",
)
p.add_argument(
"-i",
"--google-list-indent",
dest="list_indent",
type=int,
default=config.GOOGLE_LIST_INDENT,
help="number of pixels Google indents nested lists",
)
p.add_argument(
"-s",
"--hide-strikethrough",
action="store_true",
dest="hide_strikethrough",
default=False,
help="hide strike-through text. only relevant when -g is " "specified as well",
)
p.add_argument(
"--escape-all",
action="store_true",
dest="escape_snob",
default=False,
help=(
"Escape all special characters. Output is less readable, but avoids "
"corner case formatting issues."
),
)
p.add_argument(
"--bypass-tables",
action="store_true",
dest="bypass_tables",
default=config.BYPASS_TABLES,
help="Format tables in HTML rather than Markdown syntax.",
)
p.add_argument(
"--ignore-tables",
action="store_true",
dest="ignore_tables",
default=config.IGNORE_TABLES,
help="Ignore table-related tags (table, th, td, tr) " "while keeping rows.",
)
p.add_argument(
"--single-line-break",
action="store_true",
dest="single_line_break",
default=config.SINGLE_LINE_BREAK,
help=(
"Use a single line break after a block element rather than two line "
"breaks. NOTE: Requires --body-width=0"
),
)
p.add_argument(
"--unicode-snob",
action="store_true",
dest="unicode_snob",
default=config.UNICODE_SNOB,
help="Use unicode throughout document",
)
p.add_argument(
"--no-automatic-links",
action="store_false",
dest="use_automatic_links",
default=config.USE_AUTOMATIC_LINKS,
help="Do not use automatic links wherever applicable",
)
p.add_argument(
"--no-skip-internal-links",
action="store_false",
dest="skip_internal_links",
default=config.SKIP_INTERNAL_LINKS,
help="Do not skip internal links",
)
p.add_argument(
"--links-after-para",
action="store_true",
dest="links_each_paragraph",
default=config.LINKS_EACH_PARAGRAPH,
help="Put links after each paragraph instead of document",
)
p.add_argument(
"--mark-code",
action="store_true",
dest="mark_code",
default=config.MARK_CODE,
help="Mark program code blocks with [code]...[/code]",
)
p.add_argument(
"--decode-errors",
dest="decode_errors",
default=config.DECODE_ERRORS,
help=(
"What to do in case of decode errors.'ignore', 'strict' and 'replace' are "
"acceptable values"
),
)
p.add_argument(
"--open-quote",
dest="open_quote",
default=config.OPEN_QUOTE,
help="The character used to open quotes",
)
p.add_argument(
"--close-quote",
dest="close_quote",
default=config.CLOSE_QUOTE,
help="The character used to close quotes",
)
p.add_argument("--version", action="version", version=".".join(map(str, __version__)))
p.add_argument("filename", nargs="?")
p.add_argument("encoding", nargs="?", default="utf-8")
args = p.parse_args()
if args.filename and args.filename != "-":
with open(args.filename, "rb") as fp:
data = fp.read()
else:
data = sys.stdin.buffer.read()
try:
html = data.decode(args.encoding, args.decode_errors)
except UnicodeDecodeError as err:
warning = bcolors.WARNING + "Warning:" + bcolors.ENDC
warning += " Use the " + bcolors.OKGREEN
warning += "--decode-errors=ignore" + bcolors.ENDC + " flag."
print(warning)
raise err
h = HTML2Text(baseurl=baseurl)
# handle options
if args.ul_style_dash:
h.ul_item_mark = "-"
if args.em_style_asterisk:
h.emphasis_mark = "*"
h.strong_mark = "__"
h.body_width = args.body_width
h.google_list_indent = args.list_indent
h.ignore_emphasis = args.ignore_emphasis
h.ignore_links = args.ignore_links
h.ignore_mailto_links = args.ignore_mailto_links
h.protect_links = args.protect_links
h.ignore_images = args.ignore_images
h.images_as_html = args.images_as_html
h.images_to_alt = args.images_to_alt
h.images_with_size = args.images_with_size
h.google_doc = args.google_doc
h.hide_strikethrough = args.hide_strikethrough
h.escape_snob = args.escape_snob
h.bypass_tables = args.bypass_tables
h.ignore_tables = args.ignore_tables
h.single_line_break = args.single_line_break
h.inline_links = args.inline_links
h.unicode_snob = args.unicode_snob
h.use_automatic_links = args.use_automatic_links
h.skip_internal_links = args.skip_internal_links
h.links_each_paragraph = args.links_each_paragraph
h.mark_code = args.mark_code
h.wrap_links = args.wrap_links
h.wrap_list_items = args.wrap_list_items
h.wrap_tables = args.wrap_tables
h.pad_tables = args.pad_tables
h.default_image_alt = args.default_image_alt
h.open_quote = args.open_quote
h.close_quote = args.close_quote
sys.stdout.write(h.handle(html))
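The same conversion is available without the CLI; a minimal sketch mirroring two of the flags above (input html is invented):

from migration.html2text import HTML2Text

h = HTML2Text(baseurl="")
h.body_width = 0      # like --body-width=0
h.ul_item_mark = "-"  # like --dash-unordered-list
print(h.handle("<h1>Title</h1><ul><li>one</li><li>two</li></ul>"))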

View File

@ -0,0 +1,164 @@
import re
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = True
# Marker to use for marking tables for padding post processing
TABLE_MARKER_FOR_PAD = "special_marker_for_table_padding"
# Escape all special characters. Output is less readable, but avoids
# corner case formatting issues.
ESCAPE_SNOB = True
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = False
# Wrap long lines at position. 0 for no wrapping.
BODY_WIDTH = 0
# Don't show internal links (href="#local-anchor") -- corresponding link
# targets won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = False
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Protect links from line breaks surrounding them with angle brackets (in
# addition to their square brackets)
PROTECT_LINKS = True
WRAP_LINKS = True
# Wrap list items.
WRAP_LIST_ITEMS = False
# Wrap tables
WRAP_TABLES = False
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
# Values Google and others may use to indicate bold text
BOLD_TEXT_STYLE_VALUES = ("bold", "700", "800", "900")
IGNORE_ANCHORS = False
IGNORE_MAILTO_LINKS = False
IGNORE_IMAGES = False
IMAGES_AS_HTML = False
IMAGES_TO_ALT = False
IMAGES_WITH_SIZE = False
IGNORE_EMPHASIS = False
MARK_CODE = True
DECODE_ERRORS = "strict"
DEFAULT_IMAGE_ALT = ""
PAD_TABLES = True
# Convert links with same href and text to <href> format
# if they are absolute links
USE_AUTOMATIC_LINKS = True
# For checking space-only lines on line 771
RE_SPACE = re.compile(r"\s\+")
RE_ORDERED_LIST_MATCHER = re.compile(r"\d+\.\s")
RE_UNORDERED_LIST_MATCHER = re.compile(r"[-\*\+]\s")
RE_MD_CHARS_MATCHER = re.compile(r"([\\\[\]\(\)])")
RE_MD_CHARS_MATCHER_ALL = re.compile(r"([`\*_{}\[\]\(\)#!])")
# to find links in the text
RE_LINK = re.compile(r"(\[.*?\] ?\(.*?\))|(\[.*?\]:.*?)")
# to find table separators
RE_TABLE = re.compile(r" \| ")
RE_MD_DOT_MATCHER = re.compile(
r"""
^ # start of line
(\s*\d+) # optional whitespace and a number
(\.) # dot
(?=\s) # lookahead assert whitespace
""",
re.MULTILINE | re.VERBOSE,
)
RE_MD_PLUS_MATCHER = re.compile(
r"""
^
(\s*)
(\+)
(?=\s)
""",
flags=re.MULTILINE | re.VERBOSE,
)
RE_MD_DASH_MATCHER = re.compile(
r"""
^
(\s*)
(-)
(?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
# or another dash (header or hr)
""",
flags=re.MULTILINE | re.VERBOSE,
)
RE_SLASH_CHARS = r"\`*_{}[]()#+-.!"
RE_MD_BACKSLASH_MATCHER = re.compile(
r"""
(\\) # match one slash
(?=[%s]) # followed by a char that requires escaping
"""
% re.escape(RE_SLASH_CHARS),
flags=re.VERBOSE,
)
UNIFIABLE = {
"rsquo": "'",
"lsquo": "'",
"rdquo": '"',
"ldquo": '"',
"copy": "(C)",
"mdash": "--",
"nbsp": " ",
"rarr": "->",
"larr": "<-",
"middot": "*",
"ndash": "-",
"oelig": "oe",
"aelig": "ae",
"agrave": "a",
"aacute": "a",
"acirc": "a",
"atilde": "a",
"auml": "a",
"aring": "a",
"egrave": "e",
"eacute": "e",
"ecirc": "e",
"euml": "e",
"igrave": "i",
"iacute": "i",
"icirc": "i",
"iuml": "i",
"ograve": "o",
"oacute": "o",
"ocirc": "o",
"otilde": "o",
"ouml": "o",
"ugrave": "u",
"uacute": "u",
"ucirc": "u",
"uuml": "u",
"lrm": "",
"rlm": "",
}
# Format tables in HTML rather than Markdown syntax
BYPASS_TABLES = False
# Ignore table-related tags (table, th, td, tr) while keeping rows
IGNORE_TABLES = False
# Use a single line break after a block element rather than two line breaks.
# NOTE: Requires body width setting to be 0.
SINGLE_LINE_BREAK = False
# Use double quotation marks when converting the <q> tag.
OPEN_QUOTE = '"'
CLOSE_QUOTE = '"'

View File

@ -0,0 +1,18 @@
from typing import Dict, Optional
class AnchorElement:
__slots__ = ["attrs", "count", "outcount"]
def __init__(self, attrs: Dict[str, Optional[str]], count: int, outcount: int):
self.attrs = attrs
self.count = count
self.outcount = outcount
class ListElement:
__slots__ = ["name", "num"]
def __init__(self, name: str, num: int):
self.name = name
self.num = num

View File

View File

@ -0,0 +1,3 @@
class OutCallback:
def __call__(self, s: str) -> None:
...

View File

@ -0,0 +1,282 @@
import html.entities
from typing import Dict, List, Optional
from . import config
unifiable_n = {
html.entities.name2codepoint[k]: v for k, v in config.UNIFIABLE.items() if k != "nbsp"
}
def hn(tag: str) -> int:
if tag[0] == "h" and len(tag) == 2:
n = tag[1]
if "0" < n <= "9":
return int(n)
return 0
def dumb_property_dict(style: str) -> Dict[str, str]:
"""
:returns: A hash of css attributes
"""
return {
x.strip().lower(): y.strip().lower()
for x, y in [z.split(":", 1) for z in style.split(";") if ":" in z]
}
def dumb_css_parser(data: str) -> Dict[str, Dict[str, str]]:
"""
:type data: str
:returns: A hash of css selectors, each of which contains a hash of
css attributes.
:rtype: dict
"""
# remove @import sentences
data += ";"
importIndex = data.find("@import")
while importIndex != -1:
data = data[0:importIndex] + data[data.find(";", importIndex) + 1 :]
importIndex = data.find("@import")
# parse the css. reverted from dictionary comprehension in order to
# support older pythons
pairs = [x.split("{") for x in data.split("}") if "{" in x.strip()]
try:
elements = {a.strip(): dumb_property_dict(b) for a, b in pairs}
except ValueError:
elements = {} # not that important
return elements
def element_style(
attrs: Dict[str, Optional[str]],
style_def: Dict[str, Dict[str, str]],
parent_style: Dict[str, str],
) -> Dict[str, str]:
"""
:type attrs: dict
:type style_def: dict
:type parent_style: dict
:returns: A hash of the 'final' style attributes of the element
:rtype: dict
"""
style = parent_style.copy()
attrs_class = attrs.get("class")
if attrs_class:
for css_class in attrs_class.split():
css_style = style_def.get("." + css_class, {})
style.update(css_style)
attrs_style = attrs.get("style")
if attrs_style:
immediate_style = dumb_property_dict(attrs_style)
style.update(immediate_style)
return style
def google_list_style(style: Dict[str, str]) -> str:
"""
Finds out whether this is an ordered or unordered list
:type style: dict
:rtype: str
"""
if "list-style-type" in style:
list_style = style["list-style-type"]
if list_style in ["disc", "circle", "square", "none"]:
return "ul"
return "ol"
def google_has_height(style: Dict[str, str]) -> bool:
"""
Check if the style of the element has the 'height' attribute
explicitly defined
:type style: dict
:rtype: bool
"""
return "height" in style
def google_text_emphasis(style: Dict[str, str]) -> List[str]:
"""
:type style: dict
:returns: A list of all emphasis modifiers of the element
:rtype: list
"""
emphasis = []
if "text-decoration" in style:
emphasis.append(style["text-decoration"])
if "font-style" in style:
emphasis.append(style["font-style"])
if "font-weight" in style:
emphasis.append(style["font-weight"])
return emphasis
def google_fixed_width_font(style: Dict[str, str]) -> bool:
"""
Check if the css of the current element defines a fixed width font
:type style: dict
:rtype: bool
"""
font_family = ""
if "font-family" in style:
font_family = style["font-family"]
return "courier new" == font_family or "consolas" == font_family
def list_numbering_start(attrs: Dict[str, Optional[str]]) -> int:
"""
Extract numbering from list element attributes
:type attrs: dict
:rtype: int or None
"""
attrs_start = attrs.get("start")
if attrs_start:
try:
return int(attrs_start) - 1
except ValueError:
pass
return 0
def skipwrap(para: str, wrap_links: bool, wrap_list_items: bool, wrap_tables: bool) -> bool:
# If it appears to contain a link
# don't wrap
if not wrap_links and config.RE_LINK.search(para):
return True
# If the text begins with four spaces or one tab, it's a code block;
# don't wrap
if para[0:4] == "    " or para[0:1] == "\t":  # four-space literal restored; slicing avoids IndexError on empty strings
return True
# If the text begins with only two "--", possibly preceded by
# whitespace, that's an emdash; so wrap.
stripped = para.lstrip()
if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
return False
# I'm not sure what this is for; I thought it was to detect lists,
# but there's a <br>-inside-<span> case in one of the tests that
# also depends upon it.
if stripped[0:1] in ("-", "*") and not stripped[0:2] == "**":
return not wrap_list_items
# If text contains a pipe character it is likely a table
if not wrap_tables and config.RE_TABLE.search(para):
return True
# If the text begins with a single -, *, or +, followed by a space,
# or an integer, followed by a ., followed by a space (in either
# case optionally proceeded by whitespace), it's a list; don't wrap.
return bool(
config.RE_ORDERED_LIST_MATCHER.match(stripped)
or config.RE_UNORDERED_LIST_MATCHER.match(stripped)
)
def escape_md(text: str) -> str:
"""
Escapes markdown-sensitive characters within other markdown
constructs.
"""
return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)
def escape_md_section(text: str, snob: bool = False) -> str:
"""
Escapes markdown-sensitive characters across whole document sections.
"""
text = config.RE_MD_BACKSLASH_MATCHER.sub(r"\\\1", text)
if snob:
text = config.RE_MD_CHARS_MATCHER_ALL.sub(r"\\\1", text)
text = config.RE_MD_DOT_MATCHER.sub(r"\1\\\2", text)
text = config.RE_MD_PLUS_MATCHER.sub(r"\1\\\2", text)
text = config.RE_MD_DASH_MATCHER.sub(r"\1\\\2", text)
return text
def reformat_table(lines: List[str], right_margin: int) -> List[str]:
"""
Given the lines of a table
padds the cells and returns the new lines
"""
# find the maximum width of the columns
max_width = [len(x.rstrip()) + right_margin for x in lines[0].split("|")]
max_cols = len(max_width)
for line in lines:
cols = [x.rstrip() for x in line.split("|")]
num_cols = len(cols)
# don't drop any data if colspan attributes result in unequal lengths
if num_cols < max_cols:
cols += [""] * (max_cols - num_cols)
elif max_cols < num_cols:
max_width += [len(x) + right_margin for x in cols[-(num_cols - max_cols) :]]
max_cols = num_cols
max_width = [max(len(x) + right_margin, old_len) for x, old_len in zip(cols, max_width)]
# reformat
new_lines = []
for line in lines:
cols = [x.rstrip() for x in line.split("|")]
if set(line.strip()) == set("-|"):
filler = "-"
new_cols = [
x.rstrip() + (filler * (M - len(x.rstrip()))) for x, M in zip(cols, max_width)
]
new_lines.append("|-" + "|".join(new_cols) + "|")
else:
filler = " "
new_cols = [
x.rstrip() + (filler * (M - len(x.rstrip()))) for x, M in zip(cols, max_width)
]
new_lines.append("| " + "|".join(new_cols) + "|")
return new_lines
def pad_tables_in_text(text: str, right_margin: int = 1) -> str:
"""
Provide padding for tables in the text
"""
lines = text.split("\n")
table_buffer = [] # type: List[str]
table_started = False
new_lines = []
for line in lines:
# Toggle table started
if config.TABLE_MARKER_FOR_PAD in line:
table_started = not table_started
if not table_started:
table = reformat_table(table_buffer, right_margin)
new_lines.extend(table)
table_buffer = []
new_lines.append("")
continue
# Process lines
if table_started:
table_buffer.append(line)
else:
new_lines.append(line)
return "\n".join(new_lines)

View File

@ -0,0 +1,196 @@
from datetime import datetime, timezone
from dateutil.parser import parse as date_parse
from base.orm import local_session
from migration.html2text import html2text
from orm.reaction import Reaction, ReactionKind
from orm.shout import Shout, ShoutReactionsFollower
from orm.topic import TopicFollower
from orm.user import User
ts = datetime.now(tz=timezone.utc)
def auto_followers(session, topics, reaction_dict):
# creating shout's reactions following for reaction author
following1 = (
session.query(ShoutReactionsFollower)
.where(ShoutReactionsFollower.follower == reaction_dict["createdBy"])
.filter(ShoutReactionsFollower.shout == reaction_dict["shout"])
.first()
)
if not following1:
following1 = ShoutReactionsFollower.create(
follower=reaction_dict["createdBy"], shout=reaction_dict["shout"], auto=True
)
session.add(following1)
# creating topics followings for reaction author
for t in topics:
tf = (
session.query(TopicFollower)
.where(TopicFollower.follower == reaction_dict["createdBy"])
.filter(TopicFollower.topic == t["id"])
.first()
)
if not tf:
topic_following = TopicFollower.create(
follower=reaction_dict["createdBy"], topic=t["id"], auto=True
)
session.add(topic_following)
def migrate_ratings(session, entry, reaction_dict):
for comment_rating_old in entry.get("ratings", []):
rater = session.query(User).filter(User.oid == comment_rating_old["createdBy"]).first()
re_reaction_dict = {
"shout": reaction_dict["shout"],
"replyTo": reaction_dict["id"],
"kind": ReactionKind.LIKE if comment_rating_old["value"] > 0 else ReactionKind.DISLIKE,
"createdBy": rater.id if rater else 1,
}
cts = comment_rating_old.get("createdAt")
if cts:
re_reaction_dict["createdAt"] = date_parse(cts)
try:
# creating reaction from old rating
rr = Reaction.create(**re_reaction_dict)
following2 = (
session.query(ShoutReactionsFollower)
.where(ShoutReactionsFollower.follower == re_reaction_dict["createdBy"])
.filter(ShoutReactionsFollower.shout == rr.shout)
.first()
)
if not following2:
following2 = ShoutReactionsFollower.create(
follower=re_reaction_dict["createdBy"], shout=rr.shout, auto=True
)
session.add(following2)
session.add(rr)
except Exception as e:
print("[migration] comment rating error: %r" % re_reaction_dict)
raise e
session.commit()
async def migrate(entry, storage):
"""
{
"_id": "hdtwS8fSyFLxXCgSC",
"body": "<p>",
"contentItem": "mnK8KsJHPRi8DrybQ",
"createdBy": "bMFPuyNg6qAD2mhXe",
"thread": "01/",
"createdAt": "2016-04-19 04:33:53+00:00",
"ratings": [
{ "createdBy": "AqmRukvRiExNpAe8C", "value": 1 },
{ "createdBy": "YdE76Wth3yqymKEu5", "value": 1 }
],
"rating": 2,
"updatedAt": "2020-05-27 19:22:57.091000+00:00",
"updatedBy": "0"
}
->
type Reaction {
id: Int!
shout: Shout!
createdAt: DateTime!
createdBy: User!
updatedAt: DateTime
deletedAt: DateTime
deletedBy: User
range: String # full / 0:2340
kind: ReactionKind!
body: String
replyTo: Reaction
stat: Stat
old_id: String
old_thread: String
}
"""
old_ts = entry.get("createdAt")
reaction_dict = {
"createdAt": (ts if not old_ts else date_parse(old_ts)),
"body": html2text(entry.get("body", "")),
"oid": entry["_id"],
}
shout_oid = entry.get("contentItem")
if shout_oid not in storage["shouts"]["by_oid"]:
if len(storage["shouts"]["by_oid"]) > 0:
return shout_oid
else:
print("[migration] no shouts migrated yet")
raise Exception("no shouts migrated yet")
else:
stage = "started"
reaction = None
with local_session() as session:
author = session.query(User).filter(User.oid == entry["createdBy"]).first()
old_shout = storage["shouts"]["by_oid"].get(shout_oid)
if not old_shout:
raise Exception("no old shout in storage")
else:
stage = "author and old id found"
try:
shout = session.query(Shout).where(Shout.slug == old_shout["slug"]).one()
if shout:
reaction_dict["shout"] = shout.id
reaction_dict["createdBy"] = author.id if author else 1
reaction_dict["kind"] = ReactionKind.COMMENT
# creating reaction from old comment
reaction = Reaction.create(**reaction_dict)
session.add(reaction)
# session.commit()
stage = "new reaction commited"
reaction_dict = reaction.dict()
topics = [t.dict() for t in shout.topics]
auto_followers(session, topics, reaction_dict)
migrate_ratings(session, entry, reaction_dict)
return reaction
except Exception as e:
print(e)
print(reaction)
raise Exception(stage)
def migrate_2stage(old_comment, idmap):
if old_comment.get("body"):
new_id = idmap.get(old_comment.get("oid"))
new_id = idmap.get(old_comment.get("_id"))
if new_id:
new_replyto_id = None
old_replyto_id = old_comment.get("replyTo")
if old_replyto_id:
new_replyto_id = int(idmap.get(old_replyto_id, "0"))
with local_session() as session:
comment = session.query(Reaction).where(Reaction.id == new_id).first()
try:
if new_replyto_id:
new_reply = (
session.query(Reaction).where(Reaction.id == new_replyto_id).first()
)
if not new_reply:
print(new_replyto_id)
raise Exception("cannot find reply by id!")
comment.replyTo = new_reply.id
session.add(comment)
srf = (
session.query(ShoutReactionsFollower)
.where(ShoutReactionsFollower.shout == comment.shout)
.filter(ShoutReactionsFollower.follower == comment.createdBy)
.first()
)
if not srf:
srf = ShoutReactionsFollower.create(
shout=comment.shout, follower=comment.createdBy, auto=True
)
session.add(srf)
session.commit()
except Exception:
raise Exception("cannot find a comment by oldid")

View File

@ -0,0 +1,399 @@
import json
import re
from datetime import datetime, timezone
from dateutil.parser import parse as date_parse
from sqlalchemy.exc import IntegrityError
from transliterate import translit
from base.orm import local_session
from migration.extract import extract_html, extract_media
from orm.reaction import Reaction, ReactionKind
from orm.shout import Shout, ShoutReactionsFollower, ShoutTopic
from orm.topic import Topic, TopicFollower
from orm.user import User
from services.stat.viewed import ViewedStorage
OLD_DATE = "2016-03-05 22:22:00.350000"
ts = datetime.now(tz=timezone.utc)
type2layout = {
"Article": "article",
"Literature": "literature",
"Music": "music",
"Video": "video",
"Image": "image",
}
anondict = {"slug": "anonymous", "id": 1, "name": "Аноним"}
discours = {"slug": "discours", "id": 2, "name": "Дискурс"}
def get_shout_slug(entry):
slug = entry.get("slug", "")
if not slug:
for friend in entry.get("friendlySlugs", []):
slug = friend.get("slug", "")
if slug:
break
slug = re.sub("[^0-9a-zA-Z]+", "-", slug)
return slug
def create_author_from_app(app):
user = None
userdata = None
# check if email is used
if app["email"]:
with local_session() as session:
user = session.query(User).where(User.email == app["email"]).first()
if not user:
# print('[migration] app %r' % app)
name = app.get("name")
if name:
slug = translit(name, "ru", reversed=True).lower()
slug = re.sub("[^0-9a-zA-Z]+", "-", slug)
print("[migration] created slug %s" % slug)
# check if slug is used
if slug:
user = session.query(User).where(User.slug == slug).first()
# get slug from email
if user:
slug = app["email"].split("@")[0]
user = session.query(User).where(User.slug == slug).first()
# one more try
if user:
slug += "-author"
user = session.query(User).where(User.slug == slug).first()
# create user with application data
if not user:
userdata = {
"username": app["email"],
"email": app["email"],
"name": app.get("name", ""),
"emailConfirmed": False,
"slug": slug,
"createdAt": ts,
"lastSeen": ts,
}
# print('[migration] userdata %r' % userdata)
user = User.create(**userdata)
session.add(user)
session.commit()
userdata["id"] = user.id
userdata = user.dict()
return userdata
else:
raise Exception("app is not ok", app)
async def create_shout(shout_dict):
s = Shout.create(**shout_dict)
author = s.authors[0]
with local_session() as session:
srf = (
session.query(ShoutReactionsFollower)
.where(ShoutReactionsFollower.shout == s.id)
.filter(ShoutReactionsFollower.follower == author.id)
.first()
)
if not srf:
srf = ShoutReactionsFollower.create(shout=s.id, follower=author.id, auto=True)
session.add(srf)
session.commit()
return s
async def get_user(entry, storage):
app = entry.get("application")
userdata = None
user_oid = None
if app:
userdata = create_author_from_app(app)
else:
user_oid = entry.get("createdBy")
if user_oid == "0":
userdata = discours
elif user_oid:
userdata = storage["users"]["by_oid"].get(user_oid)
if not userdata:
print("no userdata by oid, anonymous")
userdata = anondict
print(app)
# cleanup slug
if userdata:
slug = userdata.get("slug", "")
if slug:
slug = re.sub("[^0-9a-zA-Z]+", "-", slug)
userdata["slug"] = slug
else:
userdata = anondict
user = await process_user(userdata, storage, user_oid)
return user, user_oid
async def migrate(entry, storage):
author, user_oid = await get_user(entry, storage)
r = {
"layout": type2layout[entry["type"]],
"title": entry["title"],
"authors": [
author,
],
"slug": get_shout_slug(entry),
"cover": (
"https://images.discours.io/unsafe/" + entry["thumborId"]
if entry.get("thumborId")
else entry.get("image", {}).get("url")
),
"visibility": "public" if entry.get("published") else "community",
"publishedAt": date_parse(entry.get("publishedAt")) if entry.get("published") else None,
"deletedAt": date_parse(entry.get("deletedAt")) if entry.get("deletedAt") else None,
"createdAt": date_parse(entry.get("createdAt", OLD_DATE)),
"updatedAt": date_parse(entry["updatedAt"]) if "updatedAt" in entry else ts,
"createdBy": author.id,
"topics": await add_topics_follower(entry, storage, author),
"body": extract_html(entry, cleanup=True),
}
# main topic patch
r["mainTopic"] = r["topics"][0]
# published author auto-confirm
if entry.get("published"):
with local_session() as session:
# update user.emailConfirmed if published
author.emailConfirmed = True
session.add(author)
session.commit()
# media
media = extract_media(entry)
r["media"] = json.dumps(media, ensure_ascii=True) if media else None
# ----------------------------------- copy
shout_dict = r.copy()
del shout_dict["topics"]
try:
# save shout to db
shout_dict["oid"] = entry.get("_id", "")
shout = await create_shout(shout_dict)
except IntegrityError as e:
print("[migration] create_shout integrity error", e)
shout = await resolve_create_shout(shout_dict)
except Exception as e:
raise Exception(e)
# update data
shout_dict = shout.dict()
shout_dict["authors"] = [
author.dict(),
]
# shout topics aftermath
shout_dict["topics"] = await topics_aftermath(r, storage)
# content_item ratings to reactions
await content_ratings_to_reactions(entry, shout_dict["slug"])
# shout views
await ViewedStorage.increment(
shout_dict["slug"], amount=entry.get("views", 1), viewer="old-discours"
)
# del shout_dict['ratings']
storage["shouts"]["by_oid"][entry["_id"]] = shout_dict
storage["shouts"]["by_slug"][shout_dict["slug"]] = shout_dict
return shout_dict
async def add_topics_follower(entry, storage, user):
topics = set([])
category = entry.get("category")
topics_by_oid = storage["topics"]["by_oid"]
oids = [
category,
] + entry.get("tags", [])
for toid in oids:
tslug = topics_by_oid.get(toid, {}).get("slug")
if tslug:
topics.add(tslug)
ttt = list(topics)
# add author as TopicFollower
with local_session() as session:
for tpcslug in topics:
try:
tpc = session.query(Topic).where(Topic.slug == tpcslug).first()
if tpc:
tf = (
session.query(TopicFollower)
.where(TopicFollower.follower == user.id)
.filter(TopicFollower.topic == tpc.id)
.first()
)
if not tf:
tf = TopicFollower.create(topic=tpc.id, follower=user.id, auto=True)
session.add(tf)
session.commit()
except IntegrityError:
print("[migration.shout] hidden by topic " + tpc.slug)
# main topic
maintopic = storage["replacements"].get(topics_by_oid.get(category, {}).get("slug"))
if maintopic in ttt:
ttt.remove(maintopic)
ttt.insert(0, maintopic)
return ttt
async def process_user(userdata, storage, oid):
with local_session() as session:
uid = userdata.get("id") # anonymous as
if not uid:
print(userdata)
print("has no id field, set it @anonymous")
userdata = anondict
uid = 1
user = session.query(User).filter(User.id == uid).first()
if not user:
try:
slug = userdata["slug"].lower().strip()
slug = re.sub("[^0-9a-zA-Z]+", "-", slug)
userdata["slug"] = slug
user = User.create(**userdata)
session.add(user)
session.commit()
except IntegrityError:
print(f"[migration] user creating with slug {userdata['slug']}")
print("[migration] from userdata")
print(userdata)
raise Exception("[migration] cannot create user in content_items.get_user()")
if user.id == 946:
print("[migration] ***************** ALPINA")
if user.id == 2:
print("[migration] +++++++++++++++++ DISCOURS")
userdata["id"] = user.id
userdata["createdAt"] = user.createdAt
storage["users"]["by_slug"][userdata["slug"]] = userdata
storage["users"]["by_oid"][oid] = userdata
if not user:
raise Exception("could not get a user")
return user
async def resolve_create_shout(shout_dict):
with local_session() as session:
s = session.query(Shout).filter(Shout.slug == shout_dict["slug"]).first()
bump = False
if s:
if s.createdAt != shout_dict["createdAt"]:
# create new with different slug
shout_dict["slug"] += "-" + shout_dict["layout"]
try:
await create_shout(shout_dict)
except IntegrityError as e:
print(e)
bump = True
else:
# update old
for key in shout_dict:
if key in s.__dict__:
if s.__dict__[key] != shout_dict[key]:
print("[migration] shout already exists, but differs in %s" % key)
bump = True
else:
print("[migration] shout already exists, but lacks %s" % key)
bump = True
if bump:
s.update(shout_dict)
else:
print("[migration] something went wrong with shout: \n%r" % shout_dict)
raise Exception("")
session.commit()
return s
async def topics_aftermath(entry, storage):
r = []
for tpc in filter(lambda x: bool(x), entry["topics"]):
oldslug = tpc
newslug = storage["replacements"].get(oldslug, oldslug)
if newslug:
with local_session() as session:
shout = session.query(Shout).where(Shout.slug == entry["slug"]).first()
new_topic = session.query(Topic).where(Topic.slug == newslug).first()
shout_topic_old = (
session.query(ShoutTopic)
.join(Shout)
.join(Topic)
.filter(Shout.slug == entry["slug"])
.filter(Topic.slug == oldslug)
.first()
)
if shout_topic_old:
shout_topic_old.update({"topic": new_topic.id})
else:
shout_topic_new = (
session.query(ShoutTopic)
.join(Shout)
.join(Topic)
.filter(Shout.slug == entry["slug"])
.filter(Topic.slug == newslug)
.first()
)
if not shout_topic_new:
try:
ShoutTopic.create(**{"shout": shout.id, "topic": new_topic.id})
except Exception:
print("[migration] shout topic error: " + newslug)
session.commit()
if newslug not in r:
r.append(newslug)
else:
print("[migration] ignored topic slug: \n%r" % tpc["slug"])
# raise Exception
return r
async def content_ratings_to_reactions(entry, slug):
try:
with local_session() as session:
for content_rating in entry.get("ratings", []):
rater = (
session.query(User).filter(User.oid == content_rating["createdBy"]).first()
) or User.default_user
shout = session.query(Shout).where(Shout.slug == slug).first()
cts = content_rating.get("createdAt")
reaction_dict = {
"createdAt": date_parse(cts) if cts else None,
"kind": ReactionKind.LIKE
if content_rating["value"] > 0
else ReactionKind.DISLIKE,
"createdBy": rater.id,
"shout": shout.id,
}
reaction = (
session.query(Reaction)
.filter(Reaction.shout == reaction_dict["shout"])
.filter(Reaction.createdBy == reaction_dict["createdBy"])
.filter(Reaction.kind == reaction_dict["kind"])
.first()
)
if reaction:
k = ReactionKind.AGREE if content_rating["value"] > 0 else ReactionKind.DISAGREE
reaction_dict["kind"] = k
reaction.update(reaction_dict)
session.add(reaction)
else:
rea = Reaction.create(**reaction_dict)
session.add(rea)
# shout_dict['ratings'].append(reaction_dict)
session.commit()
except Exception:
print("[migration] content_item.ratings error: \n%r" % content_rating)

View File

@ -0,0 +1,35 @@
# from base.orm import local_session
# from migration.extract import extract_md
# from migration.html2text import html2text
# from orm.reaction import Reaction, ReactionKind
# def migrate(entry, storage):
# post_oid = entry["contentItem"]
# print(post_oid)
# shout_dict = storage["shouts"]["by_oid"].get(post_oid)
# if shout_dict:
# print(shout_dict["body"])
# remark = {
# "shout": shout_dict["id"],
# "body": extract_md(html2text(entry["body"]), shout_dict),
# "kind": ReactionKind.REMARK,
# }
#
# if entry.get("textBefore"):
# remark["range"] = (
# str(shout_dict["body"].index(entry["textBefore"] or ""))
# + ":"
# + str(
# shout_dict["body"].index(entry["textAfter"] or "")
# + len(entry["textAfter"] or "")
# )
# )
#
# with local_session() as session:
# rmrk = Reaction.create(**remark)
# session.commit()
# del rmrk["_sa_instance_state"]
# return rmrk
# return

View File

@ -0,0 +1,828 @@
{
"207": "207",
"1990-e": "90s",
"2000-e": "2000s",
"90-e": "90s",
"Georgia": "georgia",
"Japan": "japan",
"Sweden": "sweden",
"abstraktsiya": "abstract",
"absurdism": "absurdism",
"acclimatization": "acclimatisation",
"activism": "activism",
"adolf-gitler": "adolf-hitler",
"afrika": "africa",
"agata-kristi": "agatha-christie",
"agressivnoe-povedenie": "agression",
"agressiya": "agression",
"aktsii": "actions",
"aktsionizm": "actionism",
"alber-kamyu": "albert-kamus",
"albomy": "albums",
"aleksandr-griboedov": "aleksander-griboedov",
"aleksandr-pushkin": "aleksander-pushkin",
"aleksandr-solzhenitsyn": "aleksander-solzhenitsyn",
"aleksandr-vvedenskiy": "aleksander-vvedensky",
"aleksey-navalnyy": "alexey-navalny",
"alfavit": "alphabet",
"alkogol": "alcohol",
"alternativa": "alternative",
"alternative": "alternative",
"alternativnaya-istoriya": "alternative-history",
"amerika": "america",
"anarhizm": "anarchism",
"anatoliy-mariengof": "anatoly-mariengof",
"ancient-russia": "ancient-russia",
"andegraund": "underground",
"andrey-platonov": "andrey-platonov",
"andrey-rodionov": "andrey-rodionov",
"andrey-tarkovskiy": "andrey-tarkovsky",
"angliyskie-istorii": "english-stories",
"angliyskiy-yazyk": "english-langugae",
"ango": "ango",
"animation": "animation",
"animatsiya": "animation",
"anime": "anime",
"anri-volohonskiy": "anri-volohonsky",
"antifashizm": "anti-faschism",
"antiquity": "antiquity",
"antiutopiya": "dystopia",
"anton-dolin": "anton-dolin",
"antropology": "antropology",
"antropotsen": "antropocenus",
"architecture": "architecture",
"arheologiya": "archeology",
"arhetipy": "archetypes",
"arhiv": "archive",
"aristokraty": "aristocracy",
"aristotel": "aristotle",
"arktika": "arctic",
"armiya": "army",
"armiya-1": "army",
"art": "art",
"art-is": "art-is",
"artists": "artists",
"ateizm": "atheism",
"audio-poetry": "audio-poetry",
"audiopoeziya": "audio-poetry",
"audiospektakl": "audio-spectacles",
"auktsyon": "auktsyon",
"avangard": "avantgarde",
"avtofikshn": "autofiction",
"avtorskaya-pesnya": "bardsongs",
"azbuka-immigratsii": "immigration-basics",
"aziatskiy-kinematograf": "asian-cinema",
"b-movie": "b-movie",
"bannye-chteniya": "sauna-reading",
"bardsongs": "bardsongs",
"bdsm": "bdsm",
"beecake": "beecake",
"belarus": "belarus",
"belgiya": "belgium",
"bertold-breht": "berttold-brecht",
"bezumie": "madness",
"biography": "biography",
"biologiya": "biology",
"bipolyarnoe-rasstroystvo": "bipolar-disorder",
"bitniki": "beatnics",
"biznes": "business",
"blizhniy-vostok": "middle-east",
"blizost": "closeness",
"blocked-in-russia": "blocked-in-russia",
"blokada": "blockade",
"bob-dilan": "bob-dylan",
"bog": "god",
"bol": "pain",
"bolotnoe-delo": "bolotnaya-case",
"books": "books",
"boris-eltsin": "boris-eltsin",
"boris-godunov": "boris-godunov",
"boris-grebenschikov": "boris-grebenschikov",
"boris-nemtsov": "boris-nemtsov",
"boris-pasternak": "boris-pasternak",
"brak": "marriage",
"bret-iston-ellis": "bret-iston-ellis",
"buddizm": "buddhism",
"bullying": "bullying",
"bunt": "riot",
"burning-man": "burning-man",
"bytie": "being",
"byurokratiya": "bureaucracy",
"capitalism": "capitalism",
"censored-in-russia": "censored-in-russia",
"ch-rno-beloe": "black-and-white",
"ch-rnyy-yumor": "black-humour",
"chapters": "chapters",
"charity": "charity",
"chayldfri": "childfree",
"chechenskaya-voyna": "chechen-war",
"chechnya": "chechnya",
"chelovek": "male",
"chernobyl": "chernobyl",
"chernyy-yumor": "black-humour",
"children": "children",
"china": "china",
"chinovniki": "bureaucracy",
"chukotka": "chukotka",
"chuma": "plague",
"church": "church",
"cinema": "cinema",
"city": "city",
"civil-position": "civil-position",
"clips": "clips",
"collage": "collage",
"comics": "comics",
"conspiracy-theory": "conspiracy-theory",
"contemporary-art": "contemporary-art",
"contemporary-poetry": "poetry",
"contemporary-prose": "prose",
"coronavirus": "coronavirus",
"corruption": "corruption",
"creative-writing-school": "creative-writing-school",
"crime": "crime",
"criticism": "criticism",
"critiques": "reviews",
"culture": "culture",
"dadaizm": "dadaism",
"daniel-defo": "daniel-defoe",
"daniil-harms": "daniil-kharms",
"dante-aligeri": "dante-alighieri",
"darkveyv": "darkwave",
"death": "death",
"debaty": "debats",
"delo-seti": "seti-case",
"democracy": "democracy",
"demografiya": "demographics",
"demonstrations": "demonstrations",
"depression": "depression",
"derevnya": "village",
"derrida": "derrida",
"design": "design",
"detskie-doma": "orphanages",
"detstvo": "childhood",
"devid-linch": "david-linch",
"devyanostye": "90s",
"dialog": "dialogue",
"digital": "digital",
"digital-art": "digital-art",
"dinozavry": "dinosaurs",
"directing": "directing",
"diskurs": "discours",
"diskurs-1": "discourse",
"diskurs-analiz": "discourse-analytics",
"dissidenty": "dissidents",
"diy": "diy",
"dmitriy-donskoy": "dmitriy-donskoy",
"dmitriy-prigov": "dmitriy-prigov",
"dnevnik-1": "dairy",
"dnevniki": "dairies",
"documentary": "documentary",
"dokumentalnaya-poema": "documentary-poem",
"dokumentalnaya-poeziya": "documentary-poetry",
"dokumenty": "doсuments",
"domashnee-nasilie": "home-terror",
"donald-tramp": "donald-trump",
"donbass": "donbass",
"donbass-diary": "donbass-diary",
"donorstvo": "donation",
"dozhd": "rain",
"drama": "drama",
"dramaturgy": "dramaturgy",
"drawing": "drawing",
"drevo-zhizni": "tree-of-life",
"drugs": "drugs",
"duh": "spirit",
"dzhaz": "jazz",
"dzhek-keruak": "jack-keruak",
"dzhim-morrison": "jim-morrison",
"dzhordzh-romero": "george-romero",
"dzhordzho-agamben": "giorgio-agamben",
"ecology": "ecology",
"economics": "economics",
"eda": "food",
"editorial-statements": "editorial-statements",
"eduard-limonov": "eduard-limonov",
"education": "education",
"egor-letov": "egor-letov",
"ekspat": "expat",
"eksperiment": "experiments",
"eksperimentalnaya-muzyka": "experimental-music",
"ekspressionizm": "expressionism",
"ekstremizm": "extremism",
"ekzistentsializm-1": "existentialism",
"ekzistentsiya": "existence",
"elections": "elections",
"electronic": "electronics",
"electronics": "electronics",
"elena-glinskaya": "elena-glinskaya",
"elena-guro": "elena-guro",
"elizaveta-mnatsakanova": "elizaveta-mnatsakanova",
"embient": "ambient",
"emigration": "emigration",
"emil-dyurkgeym": "emile-durkheim",
"emotsii": "emotions",
"empiric": "empiric",
"epidemiya": "pandemic",
"erich-von-neff": "erich-von-neff",
"erotika": "erotics",
"essay": "essay",
"estetika": "aestetics",
"etika": "ethics",
"etno": "ethno",
"etnos": "ethnics",
"everyday-life": "everyday-life",
"evgeniy-onegin": "eugene-onegin",
"evolyutsiya": "evolution",
"exhibitions": "exhibitions",
"experience": "experiences",
"experimental": "experimental",
"experimental-music": "experimental-music",
"explanation": "explanation",
"faktcheking": "fact-checking",
"falsifikatsii": "falsifications",
"family": "family",
"fanfiki": "fan-fiction",
"fantastika": "sci-fi",
"fatalizm": "fatalism",
"fedor-dostoevskiy": "fedor-dostoevsky",
"fedor-ioannovich": "fedor-ioannovich",
"feleton": "feuilleton",
"feminism": "feminism",
"fenomenologiya": "phenomenology",
"fentezi": "fantasy",
"festival": "festival",
"festival-territoriya": "festival-territory",
"folk": "folk",
"folklor": "folklore",
"fotoreportazh": "photoreports",
"france": "france",
"frants-kafka": "franz-kafka",
"frederik-begbeder": "frederick-begbeder",
"freedom": "freedom",
"friendship": "friendship",
"fsb": "fsb",
"futbol": "footbool",
"future": "future",
"futuristy": "futurists",
"futurizm": "futurism",
"galereya": "gallery",
"galereya-anna-nova": "gallery-anna-nova",
"gdr": "gdr",
"gender": "gender",
"gendernyy-diskurs": "gender",
"gennadiy-aygi": "gennadiy-aygi",
"gerhard-rihter": "gerhard-rihter",
"germaniya": "germany",
"germenevtika": "hermeneutics",
"geroi": "heroes",
"girls": "girls",
"gkchp": "gkchp",
"glitch": "glitch",
"globalizatsiya": "globalisation",
"gollivud": "hollywood",
"gonzo": "gonzo",
"gore-ot-uma": "woe-from-wit",
"graffiti": "graffiti",
"graficheskaya-novella": "graphic-novell",
"graphics": "graphics",
"gravyura": "engraving",
"grazhdanskaya-oborona": "grazhdanskaya-oborona",
"gretsiya": "greece",
"griby": "mushrooms",
"gruziya-2": "georgia",
"gulag": "gulag",
"han-batyy": "khan-batyy",
"hayku": "haiku",
"health": "health",
"himiya": "chemistry",
"hip-hop": "hip-hop",
"history": "history",
"history-of-russia": "history-of-russia",
"holokost": "holocaust",
"horeografiya": "choreography",
"horror": "horror",
"hospis": "hospice",
"hristianstvo": "christianity",
"humans": "humans",
"humour": "humour",
"ideologiya": "ideology",
"idm": "idm",
"igil": "isis",
"igor-pomerantsev": "igor-pomerantsev",
"igra": "game",
"igra-prestolov": "game-of-throne",
"igry": "games",
"iisus-hristos": "jesus-christ",
"illness": "illness",
"illustration-history": "illustration-history",
"illustrations": "illustrations",
"imazhinizm": "imagism",
"immanuil-kant": "immanuel-kant",
"impressionizm": "impressionism",
"improvizatsiya": "improvisation",
"indi": "indie",
"individualizm": "individualism",
"infografika": "infographics",
"informatsiya": "information",
"ingmar-bergman": "ingmar-bergman",
"inklyuziya": "inclusion",
"installyatsiya": "installation",
"internet": "internet",
"interview": "interview",
"invalidnost": "disability",
"investigations": "investigations",
"iosif-brodskiy": "joseph-brodsky",
"iosif-stalin": "joseph-stalin",
"iskusstvennyy-intellekt": "artificial-intelligence",
"islam": "islam",
"istoriya-moskvy": "moscow-history",
"istoriya-nauki": "history-of-sceince",
"istoriya-o-medsestre": "nurse-story",
"istoriya-teatra": "theatre-history",
"italiya": "italy",
"italyanskiy-yazyk": "italian-language",
"iudaika": "judaica",
"ivan-groznyy": "ivan-grozny",
"ivan-iii-gorbatyy": "ivan-iii-gorbaty",
"ivan-kalita": "ivan-kalita",
"ivan-krylov": "ivan-krylov",
"izobreteniya": "inventions",
"izrail-1": "israel",
"jazz": "jazz",
"john-lennon": "john-lennon",
"journalism": "journalism",
"justice": "justice",
"k-pop": "k-pop",
"kalligrafiya": "calligraphy",
"karikatura": "caricatures",
"kartochki-rubinshteyna": "rubinstein-cards",
"katrin-nenasheva": "katrin-nenasheva",
"kavarga": "kavarga",
"kavkaz": "caucasus",
"kazan": "kazan",
"kiberbezopasnost": "cybersecurity",
"kinoklub": "cinema-club",
"kinokritika": "film-criticism",
"kirill-serebrennikov": "kirill-serebrennikov",
"kladbische": "cemetery",
"klassika": "classic",
"kollektivnoe-bessoznatelnoe": "сollective-unconscious",
"komediya": "comedy",
"kommunikatsii": "communications",
"kommunizm": "communism",
"kommuny": "communes",
"kompyuternye-igry": "computer-games",
"konets-vesny": "end-of-spring",
"konservatizm": "conservatism",
"kontrkultura": "counter-culture",
"kontseptualizm": "conceptualism",
"korotkometrazhka": "cinema-shorts",
"kosmos": "cosmos",
"kraudfanding": "crowdfunding",
"kriptovalyuty": "cryptocurrencies",
"krizis": "crisis",
"krov": "blood",
"krym": "crimea",
"kulturologiya": "culturology",
"kulty": "cults",
"kurdistan": "kurdistan",
"kurt-kobeyn": "kurt-cobain",
"kurt-vonnegut": "kurt-vonnegut",
"kvir": "queer",
"laboratoriya": "lab",
"language": "languages",
"lars-fon-trier": "lars-fon-trier",
"laws": "laws",
"lectures": "lectures",
"leto": "summer",
"lev-tolstoy": "leo-tolstoy",
"lgbt": "lgbt",
"liberalizm": "liberalism",
"libertarianstvo": "libertarianism",
"life": "life",
"likbez": "likbez",
"lingvistika": "linguistics",
"lirika": "lirics",
"literary-studies": "literary-studies",
"literature": "literature",
"literaturnyykaver": "literature-cover",
"lo-fi": "lo-fi",
"lomonosov": "lomonosov",
"love": "love",
"luzha-goluboy-krovi": "luzha-goluboy-krovi",
"lyudvig-vitgenshteyn": "ludwig-wittgenstein",
"lzhedmitriy": "false-dmitry",
"lzhenauka": "pseudoscience",
"magiya": "magic",
"maks-veber": "max-weber",
"manifests": "manifests",
"manipulyatsii-soznaniem": "mind-manipulation",
"marina-abramovich": "marina-abramovich",
"marketing": "marketing",
"marksizm": "marxism",
"marsel-dyushan": "marchel-duchamp",
"marsel-prust": "marcel-proust",
"martin-haydegger": "martin-hidegger",
"matematika": "maths",
"mayakovskiy": "vladimir-mayakovsky",
"media": "media",
"medicine": "medicine",
"memuary": "memoirs",
"menedzhment": "management",
"menty": "police",
"merab-mamardashvili": "merab-mamardashvili",
"mest": "revenge",
"metamodernizm": "metamodern",
"metavselennaya": "metaverse",
"metro": "metro",
"mifologiya": "mythology",
"mify": "myth",
"mihael-haneke": "michael-haneke",
"mihail-baryshnikov": "mihail-baryshnikov",
"mihail-bulgakov": "mihail-bulgakov",
"mikrotonalnaya-muzyka": "mikrotone-muzyka",
"minimalizm": "minimalism",
"minkult-privet": "minkult-privet",
"mir": "world",
"mirovozzrenie": "mindsets",
"mishel-fuko": "michel-foucault",
"mistika": "mystics",
"mitropolit-makariy": "mitropolit-makariy",
"mlm": "mlm",
"mobilizatsiya": "mobilisation",
"moda": "fashion",
"modernizm": "modernism",
"mokyumentari": "mockumentary",
"molodezh": "youth",
"moloko-plus": "moloko-plus",
"money": "money",
"monologs": "monologues",
"monstratsiya": "monstration",
"moralnaya-otvetstvennost": "moral-responsibility",
"more": "sea",
"moscow": "moscow",
"moshennichestvo": "frauds",
"moskovskiy-romanticheskiy-kontseptualizm": "moscow-romantic-conceptualism",
"moskovskoe-delo": "moscow-case",
"movies": "movies",
"mozg": "brain",
"multiplikatsiya": "animation",
"music": "music",
"musulmanstvo": "islam",
"muzei": "museum",
"muzey": "museum",
"muzhchiny": "man",
"myshlenie": "thinking",
"nagornyy-karabah": "nagorno-karabakh",
"nasilie-1": "violence",
"natsionalizm": "nationalism",
"natsionalnaya-ideya": "national-idea",
"natsizm": "nazism",
"natyurmort": "nature-morte",
"nauchpop": "pop-science",
"nbp": "nbp",
"nenavist": "hate",
"neofitsialnaya-literatura": "unofficial-literature",
"neoklassika": "neoclassic",
"neprozrachnye-smysly": "hidden-meanings",
"neravenstvo": "inequality",
"net-voyne": "no-war",
"new-year": "new-year",
"neyronauka": "neuro-science",
"neyroseti": "neural-networks",
"niu-vshe": "hse",
"nizhniy-novgorod": "nizhny-novgorod",
"nko": "nonprofits",
"nlo": "ufo",
"nobelevskaya-premiya": "nobel-prize",
"noize-mc": "noize-mc",
"nonkonformizm": "nonconformism",
"notforall": "notforall",
"novaya-drama": "new-drama",
"novosti": "news",
"noyz": "noise",
"nuar": "noir",
"oberiu": "oberiu",
"ocherk": "etudes",
"ochevidnyy-nuar": "ochevidnyy-nuar",
"odinochestvo": "loneliness",
"odna-kniga-odna-istoriya": "one-book-one-story",
"okrainy": "outskirts",
"omon": "swat",
"opinions": "opinions",
"oppozitsiya": "opposition",
"orhan-pamuk": "orhan-pamuk",
"ornitologiya": "ornitology",
"osen": "autumn",
"osip-mandelshtam": "osip-mandelshtam",
"oskar-uayld": "oscar-wilde",
"osoznanie": "awareness",
"otnosheniya": "relationship",
"pablo-pikasso": "pablo-picasso",
"painting": "painting",
"paintings": "painting",
"pamyat": "memory",
"pandemiya": "pandemic",
"parizh": "paris",
"patriotizm": "patriotism",
"patsifizm": "pacifism",
"paul-tselan": "paul-tselan",
"per-burd": "pierre-bourdieu",
"perezhivaniya": "worries",
"performance": "performance",
"peyzazh": "landscape",
"philology": "philology",
"philosophy": "philosophy",
"photo": "photography",
"photography": "photography",
"photoprojects": "photoprojects",
"plakaty": "posters",
"plastilin": "plasticine",
"plays": "plays",
"podrostki": "teenagers",
"poema": "poem",
"poems": "poems",
"poeticheskaya-proza": "poetic-prose",
"poetry": "poetry",
"poetry-of-squares": "poetry-of-squares",
"poetry-slam": "poetry-slam",
"pokoy": "peace",
"police": "police",
"politicheskoe-fentezi": "political-fantasy",
"politics": "politics",
"politzaklyuchennye": "political-prisoners",
"polsha": "poland",
"pomosch": "help",
"pop-art": "pop-art",
"pop-culture": "pop-culture",
"populyarnaya-psihologiya": "popular-psychology",
"pornografiya": "pornography",
"portret": "portrait",
"poslovitsy": "proverbs",
"post-pank": "post-punk",
"post-rok": "post-rock",
"postmodernism": "postmodernism",
"povest": "novells",
"povsednevnost": "everyday-life",
"power": "power",
"pravo": "right",
"pravoslavie": "orthodox",
"pravozaschitniki": "human-rights-activism",
"prazdnik": "holidays",
"predatelstvo": "betrayal",
"predprinimatelstvo": "entrepreneurship",
"premera": "premier",
"premiya-oskar": "oscar-prize",
"pribaltika-1": "baltic",
"priroda": "nature",
"prison": "prison",
"pritcha": "parable",
"privatnost": "privacy",
"progress": "progress",
"projects": "projects",
"prokrastinatsiya": "procrastination",
"propaganda": "propaganda",
"proschenie": "forgiveness",
"prose": "prose",
"proshloe": "past",
"prostitutsiya": "prostitution",
"prosveschenie": "enlightenment",
"protests": "protests",
"psalmy": "psalms",
"psihoanaliz": "psychoanalysis",
"psihodeliki": "psychodelics",
"pskov": "pskov",
"psychiatry": "psychiatry",
"psychology": "psychology",
"ptitsy": "birds",
"punk": "punk",
"r-b": "rnb",
"rasizm": "racism",
"realizm": "realism",
"redaktura": "editing",
"refleksiya": "reflection",
"reggi": "reggae",
"religion": "religion",
"rene-zhirar": "rene-girard",
"renesanss": "renessance",
"renovatsiya": "renovation",
"rep": "rap",
"reportage": "reportage",
"reportazh-1": "reportage",
"repressions": "repressions",
"research": "research",
"retroveyv": "retrowave",
"review": "review",
"revolution": "revolution",
"rezo-gabriadze": "rezo-gabriadze",
"risunki": "painting",
"roboty": "robots",
"rock": "rock",
"roditeli": "parents",
"romantizm": "romantism",
"romany": "novell",
"ronald-reygan": "ronald-reygan",
"roskomnadzor": "roskomnadzor",
"rossiyskoe-kino": "russian-cinema",
"rouling": "rowling",
"rozhava": "rojava",
"rpts": "rpts",
"rus-na-grani-sryva": "rus-na-grani-sryva",
"russia": "russia",
"russian-language": "russian-language",
"russian-literature": "russian-literature",
"russkaya-toska": "russian-toska",
"russkiy-mir": "russkiy-mir",
"salo": "lard",
"salvador-dali": "salvador-dali",
"samoidentifikatsiya": "self-identity",
"samoopredelenie": "self-definition",
"sankt-peterburg": "saint-petersburg",
"sasha-skochilenko": "sasha-skochilenko",
"satira": "satiric",
"saund-art": "sound-art",
"schaste": "happiness",
"school": "school",
"science": "science",
"sculpture": "sculpture",
"second-world-war": "second-world-war",
"sekond-hend": "second-hand",
"seksprosvet": "sex-education",
"seksualizirovannoe-nasilie": "sexualized-violence",
"seksualnoe-nasilie": "sexualized-violence",
"sekty": "sects",
"semi": "semi",
"semiotics": "semiotics",
"serbiya": "serbia",
"sergey-bodrov-mladshiy": "sergey-bodrov-junior",
"sergey-solov-v": "sergey-solovyov",
"serialy": "series",
"sever": "north",
"severnaya-koreya": "north-korea",
"sex": "sex",
"shotlandiya": "scotland",
"shugeyz": "shoegaze",
"siloviki": "siloviki",
"simeon-bekbulatovich": "simeon-bekbulatovich",
"simvolizm": "simbolism",
"siriya": "siria",
"skulptura": "sculpture",
"slavoy-zhizhek": "slavoj-zizek",
"smert-1": "death",
"smysl": "meaning",
"sny": "dreams",
"sobytiya": "events",
"social": "society",
"society": "society",
"sociology": "sociology",
"sofya-paleolog": "sofya-paleolog",
"sofya-vitovtovna": "sofya-vitovtovna",
"soobschestva": "communities",
"soprotivlenie": "resistence",
"sotsializm": "socialism",
"sotsialnaya-filosofiya": "social-philosophy",
"sotsiologiya-1": "sociology",
"sotsseti": "social-networks",
"sotvorenie-tretego-rima": "third-rome",
"sovremennost": "modernity",
"spaces": "spaces",
"spektakl": "spectacles",
"spetseffekty": "special-fx",
"spetsoperatsiya": "special-operation",
"spetssluzhby": "special-services",
"sport": "sport",
"srednevekove": "middle-age",
"state": "state",
"statistika": "statistics",
"stendap": "stand-up",
"stihi": "poetry",
"stoitsizm": "stoicism",
"stories": "stories",
"stoyanie-na-ugre": "stoyanie-na-ugre",
"strah": "fear",
"street-art": "street-art",
"stsenarii": "scenarios",
"sud": "court",
"summary": "summary",
"supergeroi": "superheroes",
"svetlana-aleksievich": "svetlana-aleksievich",
"svobodu-ivanu-golunovu": "free-ivan-golunov",
"syurrealizm": "surrealism",
"tales": "tales",
"tanets": "dance",
"tataro-mongolskoe-igo": "mongol-tatar-yoke",
"tatuirovki": "tattoo",
"technology": "technology",
"televidenie": "television",
"telo": "body",
"telo-kak-iskusstvo": "body-as-art",
"terrorizm": "terrorism",
"tests": "tests",
"text": "texts",
"the-beatles": "the-beatles",
"theater": "theater",
"theory": "theory",
"tokio": "tokio",
"torture": "torture",
"totalitarizm": "totalitarism",
"traditions": "traditions",
"tragicomedy": "tragicomedy",
"transgendernost": "transgender",
"translation": "translation",
"transport": "transport",
"travel": "travel",
"travma": "trauma",
"trendy": "trends",
"tretiy-reyh": "third-reich",
"triller": "thriller",
"tsar": "central-african-republic",
"tsar-edip": "oedipus",
"tsarevich-dmitriy": "tsarevich-dmitry",
"tsennosti": "values",
"tsenzura": "censorship",
"tseremonii": "ceremonies",
"turizm": "tourism",
"tvorchestvo": "creativity",
"ugnetennyy-zhilischnyy-klass": "oppressed-housing-class",
"uilyam-shekspir": "william-shakespeare",
"ukraina-2": "ukraine",
"ukraine": "ukraine",
"university": "university",
"urban-studies": "urban-studies",
"uroki-literatury": "literature-lessons",
"usa": "usa",
"ussr": "ussr",
"utopiya": "utopia",
"utrata": "loss",
"valter-benyamin": "valter-benyamin",
"varlam-shalamov": "varlam-shalamov",
"vasiliy-ii-temnyy": "basil-ii-temnyy",
"vasiliy-iii": "basil-iii",
"vdnh": "vdnh",
"vechnost": "ethernety",
"velikobritaniya": "great-britain",
"velimir-hlebnikov": "velimir-hlebnikov",
"velkom-tu-greyt-britn": "welcome-to-great-britain",
"venedikt-erofeev": "venedikt-erofeev",
"venetsiya": "veneece",
"vengriya": "hungary",
"verlibry": "free-verse",
"veschi": "things",
"vessels": "vessels",
"veterany": "veterans",
"video": "video",
"videoart": "videoart",
"videoklip": "clips",
"videopoeziya": "video-poetry",
"viktor-astafev": "viktor-astafev",
"viktor-pelevin": "viktor-pelevin",
"vilgelm-rayh": "wilhelm-reich",
"vinzavod": "vinzavod",
"violence": "violence",
"visual-culture": "visual-culture",
"vizualnaya-poeziya": "visual-poetry",
"vladimir-lenin": "vladimir-lenin",
"vladimir-mayakovskiy": "vladimir-mayakovsky",
"vladimir-nabokov": "vladimir-nabokov",
"vladimir-putin": "vladimir-putin",
"vladimir-sorokin": "vladimir-sorokin",
"vladimir-voynovich": "vladimir-voynovich",
"vnutrenniy-opyt": "inner-expirience",
"volga": "volga",
"volontery": "volonteurs",
"vong-karvay": "wong-karwai",
"vospominaniya": "memories",
"vostok": "east",
"voyna-na-ukraine": "war-in-ukraine",
"voyna-v-ukraine": "war-in-ukraine",
"vremya": "time",
"vudi-allen": "woody-allen",
"vynuzhdennye-otnosheniya": "forced-relationship",
"war": "war",
"war-in-ukraine-images": "war-in-ukrahine-images",
"women": "women",
"work": "work",
"writers": "writers",
"xx-century": "xx-century",
"yakob-yordans": "yakob-yordans",
"yan-vermeer": "yan-vermeer",
"yanka-dyagileva": "yanka-dyagileva",
"yaponskaya-literatura": "japan-literature",
"yazychestvo": "paganism",
"youth": "youth",
"yozef-rot": "yozef-rot",
"yurgen-habermas": "jorgen-habermas",
"za-liniey-mannergeyma": "behind-mannerheim-line",
"zabota": "care",
"zahar-prilepin": "zahar-prilepin",
"zakonodatelstvo": "laws",
"zakony-mira": "world-laws",
"zametki": "notes",
"zhelanie": "wish",
"zhivotnye": "animals",
"zhoze-saramago": "jose-saramago",
"zigmund-freyd": "sigmund-freud",
"zolotaya-orda": "golden-horde",
"zombi": "zombie",
"zombi-simpsony": "zombie-simpsons"
}
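
A quick sketch of how a slug replacement map like the one above is typically consumed during a migration. The file name replacements.json and the loading code below are assumptions for illustration, not part of this diff:

import json

# Assumption: the mapping above is stored as migration/tables/replacements.json
with open("migration/tables/replacements.json") as f:
    REPLACEMENTS = json.load(f)

def normalize_slug(old_slug: str) -> str:
    # Unknown slugs pass through unchanged
    return REPLACEMENTS.get(old_slug, old_slug)

print(normalize_slug("bezumie"))  # -> "madness", per the map above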

View File

@@ -0,0 +1,31 @@
from base.orm import local_session
from migration.html2text import html2text
from orm import Topic
def migrate(entry):
body_orig = entry.get("description", "").replace("&nbsp;", " ")
topic_dict = {
"slug": entry["slug"],
"oid": entry["_id"],
"title": entry["title"].replace("&nbsp;", " "),
"body": html2text(body_orig),
}
with local_session() as session:
slug = topic_dict["slug"]
topic = session.query(Topic).filter(Topic.slug == slug).first() or Topic.create(
**topic_dict
)
if not topic:
raise Exception("no topic!")
if topic:
if len(topic.title) > len(topic_dict["title"]):
Topic.update(topic, {"title": topic_dict["title"]})
if len(topic.body) < len(topic_dict["body"]):
Topic.update(topic, {"body": topic_dict["body"]})
session.commit()
# print(topic.__dict__)
rt = topic.__dict__.copy()
del rt["_sa_instance_state"]
return rt
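
A minimal usage sketch for the topic migrate() above; the entry record is hypothetical and shows only the keys the function actually reads:

entry = {
    "_id": "topic-oid-1",  # hypothetical Mongo-style id
    "slug": "bezumie",
    "title": "Madness",
    "description": "Texts about&nbsp;madness",
}
row = migrate(entry)  # upserts by slug and returns the row as a plain dict
print(row["slug"], row["title"])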

156
migration/tables/users.py Normal file
View File

@@ -0,0 +1,156 @@
import re
from bs4 import BeautifulSoup
from dateutil.parser import parse
from sqlalchemy.exc import IntegrityError
from base.orm import local_session
from orm.user import AuthorFollower, User, UserRating
def migrate(entry): # noqa: C901
if "subscribedTo" in entry:
del entry["subscribedTo"]
email = entry["emails"][0]["address"]
user_dict = {
"oid": entry["_id"],
"roles": [],
"ratings": [],
"username": email,
"email": email,
"createdAt": parse(entry["createdAt"]),
"emailConfirmed": ("@discours.io" in email) or bool(entry["emails"][0]["verified"]),
"muted": False, # amnesty
"links": [],
"name": "anonymous",
"password": entry["services"]["password"].get("bcrypt"),
}
if "updatedAt" in entry:
user_dict["updatedAt"] = parse(entry["updatedAt"])
if "wasOnlineAt" in entry:
user_dict["lastSeen"] = parse(entry["wasOnlineAt"])
if entry.get("profile"):
# slug
slug = entry["profile"].get("path").lower()
slug = re.sub("[^0-9a-zA-Z]+", "-", slug).strip()
user_dict["slug"] = slug
bio = (
(entry.get("profile", {"bio": ""}).get("bio") or "")
.replace(r"\(", "(")
.replace(r"\)", ")")
)
bio_text = BeautifulSoup(bio, features="lxml").text
if len(bio_text) > 120:
user_dict["about"] = bio_text
else:
user_dict["bio"] = bio_text
# userpic
try:
user_dict["userpic"] = (
"https://images.discours.io/unsafe/" + entry["profile"]["thumborId"]
)
except KeyError:
try:
user_dict["userpic"] = entry["profile"]["image"]["url"]
except KeyError:
user_dict["userpic"] = ""
# name
fn = entry["profile"].get("firstName", "")
ln = entry["profile"].get("lastName", "")
name = fn if fn else ""
name = (name + " " + ln) if ln else name
if not name:
name = slug if slug else "anonymous"
name = entry["profile"]["path"].lower().strip().replace(" ", "-") if len(name) < 2 else name
user_dict["name"] = name
# links
fb = entry["profile"].get("facebook", False)
if fb:
user_dict["links"].append(fb)
vk = entry["profile"].get("vkontakte", False)
if vk:
user_dict["links"].append(vk)
tr = entry["profile"].get("twitter", False)
if tr:
user_dict["links"].append(tr)
ws = entry["profile"].get("website", False)
if ws:
user_dict["links"].append(ws)
# some checks
if not user_dict["slug"] and len(user_dict["links"]) > 0:
user_dict["slug"] = user_dict["links"][0].split("/")[-1]
user_dict["slug"] = user_dict.get("slug", user_dict["email"].split("@")[0])
oid = user_dict["oid"]
user_dict["slug"] = user_dict["slug"].lower().strip().replace(" ", "-")
try:
user = User.create(**user_dict.copy())
except IntegrityError:
print("[migration] cannot create user " + user_dict["slug"])
with local_session() as session:
old_user = session.query(User).filter(User.slug == user_dict["slug"]).first()
old_user.oid = oid
old_user.password = user_dict["password"]
session.commit()
user = old_user
if not user:
print("[migration] ERROR: cannot find user " + user_dict["slug"])
raise Exception
user_dict["id"] = user.id
return user_dict
def post_migrate():
old_discours_dict = {
"slug": "old-discours",
"username": "old-discours",
"email": "old@discours.io",
"name": "Просмотры на старой версии сайта",
}
with local_session() as session:
old_discours_user = User.create(**old_discours_dict)
session.add(old_discours_user)
session.commit()
def migrate_2stage(entry, id_map):
ce = 0
for rating_entry in entry.get("ratings", []):
rater_oid = rating_entry["createdBy"]
rater_slug = id_map.get(rater_oid)
if not rater_slug:
ce += 1
# print(rating_entry)
continue
oid = entry["_id"]
author_slug = id_map.get(oid)
with local_session() as session:
try:
rater = session.query(User).where(User.slug == rater_slug).one()
user = session.query(User).where(User.slug == author_slug).one()
user_rating_dict = {
"value": rating_entry["value"],
"rater": rater.id,
"user": user.id,
}
user_rating = UserRating.create(**user_rating_dict)
if user_rating_dict["value"] > 0:
af = AuthorFollower.create(author=user.id, follower=rater.id, auto=True)
session.add(af)
session.add(user_rating)
session.commit()
except IntegrityError:
print("[migration] cannot rate " + author_slug + "`s by " + rater_slug)
except Exception as e:
print(e)
return ce
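
migrate() and migrate_2stage() are two passes: the first creates users, the second resolves ratings through an oid-to-slug map that the caller is expected to build. A hedged sketch of that driver loop (the entries list and the id_map wiring are assumptions):

id_map = {}
for entry in entries:  # hypothetical list of exported user records
    user_dict = migrate(entry)
    id_map[user_dict["oid"]] = user_dict["slug"]

skipped = 0
for entry in entries:
    skipped += migrate_2stage(entry, id_map)  # returns the count of unresolved raters
print("[migration] %d ratings skipped" % skipped)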

10
migration/utils.py Normal file
View File

@@ -0,0 +1,10 @@
from datetime import datetime
from json import JSONEncoder
class DateTimeEncoder(JSONEncoder):
def default(self, z):
if isinstance(z, datetime):
return str(z)
else:
return super().default(z)
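
DateTimeEncoder plugs into the standard json module through the cls hook, so datetime values serialize via str() while everything else follows the default rules:

import json
from datetime import datetime

payload = {"createdAt": datetime(2024, 1, 24, 11, 36)}
print(json.dumps(payload, cls=DateTimeEncoder))
# {"createdAt": "2024-01-24 11:36:00"}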

View File

@@ -1,14 +1,9 @@
-log_format custom '$remote_addr - $remote_user [$time_local] "$request" '
-                  'origin=$http_origin status=$status '
-                  '"$http_referer" "$http_user_agent"';
 {{ $proxy_settings := "proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $http_connection; proxy_set_header Host $http_host; proxy_set_header X-Request-Start $msec;" }}
 {{ $gzip_settings := "gzip on; gzip_min_length 1100; gzip_buffers 4 32k; gzip_types text/css text/javascript text/xml text/plain text/x-component application/javascript application/x-javascript application/json application/xml application/rss+xml font/truetype application/x-font-ttf font/opentype application/vnd.ms-fontobject image/svg+xml; gzip_vary on; gzip_comp_level 6;" }}
-proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m max_size=1g
-                 inactive=60m use_temp_path=off;
-limit_conn_zone $binary_remote_addr zone=addr:10m;
-limit_req_zone $binary_remote_addr zone=req_zone:10m rate=20r/s;
+{{ $cors_headers_options := "if ($request_method = 'OPTIONS') { add_header 'Access-Control-Allow-Origin' '$allow_origin' always; add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization'; add_header 'Access-Control-Allow-Credentials' 'true'; add_header 'Access-Control-Max-Age' 1728000; add_header 'Content-Type' 'text/plain; charset=utf-8'; add_header 'Content-Length' 0; return 204; }" }}
+{{ $cors_headers_post := "if ($request_method = 'POST') { add_header 'Access-Control-Allow-Origin' '$allow_origin' always; add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization' always; add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always; add_header 'Access-Control-Allow-Credentials' 'true' always; }" }}
+{{ $cors_headers_get := "if ($request_method = 'GET') { add_header 'Access-Control-Allow-Origin' '$allow_origin' always; add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always; add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization' always; add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always; add_header 'Access-Control-Allow-Credentials' 'true' always; }" }}
 {{ range $port_map := .PROXY_PORT_MAP | split " " }}
 {{ $port_map_list := $port_map | split ":" }}
@@ -21,15 +16,14 @@ server {
   listen [::]:{{ $listen_port }};
   listen {{ $listen_port }};
   server_name {{ $.NOSSL_SERVER_NAME }};
-  access_log /var/log/nginx/{{ $.APP }}-access.log custom;
+  access_log /var/log/nginx/{{ $.APP }}-access.log;
   error_log /var/log/nginx/{{ $.APP }}-error.log;
-  client_max_body_size 100M;
 {{ else if eq $scheme "https" }}
   listen [::]:{{ $listen_port }} ssl http2;
   listen {{ $listen_port }} ssl http2;
   server_name {{ $.NOSSL_SERVER_NAME }};
-  access_log /var/log/nginx/{{ $.APP }}-access.log custom;
+  access_log /var/log/nginx/{{ $.APP }}-access.log;
   error_log /var/log/nginx/{{ $.APP }}-error.log;
   ssl_certificate {{ $.APP_SSL_PATH }}/server.crt;
   ssl_certificate_key {{ $.APP_SSL_PATH }}/server.key;
@@ -37,37 +31,21 @@ server {
   ssl_prefer_server_ciphers off;
   keepalive_timeout 70;
-  keepalive_requests 500;
-  proxy_read_timeout 3600;
-  limit_conn addr 10000;
-  client_max_body_size 100M;
 {{ end }}
   location / {
     proxy_pass http://{{ $.APP }}-{{ $upstream_port }};
     {{ $proxy_settings }}
     {{ $gzip_settings }}
-    proxy_cache my_cache;
-    proxy_cache_revalidate on;
-    proxy_cache_min_uses 2;
-    proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
-    proxy_cache_background_update on;
-    proxy_cache_lock on;
-    # Connections and request limits increase (bad for DDos)
-    limit_req zone=req_zone burst=10 nodelay;
+    {{ $cors_headers_options }}
+    {{ $cors_headers_post }}
+    {{ $cors_headers_get }}
   }
   location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
-    proxy_pass http://{{ $.APP }}-{{ $upstream_port }};
-    expires 30d;
+    expires 30d; # This means that the client can cache these resources for 30 days.
     add_header Cache-Control "public, no-transform";
-  }
-  location ~* \.(mp3|wav|ogg|flac|aac|aif|webm)$ {
-    proxy_pass http://{{ $.APP }}-{{ $upstream_port }};
   }
@@ -95,6 +73,7 @@ server {
     internal;
   }
+  # include /home/dokku/gateway/nginx.conf.d/*.conf;
   include {{ $.DOKKU_ROOT }}/{{ $.APP }}/nginx.conf.d/*.conf;
 }
 {{ end }}

36
orm/__init__.py Normal file
View File

@@ -0,0 +1,36 @@
from base.orm import Base, engine
from orm.community import Community
from orm.notification import Notification
from orm.rbac import Operation, Permission, Resource, Role
from orm.reaction import Reaction
from orm.shout import Shout
from orm.topic import Topic, TopicFollower
from orm.user import User, UserRating
def init_tables():
Base.metadata.create_all(engine)
Operation.init_table()
Resource.init_table()
User.init_table()
Community.init_table()
Role.init_table()
UserRating.init_table()
Shout.init_table()
print("[orm] tables initialized")
__all__ = [
"User",
"Role",
"Operation",
"Permission",
"Community",
"Shout",
"Topic",
"TopicFollower",
"Notification",
"Reaction",
"UserRating",
"init_tables",
]
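
init_tables() is intended to run once at process startup: create_all() builds the schema, then the per-model init_table() hooks seed the default rows (operations, resources, roles, the "discours" community). A minimal sketch of a startup hook; the actual entry point is not shown in this diff:

from orm import init_tables

def on_startup():
    # Safe on a fresh database: create_all() is idempotent and the
    # init_table() hooks only insert rows that are missing.
    init_tables()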

View File

@@ -1,136 +0,0 @@
import time
from sqlalchemy import JSON, Boolean, Column, ForeignKey, Index, Integer, String
from services.db import Base
# from sqlalchemy_utils import TSVectorType
class AuthorRating(Base):
"""
Рейтинг автора от другого автора.
Attributes:
rater (int): ID оценивающего автора
author (int): ID оцениваемого автора
plus (bool): Положительная/отрицательная оценка
"""
__tablename__ = "author_rating"
id = None # type: ignore
rater = Column(ForeignKey("author.id"), primary_key=True)
author = Column(ForeignKey("author.id"), primary_key=True)
plus = Column(Boolean)
# Define the indexes
__table_args__ = (
# Index for fast lookup of all ratings of a given author
Index("idx_author_rating_author", "author"),
# Index for fast lookup of all ratings left by a given author
Index("idx_author_rating_rater", "rater"),
)
class AuthorFollower(Base):
"""
Подписка одного автора на другого.
Attributes:
follower (int): ID подписчика
author (int): ID автора, на которого подписываются
created_at (int): Время создания подписки
auto (bool): Признак автоматической подписки
"""
__tablename__ = "author_follower"
id = None # type: ignore
follower = Column(ForeignKey("author.id"), primary_key=True)
author = Column(ForeignKey("author.id"), primary_key=True)
created_at = Column(Integer, nullable=False, default=lambda: int(time.time()))
auto = Column(Boolean, nullable=False, default=False)
# Define the indexes
__table_args__ = (
# Index for fast lookup of all followers of an author
Index("idx_author_follower_author", "author"),
# Index for fast lookup of all authors a given author follows
Index("idx_author_follower_follower", "follower"),
)
class AuthorBookmark(Base):
"""
Закладка автора на публикацию.
Attributes:
author (int): ID автора
shout (int): ID публикации
"""
__tablename__ = "author_bookmark"
id = None # type: ignore
author = Column(ForeignKey("author.id"), primary_key=True)
shout = Column(ForeignKey("shout.id"), primary_key=True)
# Define the indexes
__table_args__ = (
# Index for fast lookup of all bookmarks of an author
Index("idx_author_bookmark_author", "author"),
# Index for fast lookup of all authors who bookmarked a publication
Index("idx_author_bookmark_shout", "shout"),
)
class Author(Base):
"""
Модель автора в системе.
Attributes:
name (str): Отображаемое имя
slug (str): Уникальный строковый идентификатор
bio (str): Краткая биография/статус
about (str): Полное описание
pic (str): URL изображения профиля
links (dict): Ссылки на социальные сети и сайты
created_at (int): Время создания профиля
last_seen (int): Время последнего посещения
updated_at (int): Время последнего обновления
deleted_at (int): Время удаления (если профиль удален)
"""
__tablename__ = "author"
name = Column(String, nullable=True, comment="Display name")
slug = Column(String, unique=True, comment="Author's slug")
bio = Column(String, nullable=True, comment="Bio") # status description
about = Column(String, nullable=True, comment="About") # long and formatted
pic = Column(String, nullable=True, comment="Picture")
links = Column(JSON, nullable=True, comment="Links")
created_at = Column(Integer, nullable=False, default=lambda: int(time.time()))
last_seen = Column(Integer, nullable=False, default=lambda: int(time.time()))
updated_at = Column(Integer, nullable=False, default=lambda: int(time.time()))
deleted_at = Column(Integer, nullable=True, comment="Deleted at")
# search_vector = Column(
# TSVectorType("name", "slug", "bio", "about", regconfig="pg_catalog.russian")
# )
# Define the indexes
__table_args__ = (
# Index for fast lookup by name
Index("idx_author_name", "name"),
# Index for fast lookup by slug
Index("idx_author_slug", "slug"),
# Index for filtering non-deleted authors
Index(
"idx_author_deleted_at", "deleted_at", postgresql_where=deleted_at.is_(None)
),
# Index for sorting by creation time (for new authors)
Index("idx_author_created_at", "created_at"),
# Index for sorting by last-seen time
Index("idx_author_last_seen", "last_seen"),
)

View File

@@ -1,14 +1,12 @@
-import time
-
-from sqlalchemy import Column, ForeignKey, Integer, String
-from services.db import Base
+from sqlalchemy import Column, DateTime, ForeignKey, String, func
+
+from base.orm import Base

 class ShoutCollection(Base):
     __tablename__ = "shout_collection"

-    id = None  # type: ignore
+    id = None
     shout = Column(ForeignKey("shout.id"), primary_key=True)
     collection = Column(ForeignKey("collection.id"), primary_key=True)
@@ -20,6 +18,6 @@ class Collection(Base):
     title = Column(String, nullable=False, comment="Title")
     body = Column(String, nullable=True, comment="Body")
     pic = Column(String, nullable=True, comment="Picture")
-    created_at = Column(Integer, default=lambda: int(time.time()))
-    created_by = Column(ForeignKey("author.id"), comment="Created By")
-    published_at = Column(Integer, default=lambda: int(time.time()))
+    createdAt = Column(DateTime(timezone=True), server_default=func.now(), comment="Created At")
+    createdBy = Column(ForeignKey("user.id"), comment="Created By")
+    publishedAt = Column(DateTime(timezone=True), server_default=func.now(), comment="Published At")

View File

@@ -1,106 +1,38 @@
-import enum
-import time
-
-from sqlalchemy import Column, ForeignKey, Integer, String, Text, distinct, func
-from sqlalchemy.ext.hybrid import hybrid_property
-
-from orm.author import Author
-from services.db import Base
+from sqlalchemy import Column, DateTime, ForeignKey, String, func
+
+from base.orm import Base, local_session

-class CommunityRole(enum.Enum):
-    READER = "reader"  # can read and comment
-    AUTHOR = "author"  # + can vote and invite collaborators
-    ARTIST = "artist"  # + can be credited as featured artist
-    EXPERT = "expert"  # + can add proof or disproof to shouts, can manage topics
-    EDITOR = "editor"  # + can manage topics, comments and community settings
-
-    @classmethod
-    def as_string_array(cls, roles):
-        return [role.value for role in roles]
-
 class CommunityFollower(Base):
-    __tablename__ = "community_author"
-
-    author = Column(ForeignKey("author.id"), primary_key=True)
-    community = Column(ForeignKey("community.id"), primary_key=True)
-    joined_at = Column(Integer, nullable=False, default=lambda: int(time.time()))
-    roles = Column(Text, nullable=True, comment="Roles (comma-separated)")
-
-    def set_roles(self, roles):
-        self.roles = CommunityRole.as_string_array(roles)
-
-    def get_roles(self):
-        return [CommunityRole(role) for role in self.roles]
+    __tablename__ = "community_followers"
+
+    id = None
+    follower: Column = Column(ForeignKey("user.id"), primary_key=True)
+    community: Column = Column(ForeignKey("community.id"), primary_key=True)
+    joinedAt = Column(
+        DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
+    )
+    # role = Column(ForeignKey(Role.id), nullable=False, comment="Role for member")

 class Community(Base):
     __tablename__ = "community"

-    name = Column(String, nullable=False)
-    slug = Column(String, nullable=False, unique=True)
+    name = Column(String, nullable=False, comment="Name")
+    slug = Column(String, nullable=False, unique=True, comment="Slug")
     desc = Column(String, nullable=False, default="")
     pic = Column(String, nullable=False, default="")
-    created_at = Column(Integer, nullable=False, default=lambda: int(time.time()))
-    created_by = Column(ForeignKey("author.id"), nullable=False)
-
-    @hybrid_property
-    def stat(self):
-        return CommunityStats(self)
-
-    @property
-    def role_list(self):
-        return self.roles.split(",") if self.roles else []
-
-    @role_list.setter
-    def role_list(self, value):
-        self.roles = ",".join(value) if value else None
-
-class CommunityStats:
-    def __init__(self, community):
-        self.community = community
-
-    @property
-    def shouts(self):
-        from orm.shout import Shout
-
-        return self.community.session.query(func.count(Shout.id)).filter(Shout.community == self.community.id).scalar()
-
-    @property
-    def followers(self):
-        return (
-            self.community.session.query(func.count(CommunityFollower.author))
-            .filter(CommunityFollower.community == self.community.id)
-            .scalar()
-        )
-
-    @property
-    def authors(self):
-        from orm.shout import Shout
-
-        # author has a shout with community id and its featured_at is not null
-        return (
-            self.community.session.query(func.count(distinct(Author.id)))
-            .join(Shout)
-            .filter(Shout.community == self.community.id, Shout.featured_at.is_not(None), Author.id.in_(Shout.authors))
-            .scalar()
-        )
-
-class CommunityAuthor(Base):
-    __tablename__ = "community_author"
-
-    id = Column(Integer, primary_key=True)
-    community_id = Column(Integer, ForeignKey("community.id"))
-    author_id = Column(Integer, ForeignKey("author.id"))
-    roles = Column(Text, nullable=True, comment="Roles (comma-separated)")
-
-    @property
-    def role_list(self):
-        return self.roles.split(",") if self.roles else []
-
-    @role_list.setter
-    def role_list(self, value):
-        self.roles = ",".join(value) if value else None
+    createdAt = Column(
+        DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
+    )
+
+    @staticmethod
+    def init_table():
+        with local_session() as session:
+            d = session.query(Community).filter(Community.slug == "discours").first()
+            if not d:
+                d = Community.create(name="Дискурс", slug="discours")
+                session.add(d)
+                session.commit()
+            Community.default_community = d
+            print("[orm] default community id: %s" % d.id)

View File

@@ -1,105 +0,0 @@
import time
from sqlalchemy import JSON, Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from orm.author import Author
from orm.topic import Topic
from services.db import Base
class DraftTopic(Base):
__tablename__ = "draft_topic"
id = None # type: ignore
shout = Column(ForeignKey("draft.id"), primary_key=True, index=True)
topic = Column(ForeignKey("topic.id"), primary_key=True, index=True)
main = Column(Boolean, nullable=True)
class DraftAuthor(Base):
__tablename__ = "draft_author"
id = None # type: ignore
shout = Column(ForeignKey("draft.id"), primary_key=True, index=True)
author = Column(ForeignKey("author.id"), primary_key=True, index=True)
caption = Column(String, nullable=True, default="")
class Draft(Base):
__tablename__ = "draft"
# required
created_at: int = Column(Integer, nullable=False, default=lambda: int(time.time()))
# Columns linking to the author
created_by: int = Column("created_by", ForeignKey("author.id"), nullable=False)
community: int = Column("community", ForeignKey("community.id"), nullable=False, default=1)
# optional
layout: str = Column(String, nullable=True, default="article")
slug: str = Column(String, unique=True)
title: str = Column(String, nullable=True)
subtitle: str | None = Column(String, nullable=True)
lead: str | None = Column(String, nullable=True)
body: str = Column(String, nullable=False, comment="Body")
media: dict | None = Column(JSON, nullable=True)
cover: str | None = Column(String, nullable=True, comment="Cover image url")
cover_caption: str | None = Column(String, nullable=True, comment="Cover image alt caption")
lang: str = Column(String, nullable=False, default="ru", comment="Language")
seo: str | None = Column(String, nullable=True) # JSON
# auto
updated_at: int | None = Column(Integer, nullable=True, index=True)
deleted_at: int | None = Column(Integer, nullable=True, index=True)
updated_by: int | None = Column("updated_by", ForeignKey("author.id"), nullable=True)
deleted_by: int | None = Column("deleted_by", ForeignKey("author.id"), nullable=True)
# --- Relationships ---
# Only many-to-many relations, via association tables
authors = relationship(Author, secondary="draft_author", lazy="select")
topics = relationship(Topic, secondary="draft_topic", lazy="select")
# Relation to Community (if needed as an object rather than an ID)
# community = relationship("Community", foreign_keys=[community_id], lazy="joined")
# For now community_id stays a plain ID
# Relation to the publication (one-to-one or one-to-zero)
# Loaded via joinedload in the resolver
publication = relationship(
"Shout",
primaryjoin="Draft.id == Shout.draft",
foreign_keys="Shout.draft",
uselist=False,
lazy="noload", # Не грузим по умолчанию, только через options
viewonly=True # Указываем, что это связь только для чтения
)
def dict(self):
"""
Сериализует объект Draft в словарь.
Гарантирует, что поля topics и authors всегда будут списками.
"""
return {
"id": self.id,
"created_at": self.created_at,
"created_by": self.created_by,
"community": self.community,
"layout": self.layout,
"slug": self.slug,
"title": self.title,
"subtitle": self.subtitle,
"lead": self.lead,
"body": self.body,
"media": self.media or [],
"cover": self.cover,
"cover_caption": self.cover_caption,
"lang": self.lang,
"seo": self.seo,
"updated_at": self.updated_at,
"deleted_at": self.deleted_at,
"updated_by": self.updated_by,
"deleted_by": self.deleted_by,
# Guarantee that topics and authors are always lists
"topics": [topic.dict() for topic in (self.topics or [])],
"authors": [author.dict() for author in (self.authors or [])]
}

View File

@@ -1,35 +0,0 @@
import enum
from sqlalchemy import Column, ForeignKey, String
from sqlalchemy.orm import relationship
from services.db import Base
class InviteStatus(enum.Enum):
PENDING = "PENDING"
ACCEPTED = "ACCEPTED"
REJECTED = "REJECTED"
@classmethod
def from_string(cls, value):
return cls(value)
class Invite(Base):
__tablename__ = "invite"
inviter_id = Column(ForeignKey("author.id"), primary_key=True)
author_id = Column(ForeignKey("author.id"), primary_key=True)
shout_id = Column(ForeignKey("shout.id"), primary_key=True)
status = Column(String, default=InviteStatus.PENDING.value)
inviter = relationship("Author", foreign_keys=[inviter_id])
author = relationship("Author", foreign_keys=[author_id])
shout = relationship("Shout")
def set_status(self, status: InviteStatus):
self.status = status.value
def get_status(self) -> InviteStatus:
return InviteStatus.from_string(self.status)

View File

@@ -1,63 +1,26 @@
-import enum
-import time
-
-from sqlalchemy import JSON, Column, ForeignKey, Integer, String
-from sqlalchemy.orm import relationship
-
-from orm.author import Author
-from services.db import Base
+from enum import Enum as Enumeration
+
+from sqlalchemy import Boolean, Column, DateTime, Enum, ForeignKey, Integer, func
+from sqlalchemy.dialects.postgresql import JSONB
+
+from base.orm import Base

-class NotificationEntity(enum.Enum):
-    REACTION = "reaction"
-    SHOUT = "shout"
-    FOLLOWER = "follower"
-    COMMUNITY = "community"
-
-    @classmethod
-    def from_string(cls, value):
-        return cls(value)
-
-class NotificationAction(enum.Enum):
-    CREATE = "create"
-    UPDATE = "update"
-    DELETE = "delete"
-    SEEN = "seen"
-    FOLLOW = "follow"
-    UNFOLLOW = "unfollow"
-
-    @classmethod
-    def from_string(cls, value):
-        return cls(value)
-
-class NotificationSeen(Base):
-    __tablename__ = "notification_seen"
-
-    viewer = Column(ForeignKey("author.id"), primary_key=True)
-    notification = Column(ForeignKey("notification.id"), primary_key=True)
+class NotificationType(Enumeration):
+    NEW_COMMENT = 1
+    NEW_REPLY = 2

 class Notification(Base):
     __tablename__ = "notification"

-    id = Column(Integer, primary_key=True, autoincrement=True)
-    created_at = Column(Integer, server_default=str(int(time.time())))
-    entity = Column(String, nullable=False)
-    action = Column(String, nullable=False)
-    payload = Column(JSON, nullable=True)
-
-    seen = relationship(Author, secondary="notification_seen")
-
-    def set_entity(self, entity: NotificationEntity):
-        self.entity = entity.value
-
-    def get_entity(self) -> NotificationEntity:
-        return NotificationEntity.from_string(self.entity)
-
-    def set_action(self, action: NotificationAction):
-        self.action = action.value
-
-    def get_action(self) -> NotificationAction:
-        return NotificationAction.from_string(self.action)
+    shout: Column = Column(ForeignKey("shout.id"), index=True)
+    reaction: Column = Column(ForeignKey("reaction.id"), index=True)
+    user: Column = Column(ForeignKey("user.id"), index=True)
+    createdAt = Column(
+        DateTime(timezone=True), nullable=False, server_default=func.now(), index=True
+    )
+    seen = Column(Boolean, nullable=False, default=False, index=True)
+    type = Column(Enum(NotificationType), nullable=False)
+    data = Column(JSONB, nullable=True)
+    occurrences = Column(Integer, default=1)

View File

@@ -1,30 +0,0 @@
from orm.reaction import ReactionKind
PROPOSAL_REACTIONS = [
ReactionKind.ACCEPT.value,
ReactionKind.REJECT.value,
ReactionKind.AGREE.value,
ReactionKind.DISAGREE.value,
ReactionKind.ASK.value,
ReactionKind.PROPOSE.value,
]
PROOF_REACTIONS = [ReactionKind.PROOF.value, ReactionKind.DISPROOF.value]
RATING_REACTIONS = [ReactionKind.LIKE.value, ReactionKind.DISLIKE.value]
def is_negative(x):
return x in [
ReactionKind.DISLIKE.value,
ReactionKind.DISPROOF.value,
ReactionKind.REJECT.value,
]
def is_positive(x):
return x in [
ReactionKind.ACCEPT.value,
ReactionKind.LIKE.value,
ReactionKind.PROOF.value,
]

178
orm/rbac.py Normal file
View File

@@ -0,0 +1,178 @@
import warnings
from sqlalchemy import Column, ForeignKey, String, TypeDecorator, UniqueConstraint
from sqlalchemy.orm import relationship
from base.orm import REGISTRY, Base, local_session
# Role Based Access Control #
class ClassType(TypeDecorator):
impl = String
@property
def python_type(self):
return NotImplemented
def process_literal_param(self, value, dialect):
return NotImplemented
def process_bind_param(self, value, dialect):
return value.__name__ if isinstance(value, type) else str(value)
def process_result_value(self, value, dialect):
class_ = REGISTRY.get(value)
if class_ is None:
warnings.warn(f"Can't find class <{value}>, find it yourself!", stacklevel=2)
return class_
class Role(Base):
__tablename__ = "role"
name = Column(String, nullable=False, comment="Role Name")
desc = Column(String, nullable=True, comment="Role Description")
community = Column(
ForeignKey("community.id", ondelete="CASCADE"),
nullable=False,
comment="Community",
)
permissions = relationship(lambda: Permission)
@staticmethod
def init_table():
with local_session() as session:
r = session.query(Role).filter(Role.name == "author").first()
if r:
Role.default_role = r
return
r1 = Role.create(
name="author",
desc="Role for an author",
community=1,
)
session.add(r1)
Role.default_role = r1
r2 = Role.create(
name="reader",
desc="Role for a reader",
community=1,
)
session.add(r2)
r3 = Role.create(
name="expert",
desc="Role for an expert",
community=1,
)
session.add(r3)
r4 = Role.create(
name="editor",
desc="Role for an editor",
community=1,
)
session.add(r4)
class Operation(Base):
__tablename__ = "operation"
name = Column(String, nullable=False, unique=True, comment="Operation Name")
@staticmethod
def init_table():
with local_session() as session:
for name in ["create", "update", "delete", "load"]:
"""
* everyone can:
- load shouts
- load topics
- load reactions
- create an account to become a READER
* readers can:
- update and delete their account
- load chats
- load messages
- create reaction of some shout's author allowed kinds
- create shout to become an AUTHOR
* authors can:
- update and delete their shout
- invite other authors to edit shout and chat
- manage allowed reactions for their shout
* pros can:
- create/update/delete their community
- create/update/delete topics for their community
"""
op = session.query(Operation).filter(Operation.name == name).first()
if not op:
op = Operation.create(name=name)
session.add(op)
session.commit()
class Resource(Base):
__tablename__ = "resource"
resourceClass = Column(String, nullable=False, unique=True, comment="Resource class")
name = Column(String, nullable=False, unique=True, comment="Resource name")
# TODO: community = Column(ForeignKey())
@staticmethod
def init_table():
with local_session() as session:
for res in [
"shout",
"topic",
"reaction",
"chat",
"message",
"invite",
"community",
"user",
]:
r = session.query(Resource).filter(Resource.name == res).first()
if not r:
r = Resource.create(name=res, resourceClass=res)
session.add(r)
session.commit()
class Permission(Base):
__tablename__ = "permission"
__table_args__ = (
UniqueConstraint("role", "operation", "resource"),
{"extend_existing": True},
)
role: Column = Column(ForeignKey("role.id", ondelete="CASCADE"), nullable=False, comment="Role")
operation: Column = Column(
ForeignKey("operation.id", ondelete="CASCADE"),
nullable=False,
comment="Operation",
)
resource: Column = Column(
ForeignKey("resource.id", ondelete="CASCADE"),
nullable=False,
comment="Resource",
)
# if __name__ == "__main__":
# Base.metadata.create_all(engine)
# ops = [
# Permission(role=1, operation=1, resource=1),
# Permission(role=1, operation=2, resource=1),
# Permission(role=1, operation=3, resource=1),
# Permission(role=1, operation=4, resource=1),
# Permission(role=2, operation=4, resource=1),
# ]
# global_session.add_all(ops)
# global_session.commit()
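
ClassType stores a Python class as its name string and resolves it back through REGISTRY on read. A small round-trip sketch, assuming base.orm registers model classes in REGISTRY by name:

ct = ClassType()

# Binding: a class is written as its __name__; anything else falls back to str()
assert ct.process_bind_param(Role, None) == "Role"

# Reading: the stored name is looked up in REGISTRY; an unknown
# name emits a warning and yields None
role_class = ct.process_result_value("Role", None)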

View File

@@ -1,45 +1,47 @@
-import time
 from enum import Enum as Enumeration

-from sqlalchemy import Column, ForeignKey, Integer, String
-from services.db import Base
+from sqlalchemy import Column, DateTime, Enum, ForeignKey, String, func
+from base.orm import Base

 class ReactionKind(Enumeration):
-    AGREE = 1  # +1
-    DISAGREE = 2  # -1
-    PROOF = 3  # +1
-    DISPROOF = 4  # -1
-    ASK = 5  # +0
-    PROPOSE = 6  # +0
-    QUOTE = 7  # +0 bookmark
-    COMMENT = 8  # +0
-    ACCEPT = 9  # +1
-    REJECT = 0  # -1
-    LIKE = 11  # +1
-    DISLIKE = 12  # -1
-    REMARK = 13  # 0
-    FOOTNOTE = 14  # 0
     # TYPE = <reaction index> # rating diff
+
+    # editor mode
+    AGREE = "AGREE"  # +1
+    DISAGREE = "DISAGREE"  # -1
+    ASK = "ASK"  # +0
+    PROPOSE = "PROPOSE"  # +0
+    ACCEPT = "ACCEPT"  # +1
+    REJECT = "REJECT"  # -1
+    # expert mode
+    PROOF = "PROOF"  # +1
+    DISPROOF = "DISPROOF"  # -1
+    # public feed
+    QUOTE = "QUOTE"  # +0 TODO: use to bookmark in collection
+    COMMENT = "COMMENT"  # +0
+    LIKE = "LIKE"  # +1
+    DISLIKE = "DISLIKE"  # -1

 class Reaction(Base):
     __tablename__ = "reaction"

-    body = Column(String, default="", comment="Reaction Body")
-    created_at = Column(Integer, nullable=False, default=lambda: int(time.time()), index=True)
-    updated_at = Column(Integer, nullable=True, comment="Updated at", index=True)
-    deleted_at = Column(Integer, nullable=True, comment="Deleted at", index=True)
-    deleted_by = Column(ForeignKey("author.id"), nullable=True)
-    reply_to = Column(ForeignKey("reaction.id"), nullable=True)
-    quote = Column(String, nullable=True, comment="Original quoted text")
-    shout = Column(ForeignKey("shout.id"), nullable=False, index=True)
-    created_by = Column(ForeignKey("author.id"), nullable=False)
-    kind = Column(String, nullable=False, index=True)
-
-    oid = Column(String)
+    body = Column(String, nullable=True, comment="Reaction Body")
+    createdAt = Column(
+        DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
+    )
+    createdBy: Column = Column(ForeignKey("user.id"), nullable=False, index=True, comment="Sender")
+    updatedAt = Column(DateTime(timezone=True), nullable=True, comment="Updated at")
+    updatedBy: Column = Column(
+        ForeignKey("user.id"), nullable=True, index=True, comment="Last Editor"
+    )
+    deletedAt = Column(DateTime(timezone=True), nullable=True, comment="Deleted at")
+    deletedBy: Column = Column(
+        ForeignKey("user.id"), nullable=True, index=True, comment="Deleted by"
+    )
+    shout: Column = Column(ForeignKey("shout.id"), nullable=False, index=True)
+    replyTo: Column = Column(
+        ForeignKey("reaction.id"), nullable=True, comment="Reply to reaction ID"
+    )
+    range = Column(String, nullable=True, comment="Range in format <start index>:<end>")
+    kind = Column(Enum(ReactionKind), nullable=False, comment="Reaction kind")
+    oid = Column(String, nullable=True, comment="Old ID")

View File

@@ -1,126 +1,98 @@
-import time
-
-from sqlalchemy import JSON, Boolean, Column, ForeignKey, Index, Integer, String
-from sqlalchemy.orm import relationship
-
-from orm.author import Author
+from sqlalchemy import (
+    JSON,
+    Boolean,
+    Column,
+    DateTime,
+    ForeignKey,
+    Integer,
+    String,
+    func,
+)
+from sqlalchemy.orm import column_property, relationship
+
+from base.orm import Base, local_session
 from orm.reaction import Reaction
 from orm.topic import Topic
-from services.db import Base
+from orm.user import User


 class ShoutTopic(Base):
-    """
-    Link between a publication and a topic.
-
-    Attributes:
-        shout (int): publication ID
-        topic (int): topic ID
-        main (bool): whether this is the main topic
-    """
-
     __tablename__ = "shout_topic"

-    id = None  # type: ignore
-    shout = Column(ForeignKey("shout.id"), primary_key=True, index=True)
-    topic = Column(ForeignKey("topic.id"), primary_key=True, index=True)
-    main = Column(Boolean, nullable=True)
-
-    # Additional indexes
-    __table_args__ = (
-        # Optimized composite index for queries that look up publications by topic
-        Index("idx_shout_topic_topic_shout", "topic", "shout"),
-    )
+    id = None
+    shout: Column = Column(ForeignKey("shout.id"), primary_key=True, index=True)
+    topic: Column = Column(ForeignKey("topic.id"), primary_key=True, index=True)


 class ShoutReactionsFollower(Base):
     __tablename__ = "shout_reactions_followers"

-    id = None  # type: ignore
-    follower = Column(ForeignKey("author.id"), primary_key=True, index=True)
-    shout = Column(ForeignKey("shout.id"), primary_key=True, index=True)
+    id = None
+    follower: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
+    shout: Column = Column(ForeignKey("shout.id"), primary_key=True, index=True)
     auto = Column(Boolean, nullable=False, default=False)
-    created_at = Column(Integer, nullable=False, default=lambda: int(time.time()))
-    deleted_at = Column(Integer, nullable=True)
+    createdAt = Column(
+        DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
+    )
+    deletedAt = Column(DateTime(timezone=True), nullable=True)


 class ShoutAuthor(Base):
-    """
-    Link between a publication and an author.
-
-    Attributes:
-        shout (int): publication ID
-        author (int): author ID
-        caption (str): author's caption
-    """
-
     __tablename__ = "shout_author"

-    id = None  # type: ignore
-    shout = Column(ForeignKey("shout.id"), primary_key=True, index=True)
-    author = Column(ForeignKey("author.id"), primary_key=True, index=True)
-    caption = Column(String, nullable=True, default="")
-
-    # Additional indexes
-    __table_args__ = (
-        # Optimized index for queries that look up publications by author
-        Index("idx_shout_author_author_shout", "author", "shout"),
-    )
+    id = None
+    shout: Column = Column(ForeignKey("shout.id"), primary_key=True, index=True)
+    user: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
+    caption: Column = Column(String, nullable=True, default="")


 class Shout(Base):
-    """
-    A publication in the system.
-    """
-
     __tablename__ = "shout"

-    created_at: int = Column(Integer, nullable=False, default=lambda: int(time.time()))
-    updated_at: int | None = Column(Integer, nullable=True, index=True)
-    published_at: int | None = Column(Integer, nullable=True, index=True)
-    featured_at: int | None = Column(Integer, nullable=True, index=True)
-    deleted_at: int | None = Column(Integer, nullable=True, index=True)
-    created_by: int = Column(ForeignKey("author.id"), nullable=False)
-    updated_by: int | None = Column(ForeignKey("author.id"), nullable=True)
-    deleted_by: int | None = Column(ForeignKey("author.id"), nullable=True)
-    community: int = Column(ForeignKey("community.id"), nullable=False)
-    body: str = Column(String, nullable=False, comment="Body")
-    slug: str = Column(String, unique=True)
-    cover: str | None = Column(String, nullable=True, comment="Cover image url")
-    cover_caption: str | None = Column(String, nullable=True, comment="Cover image alt caption")
-    lead: str | None = Column(String, nullable=True)
-    title: str = Column(String, nullable=False)
-    subtitle: str | None = Column(String, nullable=True)
-    layout: str = Column(String, nullable=False, default="article")
-    media: dict | None = Column(JSON, nullable=True)
-    authors = relationship(Author, secondary="shout_author")
-    topics = relationship(Topic, secondary="shout_topic")
-    reactions = relationship(Reaction)
-    lang: str = Column(String, nullable=False, default="ru", comment="Language")
-    version_of: int | None = Column(ForeignKey("shout.id"), nullable=True)
-    oid: str | None = Column(String, nullable=True)
-    seo: str | None = Column(String, nullable=True)  # JSON
-    draft: int | None = Column(ForeignKey("draft.id"), nullable=True)
-
-    # Indexes
-    __table_args__ = (
-        # Index for fast lookup of non-deleted publications
-        Index("idx_shout_deleted_at", "deleted_at", postgresql_where=deleted_at.is_(None)),
-        # Index for fast filtering by community
-        Index("idx_shout_community", "community"),
-        # Index for fast lookup by slug
-        Index("idx_shout_slug", "slug"),
-        # Composite index for filtering published, non-deleted publications
-        Index(
-            "idx_shout_published_deleted",
-            "published_at",
-            "deleted_at",
-            postgresql_where=published_at.is_not(None) & deleted_at.is_(None),
-        ),
-    )
+    # timestamps
+    createdAt = Column(
+        DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
+    )
+    updatedAt = Column(DateTime(timezone=True), nullable=True, comment="Updated at")
+    publishedAt = Column(DateTime(timezone=True), nullable=True)
+    deletedAt = Column(DateTime(timezone=True), nullable=True)
+
+    createdBy: Column = Column(ForeignKey("user.id"), comment="Created By")
+    deletedBy: Column = Column(ForeignKey("user.id"), nullable=True)
+
+    slug = Column(String, unique=True)
+    cover = Column(String, nullable=True, comment="Cover image url")
+    lead = Column(String, nullable=True)
+    description = Column(String, nullable=True)
+    body = Column(String, nullable=False, comment="Body")
+    title = Column(String, nullable=True)
+    subtitle = Column(String, nullable=True)
+    layout = Column(String, nullable=True)
+    media = Column(JSON, nullable=True)
+    authors = relationship(lambda: User, secondary=ShoutAuthor.__tablename__)
+    topics = relationship(lambda: Topic, secondary=ShoutTopic.__tablename__)
+
+    # views from the old Discours website
+    viewsOld = Column(Integer, default=0)
+    # views from Ackee tracker on the new Discours website
+    viewsAckee = Column(Integer, default=0)
+    views = column_property(viewsOld + viewsAckee)
+    reactions = relationship(lambda: Reaction)
+
+    # TODO: these field should be used or modified
+    community: Column = Column(ForeignKey("community.id"), default=1)
+    lang = Column(String, nullable=False, default="ru", comment="Language")
+    mainTopic: Column = Column(ForeignKey("topic.slug"), nullable=True)
+    visibility = Column(String, nullable=True)  # owner authors community public
+    versionOf: Column = Column(ForeignKey("shout.id"), nullable=True)
+    oid = Column(String, nullable=True)
+
+    @staticmethod
+    def init_table():
+        with local_session() as session:
+            s = session.query(Shout).first()
+            if not s:
+                entry = {"slug": "genesis-block", "body": "", "title": "Ничего", "lang": "ru"}
+                s = Shout.create(**entry)
+                session.add(s)
+                session.commit()
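
The views attribute on the added side is worth a note: column_property turns the sum of two real columns into a read-only, SQL-computed attribute that can be selected and sorted like any other column. A minimal self-contained sketch of the same pattern, with a hypothetical Post model standing in for Shout:

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import Session, column_property, declarative_base

Base = declarative_base()

class Post(Base):  # hypothetical stand-in for Shout
    __tablename__ = "post"
    id = Column(Integer, primary_key=True)
    viewsOld = Column(Integer, default=0)
    viewsAckee = Column(Integer, default=0)
    # Computed in SQL as viewsOld + viewsAckee; read-only on instances.
    views = column_property(viewsOld + viewsAckee)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Post(viewsOld=2, viewsAckee=3))
    session.commit()
    print(session.query(Post).first().views)  # 5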


@@ -1,66 +1,26 @@
-import time
-
-from sqlalchemy import JSON, Boolean, Column, ForeignKey, Index, Integer, String
-
-from services.db import Base
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, String, func
+
+from base.orm import Base


 class TopicFollower(Base):
-    """
-    Link between a topic and its follower.
-
-    Attributes:
-        follower (int): follower ID
-        topic (int): topic ID
-        created_at (int): time the link was created
-        auto (bool): automatic subscription
-    """
-
     __tablename__ = "topic_followers"

-    id = None  # type: ignore
-    follower = Column(Integer, ForeignKey("author.id"), primary_key=True)
-    topic = Column(Integer, ForeignKey("topic.id"), primary_key=True)
-    created_at = Column(Integer, nullable=False, default=int(time.time()))
-    auto = Column(Boolean, nullable=False, default=False)
-
-    # Indexes
-    __table_args__ = (
-        # Index for fast lookup of all followers of a topic
-        Index("idx_topic_followers_topic", "topic"),
-        # Index for fast lookup of all topics an author follows
-        Index("idx_topic_followers_follower", "follower"),
-    )
+    id = None
+    follower: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
+    topic: Column = Column(ForeignKey("topic.id"), primary_key=True, index=True)
+    createdAt = Column(
+        DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
+    )
+    auto = Column(Boolean, nullable=False, default=False)


 class Topic(Base):
-    """
-    Topic (theme) model for publications.
-
-    Attributes:
-        slug (str): unique string identifier of the topic
-        title (str): topic title
-        body (str): topic description
-        pic (str): topic image URL
-        community (int): community ID
-        oid (str): old ID
-        parent_ids (list): IDs of parent topics
-    """
-
     __tablename__ = "topic"

     slug = Column(String, unique=True)
     title = Column(String, nullable=False, comment="Title")
     body = Column(String, nullable=True, comment="Body")
     pic = Column(String, nullable=True, comment="Picture")
-    community = Column(ForeignKey("community.id"), default=1)
+    community: Column = Column(ForeignKey("community.id"), default=1, comment="Community")
     oid = Column(String, nullable=True, comment="Old ID")
-    parent_ids = Column(JSON, nullable=True, comment="Parent Topic IDs")
-
-    # Indexes
-    __table_args__ = (
-        # Index for fast lookup by slug
-        Index("idx_topic_slug", "slug"),
-        # Index for fast lookup by community
-        Index("idx_topic_community", "community"),
-    )

orm/user.py (new file, 105 lines)

@@ -0,0 +1,105 @@
from sqlalchemy import JSON as JSONType
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String, func
from sqlalchemy.orm import relationship
from base.orm import Base, local_session
from orm.rbac import Role
class UserRating(Base):
__tablename__ = "user_rating"
id = None
rater: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
user: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
value: Column = Column(Integer)
@staticmethod
def init_table():
pass
class UserRole(Base):
__tablename__ = "user_role"
id = None
user = Column(ForeignKey("user.id"), primary_key=True, index=True)
role = Column(ForeignKey("role.id"), primary_key=True, index=True)
class AuthorFollower(Base):
__tablename__ = "author_follower"
id = None
follower: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
author: Column = Column(ForeignKey("user.id"), primary_key=True, index=True)
createdAt = Column(
DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
)
auto = Column(Boolean, nullable=False, default=False)
class User(Base):
__tablename__ = "user"
default_user = None
email = Column(String, unique=True, nullable=False, comment="Email")
username = Column(String, nullable=False, comment="Login")
password = Column(String, nullable=True, comment="Password")
bio = Column(String, nullable=True, comment="Bio") # status description
about = Column(String, nullable=True, comment="About") # long and formatted
userpic = Column(String, nullable=True, comment="Userpic")
name = Column(String, nullable=True, comment="Display name")
slug = Column(String, unique=True, comment="User's slug")
muted = Column(Boolean, default=False)
emailConfirmed = Column(Boolean, default=False)
createdAt = Column(
DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Created at"
)
lastSeen = Column(
DateTime(timezone=True), nullable=False, server_default=func.now(), comment="Was online at"
)
deletedAt = Column(DateTime(timezone=True), nullable=True, comment="Deleted at")
links = Column(JSONType, nullable=True, comment="Links")
oauth = Column(String, nullable=True)
ratings = relationship(UserRating, foreign_keys=UserRating.user)
roles = relationship(lambda: Role, secondary=UserRole.__tablename__)
oid = Column(String, nullable=True)
@staticmethod
def init_table():
with local_session() as session:
default = session.query(User).filter(User.slug == "anonymous").first()
if not default:
default_dict = {
"email": "noreply@discours.io",
"username": "noreply@discours.io",
"name": "Аноним",
"slug": "anonymous",
}
default = User.create(**default_dict)
session.add(default)
discours_dict = {
"email": "welcome@discours.io",
"username": "welcome@discours.io",
"name": "Дискурс",
"slug": "discours",
}
discours = User.create(**discours_dict)
session.add(discours)
session.commit()
User.default_user = default
def get_permission(self):
scope = {}
for role in self.roles:
for p in role.permissions:
if p.resource not in scope:
scope[p.resource] = set()
scope[p.resource].add(p.operation)
print(scope)
return scope
# if __name__ == "__main__":
# print(User.get_permission(user_id=1))
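
get_permission above folds a user's roles into a dict that maps each resource to the set of allowed operations. A sketch of the same folding with plain stand-in data (the Role and Permission shapes here are hypothetical, not the rbac models):

from collections import namedtuple

Permission = namedtuple("Permission", "resource operation")
Role = namedtuple("Role", "permissions")

roles = [
    Role([Permission("shout", "create"), Permission("shout", "update")]),
    Role([Permission("topic", "create"), Permission("shout", "create")]),
]

# Same accumulation as User.get_permission: one set of operations per resource.
scope = {}
for role in roles:
    for p in role.permissions:
        scope.setdefault(p.resource, set()).add(p.operation)

print(scope)  # {'shout': {'create', 'update'}, 'topic': {'create'}} (set order may vary)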

poetry.lock (generated, 1802 lines): diff suppressed because it is too large.


@@ -1,2 +0,0 @@
[tool.ruff]
line-length = 108

requirements-dev.txt (new executable file, 8 lines)

@@ -0,0 +1,8 @@
black==23.10.1
flake8==6.1.0
gql_schema_codegen==1.0.1
isort==5.12.0
mypy==1.6.1
pre-commit==3.5.0
pymongo-stubs==0.2.0
sqlalchemy-stubs==0.4


@@ -1,6 +0,0 @@
fakeredis
pytest
pytest-asyncio
pytest-cov
mypy
ruff


@@ -1,18 +1,37 @@
-# own auth
-bcrypt
-authlib
-passlib
-opensearch-py
-google-analytics-data
-colorlog
+aiohttp==3.8.6
+alembic==1.11.3
+ariadne>=0.17.0
+asyncio~=3.4.3
+authlib==1.2.1
+bcrypt>=4.0.0
+beautifulsoup4~=4.11.1
+boto3~=1.28.2
+botocore~=1.31.2
+bson~=0.5.10
+DateTime~=4.7
+gql~=3.4.0
+graphql-core>=3.0.3
+httpx>=0.23.0
+itsdangerous
+lxml
+Mako==1.2.4
+MarkupSafe==2.1.3
+nltk~=3.8.1
+passlib~=1.7.4
 psycopg2-binary
-httpx
-redis[hiredis]
-sentry-sdk[starlette,sqlalchemy]
-starlette
-gql
-ariadne
-granian
-orjson
-pydantic
-trafilatura
+pydantic>=1.10.2
+pyjwt>=2.6.0
+pymystem3~=0.2.0
+python-dateutil~=2.8.2
+python-frontmatter~=1.0.0
+python-multipart~=0.0.6
+PyYAML>=5.4
+requests~=2.28.1
+sentry-sdk>=1.14.0
+sqlalchemy>=1.4.41
+sse-starlette==1.6.5
+starlette~=0.23.1
+transliterate~=1.10.2
+uvicorn>=0.18.3
+redis

resetdb.sh (new executable file, 55 lines)

@@ -0,0 +1,55 @@
database_name="discoursio"
remote_backup_dir="/var/backups/mongodb"
user="root"
host="v2.discours.io"
server="$user@$host"
dump_dir="./dump"
local_backup_filename="discours-backup.bson.gz.tar"
echo "DATABASE RESET STARTED"
echo "server: $server"
echo "remote backup directory: $remote_backup_dir"
echo "Searching for last backup file..."
last_backup_filename=$(ssh $server "ls -t $remote_backup_dir | head -1")
if [ $? -ne 0 ]; then { echo "Failed to get last backup filename, aborting." ; exit 1; } fi
echo "Last backup file found: $last_backup_filename"
echo "Downloading..."
scp $server:$remote_backup_dir/"$last_backup_filename" "$local_backup_filename"
if [ $? -ne 0 ]; then { echo "Failed to download backup file, aborting." ; exit 1; } fi
echo "Backup file $local_backup_filename downloaded successfully"
echo "Creating dump directory: $dump_dir"
mkdir -p "$dump_dir"
if [ $? -ne 0 ]; then { echo "Failed to create dump directory, aborting." ; exit 1; } fi
echo "$dump_dir directory created"
echo "Unpacking backup file $local_backup_filename to $dump_dir"
tar -xzf "$local_backup_filename" --directory "$dump_dir" --strip-components 1
if [ $? -ne 0 ]; then { echo "Failed to unpack backup, aborting." ; exit 1; } fi
echo "Backup file $local_backup_filename successfully unpacked to $dump_dir"
echo "Removing backup file $local_backup_filename"
rm "$local_backup_filename"
if [ $? -ne 0 ]; then { echo "Failed to remove backup file, aborting." ; exit 1; } fi
echo "Backup file removed"
echo "Dropping database $database_name"
dropdb $database_name --force
if [ $? -ne 0 ]; then { echo "Failed to drop database, aborting." ; exit 1; } fi
echo "Database $database_name dropped"
echo "Creating database $database_name"
createdb $database_name
if [ $? -ne 0 ]; then { echo "Failed to create database, aborting." ; exit 1; } fi
echo "Database $database_name successfully created"
echo "BSON -> JSON"
python3 server.py bson
if [ $? -ne 0 ]; then { echo "BSON -> JSON failed, aborting." ; exit 1; } fi
echo "Start migration"
python3 server.py migrate
if [ $? -ne 0 ]; then { echo "Migration failed, aborting." ; exit 1; } fi
echo 'Done!'


@@ -1,132 +1,46 @@
-from cache.triggers import events_register
-from resolvers.author import (  # search_authors,
-    get_author,
-    get_author_followers,
-    get_author_follows,
-    get_author_follows_authors,
-    get_author_follows_topics,
-    get_author_id,
+# flake8: noqa
+from resolvers.auth import (
+    auth_send_link,
+    confirm_email,
+    get_current_user,
+    is_email_used,
+    login,
+    register_by_email,
+    sign_out,
+)
+from resolvers.create.editor import create_shout, delete_shout, update_shout
+from resolvers.inbox.chats import create_chat, delete_chat, update_chat
+from resolvers.inbox.load import load_chats, load_messages_by, load_recipients
+from resolvers.inbox.messages import (
+    create_message,
+    delete_message,
+    mark_as_read,
+    update_message,
+)
+from resolvers.inbox.search import search_recipients
+from resolvers.notifications import load_notifications
+from resolvers.zine.following import follow, unfollow
+from resolvers.zine.load import load_shout, load_shouts_by
+from resolvers.zine.profile import (
     get_authors_all,
     load_authors_by,
-    update_author,
+    rate_user,
+    update_profile,
 )
-from resolvers.community import get_communities_all, get_community
-from resolvers.draft import (
-    create_draft,
-    delete_draft,
-    load_drafts,
-    publish_draft,
-    update_draft,
-)
-from resolvers.editor import (
-    unpublish_shout,
-)
-from resolvers.feed import (
-    load_shouts_coauthored,
-    load_shouts_discussed,
-    load_shouts_feed,
-    load_shouts_followed_by,
-)
-from resolvers.follower import follow, get_shout_followers, unfollow
-from resolvers.notifier import (
-    load_notifications,
-    notification_mark_seen,
-    notifications_seen_after,
-    notifications_seen_thread,
-)
-from resolvers.rating import get_my_rates_comments, get_my_rates_shouts, rate_author
-from resolvers.reaction import (
+from resolvers.zine.reactions import (
     create_reaction,
     delete_reaction,
-    load_comment_ratings,
-    load_comments_branch,
     load_reactions_by,
-    load_shout_comments,
-    load_shout_ratings,
+    reactions_follow,
+    reactions_unfollow,
     update_reaction,
 )
-from resolvers.reader import (
-    get_shout,
-    load_shouts_by,
-    load_shouts_random_top,
-    load_shouts_search,
-    load_shouts_unrated,
-)
-from resolvers.topic import (
+from resolvers.zine.topics import (
     get_topic,
-    get_topic_authors,
-    get_topic_followers,
-    get_topics_all,
-    get_topics_by_author,
-    get_topics_by_community,
+    topic_follow,
+    topic_unfollow,
+    topics_all,
+    topics_by_author,
+    topics_by_community,
 )
-
-events_register()
-
-__all__ = [
-    # author
-    "get_author",
-    "get_author_id",
-    "get_author_followers",
-    "get_author_follows",
-    "get_author_follows_topics",
-    "get_author_follows_authors",
-    "get_authors_all",
-    "load_authors_by",
-    "update_author",
-    ## "search_authors",
-    # community
-    "get_community",
-    "get_communities_all",
-    # topic
-    "get_topic",
-    "get_topics_all",
-    "get_topics_by_community",
-    "get_topics_by_author",
-    "get_topic_followers",
-    "get_topic_authors",
-    # reader
-    "get_shout",
-    "load_shouts_by",
-    "load_shouts_random_top",
-    "load_shouts_search",
-    "load_shouts_unrated",
-    # feed
-    "load_shouts_feed",
-    "load_shouts_coauthored",
-    "load_shouts_discussed",
-    "load_shouts_with_topic",
-    "load_shouts_followed_by",
-    "load_shouts_authored_by",
-    # follower
-    "follow",
-    "unfollow",
-    "get_shout_followers",
-    # reaction
-    "create_reaction",
-    "update_reaction",
-    "delete_reaction",
-    "load_reactions_by",
-    "load_shout_comments",
-    "load_shout_ratings",
-    "load_comment_ratings",
-    "load_comments_branch",
-    # notifier
-    "load_notifications",
-    "notifications_seen_thread",
-    "notifications_seen_after",
-    "notification_mark_seen",
-    # rating
-    "rate_author",
-    "get_my_rates_comments",
-    "get_my_rates_shouts",
-    # draft
-    "load_drafts",
-    "create_draft",
-    "update_draft",
-    "delete_draft",
-    "publish_draft",
-    "publish_shout",
-    "unpublish_shout",
-    "unpublish_draft",
-]


@@ -5,17 +5,18 @@ from datetime import datetime, timezone
 from urllib.parse import quote_plus

 from graphql.type import GraphQLResolveInfo
+from transliterate import translit

 from auth.authenticate import login_required
 from auth.credentials import AuthCredentials
 from auth.email import send_auth_email
-from auth.exceptions import InvalidPassword, InvalidToken, ObjectNotExist, Unauthorized
 from auth.identity import Identity, Password
 from auth.jwtcodec import JWTCodec
 from auth.tokenstorage import TokenStorage
+from base.exceptions import InvalidPassword, InvalidToken, ObjectNotExist, Unauthorized
+from base.orm import local_session
+from base.resolvers import mutation, query
 from orm import Role, User
-from services.db import local_session
-from services.schema import mutation, query
 from settings import SESSION_TOKEN_HEADER
@@ -65,50 +66,9 @@ def create_user(user_dict):
     return user


-def replace_translit(src):
-    ruchars = "абвгдеёжзийклмнопрстуфхцчшщъыьэюя."
-    enchars = [
-        "a",
-        "b",
-        "v",
-        "g",
-        "d",
-        "e",
-        "yo",
-        "zh",
-        "z",
-        "i",
-        "y",
-        "k",
-        "l",
-        "m",
-        "n",
-        "o",
-        "p",
-        "r",
-        "s",
-        "t",
-        "u",
-        "f",
-        "h",
-        "c",
-        "ch",
-        "sh",
-        "sch",
-        "",
-        "y",
-        "'",
-        "e",
-        "yu",
-        "ya",
-        "-",
-    ]
-    return src.translate(str.maketrans(ruchars, enchars))
-
-
 def generate_unique_slug(src):
     print("[resolvers.auth] generating slug from: " + src)
-    slug = replace_translit(src.lower())
+    slug = translit(src, "ru", reversed=True).replace(".", "-").lower()
     slug = re.sub("[^0-9a-zA-Z]+", "-", slug)
     if slug != src:
         print("[resolvers.auth] translited name: " + slug)


@@ -1,384 +0,0 @@
import asyncio
import time
from typing import Optional
from sqlalchemy import select, text
from cache.cache import (
cache_author,
cached_query,
get_cached_author,
get_cached_author_by_user_id,
get_cached_author_followers,
get_cached_follower_authors,
get_cached_follower_topics,
invalidate_cache_by_prefix,
)
from orm.author import Author
from resolvers.stat import get_with_stat
from services.auth import login_required
from services.db import local_session
from services.redis import redis
from services.schema import mutation, query
from utils.logger import root_logger as logger
DEFAULT_COMMUNITIES = [1]
# Helper for fetching all authors without stats
async def get_all_authors():
"""
Получает всех авторов без статистики.
Используется для случаев, когда нужен полный список авторов без дополнительной информации.
Returns:
list: Список всех авторов без статистики
"""
cache_key = "authors:all:basic"
# Fetch all authors from the DB
async def fetch_all_authors():
logger.debug("Получаем список всех авторов из БД и кешируем результат")
with local_session() as session:
# Query for basic author info
authors_query = select(Author).where(Author.deleted_at.is_(None))
authors = session.execute(authors_query).scalars().all()
# Convert authors to dicts
return [author.dict() for author in authors]
# Use the generic query-caching helper
return await cached_query(cache_key, fetch_all_authors)
# Helper for fetching authors with stats, paginated
async def get_authors_with_stats(limit=50, offset=0, by: Optional[str] = None):
"""
Получает авторов со статистикой с пагинацией.
Args:
limit: Максимальное количество возвращаемых авторов
offset: Смещение для пагинации
by: Опциональный параметр сортировки (new/active)
Returns:
list: Список авторов с их статистикой
"""
# Build the cache key with the generic helper
cache_key = f"authors:stats:limit={limit}:offset={offset}"
# Fetch authors from the DB
async def fetch_authors_with_stats():
logger.debug(f"Выполняем запрос на получение авторов со статистикой: limit={limit}, offset={offset}, by={by}")
with local_session() as session:
# Base query for authors
base_query = select(Author).where(Author.deleted_at.is_(None))
# Apply sorting
if by:
if isinstance(by, dict):
# Handle a dict of sort parameters
from sqlalchemy import asc, desc
for field, direction in by.items():
column = getattr(Author, field, None)
if column:
if direction.lower() == "desc":
base_query = base_query.order_by(desc(column))
else:
base_query = base_query.order_by(column)
elif by == "new":
base_query = base_query.order_by(desc(Author.created_at))
elif by == "active":
base_query = base_query.order_by(desc(Author.last_seen))
else:
# Sort by creation time by default
base_query = base_query.order_by(desc(Author.created_at))
else:
base_query = base_query.order_by(desc(Author.created_at))
# Apply limit and offset
base_query = base_query.limit(limit).offset(offset)
# Fetch the authors
authors = session.execute(base_query).scalars().all()
author_ids = [author.id for author in authors]
if not author_ids:
return []
# Optimized query for per-author publication stats
shouts_stats_query = f"""
SELECT sa.author, COUNT(DISTINCT s.id) as shouts_count
FROM shout_author sa
JOIN shout s ON sa.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
WHERE sa.author IN ({",".join(map(str, author_ids))})
GROUP BY sa.author
"""
shouts_stats = {row[0]: row[1] for row in session.execute(text(shouts_stats_query))}
# Query for per-author follower stats
followers_stats_query = f"""
SELECT author, COUNT(DISTINCT follower) as followers_count
FROM author_follower
WHERE author IN ({",".join(map(str, author_ids))})
GROUP BY author
"""
followers_stats = {row[0]: row[1] for row in session.execute(text(followers_stats_query))}
# Build the result with stats attached
result = []
for author in authors:
author_dict = author.dict()
author_dict["stat"] = {
"shouts": shouts_stats.get(author.id, 0),
"followers": followers_stats.get(author.id, 0),
}
result.append(author_dict)
# Cache each author separately for reuse elsewhere
await cache_author(author_dict)
return result
# Use the generic query-caching helper
return await cached_query(cache_key, fetch_authors_with_stats)
# Invalidate author caches
async def invalidate_authors_cache(author_id=None):
"""
Инвалидирует кеши авторов при изменении данных.
Args:
author_id: Опциональный ID автора для точечной инвалидации.
Если не указан, инвалидируются все кеши авторов.
"""
if author_id:
# Точечная инвалидация конкретного автора
logger.debug(f"Инвалидация кеша для автора #{author_id}")
specific_keys = [
f"author:id:{author_id}",
f"author:followers:{author_id}",
f"author:follows-authors:{author_id}",
f"author:follows-topics:{author_id}",
f"author:follows-shouts:{author_id}",
]
# Get the author's user_id, if any
with local_session() as session:
author = session.query(Author).filter(Author.id == author_id).first()
if author and author.user:
specific_keys.append(f"author:user:{author.user.strip()}")
# Delete the specific keys
for key in specific_keys:
try:
await redis.execute("DEL", key)
logger.debug(f"Удален ключ кеша {key}")
except Exception as e:
logger.error(f"Ошибка при удалении ключа {key}: {e}")
# Also find and delete collection keys that contain this author's data
collection_keys = await redis.execute("KEYS", "authors:stats:*")
if collection_keys:
await redis.execute("DEL", *collection_keys)
logger.debug(f"Удалено {len(collection_keys)} коллекционных ключей авторов")
else:
# Blanket invalidation of all author caches
logger.debug("Полная инвалидация кеша авторов")
await invalidate_cache_by_prefix("authors")
@mutation.field("update_author")
@login_required
async def update_author(_, info, profile):
user_id = info.context.get("user_id")
if not user_id:
return {"error": "unauthorized", "author": None}
try:
with local_session() as session:
author = session.query(Author).where(Author.user == user_id).first()
if author:
Author.update(author, profile)
session.add(author)
session.commit()
author_query = select(Author).where(Author.user == user_id)
result = get_with_stat(author_query)
if result:
author_with_stat = result[0]
if isinstance(author_with_stat, Author):
author_dict = author_with_stat.dict()
# await cache_author(author_dict)
asyncio.create_task(cache_author(author_dict))
return {"error": None, "author": author}
except Exception as exc:
import traceback
logger.error(traceback.format_exc())
return {"error": exc, "author": None}
@query.field("get_authors_all")
async def get_authors_all(_, _info):
"""
Получает список всех авторов без статистики.
Returns:
list: Список всех авторов
"""
return await get_all_authors()
@query.field("get_author")
async def get_author(_, _info, slug="", author_id=0):
author_dict = None
try:
author_id = get_author_id_from(slug=slug, user="", author_id=author_id)
if not author_id:
raise ValueError("cant find")
author_dict = await get_cached_author(int(author_id), get_with_stat)
if not author_dict or not author_dict.get("stat"):
# update stat from db
author_query = select(Author).filter(Author.id == author_id)
result = get_with_stat(author_query)
if result:
author_with_stat = result[0]
if isinstance(author_with_stat, Author):
author_dict = author_with_stat.dict()
# await cache_author(author_dict)
asyncio.create_task(cache_author(author_dict))
except ValueError:
pass
except Exception as exc:
import traceback
logger.error(f"{exc}:\n{traceback.format_exc()}")
return author_dict
@query.field("get_author_id")
async def get_author_id(_, _info, user: str):
user_id = user.strip()
logger.info(f"getting author id for {user_id}")
author = None
try:
author = await get_cached_author_by_user_id(user_id, get_with_stat)
if author:
return author
author_query = select(Author).filter(Author.user == user_id)
result = get_with_stat(author_query)
if result:
author_with_stat = result[0]
if isinstance(author_with_stat, Author):
author_dict = author_with_stat.dict()
# await cache_author(author_dict)
asyncio.create_task(cache_author(author_dict))
return author_with_stat
except Exception as exc:
logger.error(f"Error getting author: {exc}")
return None
@query.field("load_authors_by")
async def load_authors_by(_, _info, by, limit, offset):
"""
Загружает авторов по заданному критерию с пагинацией.
Args:
by: Критерий сортировки авторов (new/active)
limit: Максимальное количество возвращаемых авторов
offset: Смещение для пагинации
Returns:
list: Список авторов с учетом критерия
"""
# Use the optimized author-fetching helper
return await get_authors_with_stats(limit, offset, by)
def get_author_id_from(slug="", user=None, author_id=None):
try:
author_id = None
if author_id:
return author_id
with local_session() as session:
author = None
if slug:
author = session.query(Author).filter(Author.slug == slug).first()
if author:
author_id = author.id
return author_id
if user:
author = session.query(Author).filter(Author.user == user).first()
if author:
author_id = author.id
except Exception as exc:
logger.error(exc)
return author_id
@query.field("get_author_follows")
async def get_author_follows(_, _info, slug="", user=None, author_id=0):
logger.debug(f"getting follows for @{slug}")
author_id = get_author_id_from(slug=slug, user=user, author_id=author_id)
if not author_id:
return {}
followed_authors = await get_cached_follower_authors(author_id)
followed_topics = await get_cached_follower_topics(author_id)
# TODO: Get followed communities too
return {
"authors": followed_authors,
"topics": followed_topics,
"communities": DEFAULT_COMMUNITIES,
"shouts": [],
}
@query.field("get_author_follows_topics")
async def get_author_follows_topics(_, _info, slug="", user=None, author_id=None):
logger.debug(f"getting followed topics for @{slug}")
author_id = get_author_id_from(slug=slug, user=user, author_id=author_id)
if not author_id:
return []
followed_topics = await get_cached_follower_topics(author_id)
return followed_topics
@query.field("get_author_follows_authors")
async def get_author_follows_authors(_, _info, slug="", user=None, author_id=None):
logger.debug(f"getting followed authors for @{slug}")
author_id = get_author_id_from(slug=slug, user=user, author_id=author_id)
if not author_id:
return []
followed_authors = await get_cached_follower_authors(author_id)
return followed_authors
def create_author(user_id: str, slug: str, name: str = ""):
author = Author()
author.user = user_id  # link to the user_id from the auth system
author.slug = slug  # identifier from the auth system
author.created_at = author.updated_at = int(time.time())
author.name = name or slug  # fall back to the slug when no name is given
with local_session() as session:
session.add(author)
session.commit()
return author
@query.field("get_author_followers")
async def get_author_followers(_, _info, slug: str = "", user: str = "", author_id: int = 0):
logger.debug(f"getting followers for author @{slug} or ID:{author_id}")
author_id = get_author_id_from(slug=slug, user=user, author_id=author_id)
if not author_id:
return []
followers = await get_cached_author_followers(author_id)
return followers
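
These resolvers all lean on the same cache-aside shape: build a deterministic key, try the cache, fall back to the database, remember the result. The real cached_query lives in cache.cache, which this page does not show; below is a minimal sketch of the pattern with an in-memory dict standing in for Redis:

import asyncio

_store: dict = {}  # stands in for Redis


async def cached_query(key, fetch):
    # Return the cached value when present; otherwise compute and remember it.
    if key in _store:
        return _store[key]
    value = await fetch()
    _store[key] = value
    return value


async def main():
    async def fetch_authors():
        print("hitting the database")
        return [{"id": 1, "slug": "anonymous"}]

    print(await cached_query("authors:all:basic", fetch_authors))  # db hit
    print(await cached_query("authors:all:basic", fetch_authors))  # cache hit


asyncio.run(main())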


@@ -1,83 +0,0 @@
from operator import and_
from graphql import GraphQLError
from sqlalchemy import delete, insert
from orm.author import AuthorBookmark
from orm.shout import Shout
from resolvers.feed import apply_options
from resolvers.reader import get_shouts_with_links, query_with_stat
from services.auth import login_required
from services.common_result import CommonResult
from services.db import local_session
from services.schema import mutation, query
@query.field("load_shouts_bookmarked")
@login_required
def load_shouts_bookmarked(_, info, options):
"""
Load bookmarked shouts for the authenticated user.
Args:
limit (int): Maximum number of shouts to return.
offset (int): Number of shouts to skip.
Returns:
list: List of bookmarked shouts.
"""
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not author_id:
raise GraphQLError("User not authenticated")
q = query_with_stat(info)
q = q.join(AuthorBookmark)
q = q.filter(
and_(
Shout.id == AuthorBookmark.shout,
AuthorBookmark.author == author_id,
)
)
q, limit, offset = apply_options(q, options, author_id)
return get_shouts_with_links(info, q, limit, offset)
@mutation.field("toggle_bookmark_shout")
def toggle_bookmark_shout(_, info, slug: str) -> CommonResult:
"""
Toggle bookmark status for a specific shout.
Args:
slug (str): Unique identifier of the shout.
Returns:
CommonResult: Result of the operation with bookmark status.
"""
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not author_id:
raise GraphQLError("User not authenticated")
with local_session() as db:
shout = db.query(Shout).filter(Shout.slug == slug).first()
if not shout:
raise GraphQLError("Shout not found")
existing_bookmark = (
db.query(AuthorBookmark)
.filter(AuthorBookmark.author == author_id, AuthorBookmark.shout == shout.id)
.first()
)
if existing_bookmark:
db.execute(
delete(AuthorBookmark).where(AuthorBookmark.author == author_id, AuthorBookmark.shout == shout.id)
)
result = False
else:
db.execute(insert(AuthorBookmark).values(author=author_id, shout=shout.id))
result = True
db.commit()
return result
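
toggle_bookmark_shout implements the toggle as delete-if-present, otherwise insert. A compact sketch of that branch, with a plain set standing in for the AuthorBookmark table:

# The (author_id, shout_id) pair plays the role of AuthorBookmark's
# composite key; a set stands in for the table.
bookmarks: set[tuple[int, int]] = set()


def toggle_bookmark(author_id: int, shout_id: int) -> bool:
    key = (author_id, shout_id)
    if key in bookmarks:
        bookmarks.remove(key)
        return False  # bookmark removed
    bookmarks.add(key)
    return True  # bookmark created


print(toggle_bookmark(1, 42))  # True
print(toggle_bookmark(1, 42))  # False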


@@ -1,147 +0,0 @@
from orm.author import Author
from orm.invite import Invite, InviteStatus
from orm.shout import Shout
from services.auth import login_required
from services.db import local_session
from services.schema import mutation
@mutation.field("accept_invite")
@login_required
async def accept_invite(_, info, invite_id: int):
info.context["user_id"]
author_dict = info.context["author"]
author_id = author_dict.get("id")
if author_id:
author_id = int(author_id)
# Check if the user exists
with local_session() as session:
# Check if the invite exists
invite = session.query(Invite).filter(Invite.id == invite_id).first()
if invite and invite.author_id is author_id and invite.status is InviteStatus.PENDING.value:
# Add the user to the shout authors
shout = session.query(Shout).filter(Shout.id == invite.shout_id).first()
if shout:
if author_id not in shout.authors:
author = session.query(Author).filter(Author.id == author_id).first()
if author:
shout.authors.append(author)
session.add(shout)
session.delete(invite)
session.commit()
return {"success": True, "message": "Invite accepted"}
else:
return {"error": "Shout not found"}
else:
return {"error": "Invalid invite or already accepted/rejected"}
else:
return {"error": "Unauthorized"}
@mutation.field("reject_invite")
@login_required
async def reject_invite(_, info, invite_id: int):
info.context["user_id"]
author_dict = info.context["author"]
author_id = author_dict.get("id")
if author_id:
# Check if the user exists
with local_session() as session:
author_id = int(author_id)
# Check if the invite exists
invite = session.query(Invite).filter(Invite.id == invite_id).first()
if invite and invite.author_id is author_id and invite.status is InviteStatus.PENDING.value:
# Delete the invite
session.delete(invite)
session.commit()
return {"success": True, "message": "Invite rejected"}
else:
return {"error": "Invalid invite or already accepted/rejected"}
return {"error": "User not found"}
@mutation.field("create_invite")
@login_required
async def create_invite(_, info, slug: str = "", author_id: int = 0):
user_id = info.context["user_id"]
author_dict = info.context["author"]
author_id = author_dict.get("id")
if author_id:
# Check if the inviter is the owner of the shout
with local_session() as session:
shout = session.query(Shout).filter(Shout.slug == slug).first()
inviter = session.query(Author).filter(Author.user == user_id).first()
if inviter and shout and shout.authors and inviter.id is shout.created_by:
# Check if an invite already exists
existing_invite = (
session.query(Invite)
.filter(
Invite.inviter_id == inviter.id,
Invite.author_id == author_id,
Invite.shout_id == shout.id,
Invite.status == InviteStatus.PENDING.value,
)
.first()
)
if existing_invite:
return {"error": "Invite already sent"}
# Create a new invite
new_invite = Invite(
inviter_id=user_id,
author_id=author_id,
shout_id=shout.id,
status=InviteStatus.PENDING.value,
)
session.add(new_invite)
session.commit()
return {"error": None, "invite": new_invite}
else:
return {"error": "Invalid author"}
else:
return {"error": "Access denied"}
@mutation.field("remove_author")
@login_required
async def remove_author(_, info, slug: str = "", author_id: int = 0):
user_id = info.context["user_id"]
with local_session() as session:
author = session.query(Author).filter(Author.user == user_id).first()
if author:
shout = session.query(Shout).filter(Shout.slug == slug).first()
# NOTE: owner should be first in a list
if shout and author.id is shout.created_by:
shout.authors = [author for author in shout.authors if author.id != author_id]
session.commit()
return {}
return {"error": "Access denied"}
@mutation.field("remove_invite")
@login_required
async def remove_invite(_, info, invite_id: int):
info.context["user_id"]
author_dict = info.context["author"]
author_id = author_dict.get("id")
if isinstance(author_id, int):
# Check if the user exists
with local_session() as session:
# Check if the invite exists
invite = session.query(Invite).filter(Invite.id == invite_id).first()
if isinstance(invite, Invite):
shout = session.query(Shout).filter(Shout.id == invite.shout_id).first()
if shout and shout.deleted_at is None and invite:
if invite.inviter_id is author_id or author_id == shout.created_by:
if invite.status is InviteStatus.PENDING.value:
# Delete the invite
session.delete(invite)
session.commit()
return {}
else:
return {"error": "Invalid invite or already accepted/rejected"}
else:
return {"error": "Author not found"}


@@ -1,97 +0,0 @@
from orm.author import Author
from orm.community import Community, CommunityFollower
from services.db import local_session
from services.schema import mutation, query
@query.field("get_communities_all")
async def get_communities_all(_, _info):
return local_session().query(Community).all()
@query.field("get_community")
async def get_community(_, _info, slug: str):
q = local_session().query(Community).where(Community.slug == slug)
return q.first()
@query.field("get_communities_by_author")
async def get_communities_by_author(_, _info, slug="", user="", author_id=0):
with local_session() as session:
q = session.query(Community).join(CommunityFollower)
if slug:
author_id = session.query(Author).where(Author.slug == slug).first().id
q = q.where(CommunityFollower.author == author_id)
if user:
author_id = session.query(Author).where(Author.user == user).first().id
q = q.where(CommunityFollower.author == author_id)
if author_id:
q = q.where(CommunityFollower.author == author_id)
return q.all()
return []
@mutation.field("join_community")
async def join_community(_, info, slug: str):
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
with local_session() as session:
community = session.query(Community).where(Community.slug == slug).first()
if not community:
return {"ok": False, "error": "Community not found"}
session.add(CommunityFollower(community=community.id, author=author_id))
session.commit()
return {"ok": True}
@mutation.field("leave_community")
async def leave_community(_, info, slug: str):
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
with local_session() as session:
session.query(CommunityFollower).where(
CommunityFollower.author == author_id, CommunityFollower.community == slug
).delete()
session.commit()
return {"ok": True}
@mutation.field("create_community")
async def create_community(_, info, community_data):
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
with local_session() as session:
session.add(Community(author=author_id, **community_data))
session.commit()
return {"ok": True}
@mutation.field("update_community")
async def update_community(_, info, community_data):
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
slug = community_data.get("slug")
if slug:
with local_session() as session:
try:
session.query(Community).where(Community.created_by == author_id, Community.slug == slug).update(
community_data
)
session.commit()
except Exception as e:
return {"ok": False, "error": str(e)}
return {"ok": True}
return {"ok": False, "error": "Please, set community slug in input"}
@mutation.field("delete_community")
async def delete_community(_, info, slug: str):
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
with local_session() as session:
try:
session.query(Community).where(Community.slug == slug, Community.created_by == author_id).delete()
session.commit()
return {"ok": True}
except Exception as e:
return {"ok": False, "error": str(e)}

resolvers/create/editor.py (new file, 179 lines)

@@ -0,0 +1,179 @@
from datetime import datetime, timezone
from sqlalchemy import and_
from sqlalchemy.orm import joinedload
from auth.authenticate import login_required
from auth.credentials import AuthCredentials
from base.orm import local_session
from base.resolvers import mutation
from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic
from resolvers.zine.reactions import reactions_follow, reactions_unfollow
@mutation.field("createShout")
@login_required
async def create_shout(_, info, inp):
auth: AuthCredentials = info.context["request"].auth
with local_session() as session:
topics = session.query(Topic).filter(Topic.slug.in_(inp.get("topics", []))).all()
new_shout = Shout.create(
**{
"title": inp.get("title"),
"subtitle": inp.get("subtitle"),
"lead": inp.get("lead"),
"description": inp.get("description"),
"body": inp.get("body", ""),
"layout": inp.get("layout"),
"authors": inp.get("authors", []),
"slug": inp.get("slug"),
"mainTopic": inp.get("mainTopic"),
"visibility": "owner",
"createdBy": auth.user_id,
}
)
for topic in topics:
t = ShoutTopic.create(topic=topic.id, shout=new_shout.id)
session.add(t)
# NOTE: shout made by one first author
sa = ShoutAuthor.create(shout=new_shout.id, user=auth.user_id)
session.add(sa)
session.add(new_shout)
reactions_follow(auth.user_id, new_shout.id, True)
session.commit()
# TODO
# GitTask(inp, user.username, user.email, "new shout %s" % new_shout.slug)
if new_shout.slug is None:
new_shout.slug = f"draft-{new_shout.id}"
session.commit()
return {"shout": new_shout}
@mutation.field("updateShout")
@login_required
async def update_shout(_, info, shout_id, shout_input=None, publish=False): # noqa: C901
auth: AuthCredentials = info.context["request"].auth
with local_session() as session:
shout = (
session.query(Shout)
.options(
joinedload(Shout.authors),
joinedload(Shout.topics),
)
.filter(Shout.id == shout_id)
.first()
)
if not shout:
return {"error": "shout not found"}
if shout.createdBy != auth.user_id:
return {"error": "access denied"}
updated = False
if shout_input is not None:
topics_input = shout_input["topics"]
del shout_input["topics"]
new_topics_to_link = []
new_topics = [topic_input for topic_input in topics_input if topic_input["id"] < 0]
for new_topic in new_topics:
del new_topic["id"]
created_new_topic = Topic.create(**new_topic)
session.add(created_new_topic)
new_topics_to_link.append(created_new_topic)
if len(new_topics) > 0:
session.commit()
for new_topic_to_link in new_topics_to_link:
created_unlinked_topic = ShoutTopic.create(
shout=shout.id, topic=new_topic_to_link.id
)
session.add(created_unlinked_topic)
existing_topics_input = [
topic_input for topic_input in topics_input if topic_input.get("id", 0) > 0
]
existing_topic_to_link_ids = [
existing_topic_input["id"]
for existing_topic_input in existing_topics_input
if existing_topic_input["id"] not in [topic.id for topic in shout.topics]
]
for existing_topic_to_link_id in existing_topic_to_link_ids:
created_unlinked_topic = ShoutTopic.create(
shout=shout.id, topic=existing_topic_to_link_id
)
session.add(created_unlinked_topic)
topic_to_unlink_ids = [
topic.id
for topic in shout.topics
if topic.id not in [topic_input["id"] for topic_input in existing_topics_input]
]
shout_topics_to_remove = session.query(ShoutTopic).filter(
and_(ShoutTopic.shout == shout.id, ShoutTopic.topic.in_(topic_to_unlink_ids))
)
for shout_topic_to_remove in shout_topics_to_remove:
session.delete(shout_topic_to_remove)
shout_input["mainTopic"] = shout_input["mainTopic"]["slug"]
if shout_input["mainTopic"] == "":
del shout_input["mainTopic"]
shout.update(shout_input)
updated = True
if publish and shout.visibility == "owner":
shout.visibility = "community"
shout.publishedAt = datetime.now(tz=timezone.utc)
updated = True
if updated:
shout.updatedAt = datetime.now(tz=timezone.utc)
session.commit()
# GitTask(inp, user.username, user.email, "update shout %s" % slug)
return {"shout": shout}
@mutation.field("deleteShout")
@login_required
async def delete_shout(_, info, shout_id):
auth: AuthCredentials = info.context["request"].auth
with local_session() as session:
shout = session.query(Shout).filter(Shout.id == shout_id).first()
if not shout:
return {"error": "invalid shout id"}
if auth.user_id != shout.createdBy:
return {"error": "access denied"}
for author_id in shout.authors:
reactions_unfollow(author_id, shout_id)
shout.deletedAt = datetime.now(tz=timezone.utc)
session.commit()
return {}
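
For reference, this is roughly how a client would call the createShout mutation registered above, using the gql client pinned in requirements.txt. The endpoint URL, the auth header, and the ShoutInput type name are assumptions for illustration, not taken from this page:

from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport

transport = AIOHTTPTransport(
    url="https://example.com/graphql",  # hypothetical endpoint
    headers={"Authorization": "Bearer <token>"},  # placeholder session token
)
client = Client(transport=transport, fetch_schema_from_transport=False)

mutation = gql(
    """
    mutation CreateShout($inp: ShoutInput!) {
        createShout(inp: $inp) { shout { id slug } }
    }
    """
)
result = client.execute(
    mutation,
    variable_values={"inp": {"title": "Test", "body": "<p>Hello</p>", "topics": []}},
)
print(result)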


@@ -1,475 +0,0 @@
import time
import trafilatura
from sqlalchemy.orm import joinedload
from cache.cache import (
cache_author,
cache_by_id,
cache_topic,
invalidate_shout_related_cache,
invalidate_shouts_cache,
)
from orm.author import Author
from orm.draft import Draft, DraftAuthor, DraftTopic
from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic
from services.auth import login_required
from services.db import local_session
from services.notify import notify_shout
from services.schema import mutation, query
from services.search import search_service
from utils.html_wrapper import wrap_html_fragment
from utils.logger import root_logger as logger
def create_shout_from_draft(session, draft, author_id):
"""
Создаёт новый объект публикации (Shout) на основе черновика.
Args:
session: SQLAlchemy сессия (не используется, для совместимости)
draft (Draft): Объект черновика
author_id (int): ID автора публикации
Returns:
Shout: Новый объект публикации (не сохранённый в базе)
Пример:
>>> from orm.draft import Draft
>>> draft = Draft(id=1, title='Заголовок', body='Текст', slug='slug', created_by=1)
>>> shout = create_shout_from_draft(None, draft, 1)
>>> shout.title
'Заголовок'
>>> shout.body
'Текст'
>>> shout.created_by
1
"""
# Create the new publication
shout = Shout(
body=draft.body or "",
slug=draft.slug,
cover=draft.cover,
cover_caption=draft.cover_caption,
lead=draft.lead,
title=draft.title or "",
subtitle=draft.subtitle,
layout=draft.layout or "article",
media=draft.media or [],
lang=draft.lang or "ru",
seo=draft.seo,
created_by=author_id,
community=draft.community,
draft=draft.id,
deleted_at=None,
)
# Initialize empty arrays for the relations
shout.topics = []
shout.authors = []
return shout
@query.field("load_drafts")
@login_required
async def load_drafts(_, info):
"""
Загружает все черновики, доступные текущему пользователю.
Предварительно загружает связанные объекты (topics, authors, publication),
чтобы избежать ошибок с отсоединенными объектами при сериализации.
Returns:
dict: Список черновиков или сообщение об ошибке
"""
user_id = info.context.get("user_id")
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not user_id or not author_id:
return {"error": "User ID and author ID are required"}
try:
with local_session() as session:
# Eager-load authors, topics and the linked publication
drafts_query = (
session.query(Draft)
.options(
joinedload(Draft.topics),
joinedload(Draft.authors),
joinedload(Draft.publication)  # load the linked publication
)
.filter(Draft.authors.any(Author.id == author_id))
)
drafts = drafts_query.all()
# Convert the objects to dicts while still inside the session
drafts_data = []
for draft in drafts:
draft_dict = draft.dict()
# Always return an array for topics, even when it is empty
draft_dict["topics"] = [topic.dict() for topic in (draft.topics or [])]
draft_dict["authors"] = [author.dict() for author in (draft.authors or [])]
# Attach publication info when present
if draft.publication:
draft_dict["publication"] = {
"id": draft.publication.id,
"slug": draft.publication.slug,
"published_at": draft.publication.published_at
}
else:
draft_dict["publication"] = None
drafts_data.append(draft_dict)
return {"drafts": drafts_data}
except Exception as e:
logger.error(f"Failed to load drafts: {e}", exc_info=True)
return {"error": f"Failed to load drafts: {str(e)}"}
@mutation.field("create_draft")
@login_required
async def create_draft(_, info, draft_input):
"""Create a new draft.
Args:
info: GraphQL context
draft_input (dict): Draft data including optional fields:
- title (str, required) - draft title
- body (str, required) - draft body
- slug (str)
- etc.
Returns:
dict: Contains either:
- draft: The created draft object
- error: Error message if creation failed
Example:
>>> async def test_create():
... context = {'user_id': '123', 'author': {'id': 1}}
... info = type('Info', (), {'context': context})()
... result = await create_draft(None, info, {'title': 'Test'})
... assert result.get('error') is None
... assert result['draft'].title == 'Test'
... return result
"""
user_id = info.context.get("user_id")
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not user_id or not author_id:
return {"error": "Author ID is required"}
# Validate required fields
if "body" not in draft_input or not draft_input["body"]:
draft_input["body"] = "" # Пустая строка вместо NULL
if "title" not in draft_input or not draft_input["title"]:
draft_input["title"] = "" # Пустая строка вместо NULL
# Validate slug: it must be either non-empty or absent altogether
if "slug" in draft_input and (draft_input["slug"] is None or draft_input["slug"] == ""):
# Drop an empty slug from the input when creating a draft
del draft_input["slug"]
try:
with local_session() as session:
# Remove id from input if present since it's auto-generated
if "id" in draft_input:
del draft_input["id"]
# Add the creation timestamp and the author ID
draft_input["created_at"] = int(time.time())
draft_input["created_by"] = author_id
draft = Draft(**draft_input)
session.add(draft)
session.flush()
# Add the creator as an author
da = DraftAuthor(shout=draft.id, author=author_id)
session.add(da)
session.commit()
return {"draft": draft}
except Exception as e:
logger.error(f"Failed to create draft: {e}", exc_info=True)
return {"error": f"Failed to create draft: {str(e)}"}
def generate_teaser(body, limit=300):
body_html = wrap_html_fragment(body)
body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False)
body_teaser = ". ".join(body_text[:limit].split(". ")[:-1])
return body_teaser
@mutation.field("update_draft")
@login_required
async def update_draft(_, info, draft_id: int, draft_input):
"""Обновляет черновик публикации.
Args:
draft_id: ID черновика для обновления
draft_input: Данные для обновления черновика согласно схеме DraftInput:
- layout: String
- author_ids: [Int!]
- topic_ids: [Int!]
- main_topic_id: Int
- media: [MediaItemInput]
- lead: String
- subtitle: String
- lang: String
- seo: String
- body: String
- title: String
- slug: String
- cover: String
- cover_caption: String
Returns:
dict: the updated draft or an error message
"""
user_id = info.context.get("user_id")
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not user_id or not author_id:
return {"error": "Author ID are required"}
try:
with local_session() as session:
draft = session.query(Draft).filter(Draft.id == draft_id).first()
if not draft:
return {"error": "Draft not found"}
# Filter the input, keeping only allowed fields
allowed_fields = {
"layout", "author_ids", "topic_ids", "main_topic_id",
"media", "lead", "subtitle", "lang", "seo", "body",
"title", "slug", "cover", "cover_caption"
}
filtered_input = {k: v for k, v in draft_input.items() if k in allowed_fields}
# Validate slug
if "slug" in filtered_input and not filtered_input["slug"]:
del filtered_input["slug"]
# Update author links when provided
if "author_ids" in filtered_input:
author_ids = filtered_input.pop("author_ids")
if author_ids:
# Clear the current links
session.query(DraftAuthor).filter(DraftAuthor.shout == draft_id).delete()
# Add the new links
for aid in author_ids:
da = DraftAuthor(shout=draft_id, author=aid)
session.add(da)
# Update topic links when provided
if "topic_ids" in filtered_input:
topic_ids = filtered_input.pop("topic_ids")
main_topic_id = filtered_input.pop("main_topic_id", None)
if topic_ids:
# Clear the current links
session.query(DraftTopic).filter(DraftTopic.shout == draft_id).delete()
# Add the new links
for tid in topic_ids:
dt = DraftTopic(
shout=draft_id,
topic=tid,
main=(tid == main_topic_id) if main_topic_id else False
)
session.add(dt)
# Generate SEO text when not provided
if "seo" not in filtered_input and not draft.seo:
body_src = filtered_input.get("body", draft.body)
lead_src = filtered_input.get("lead", draft.lead)
body_html = wrap_html_fragment(body_src)
lead_html = wrap_html_fragment(lead_src)
try:
body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False) if body_src else None
lead_text = trafilatura.extract(lead_html, include_comments=False, include_tables=False) if lead_src else None
body_teaser = generate_teaser(body_text, 300) if body_text else ""
filtered_input["seo"] = lead_text if lead_text else body_teaser
except Exception as e:
logger.warning(f"Failed to generate SEO for draft {draft_id}: {e}")
# Update the draft's main fields
for key, value in filtered_input.items():
setattr(draft, key, value)
# Update metadata
draft.updated_at = int(time.time())
draft.updated_by = author_id
session.commit()
# Convert the object to a dict for the response
draft_dict = draft.dict()
draft_dict["topics"] = [topic.dict() for topic in draft.topics]
draft_dict["authors"] = [author.dict() for author in draft.authors]
# Put the author object into updated_by
draft_dict["updated_by"] = author_dict
return {"draft": draft_dict}
except Exception as e:
logger.error(f"Failed to update draft: {e}", exc_info=True)
return {"error": f"Failed to update draft: {str(e)}"}
@mutation.field("delete_draft")
@login_required
async def delete_draft(_, info, draft_id: int):
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
with local_session() as session:
draft = session.query(Draft).filter(Draft.id == draft_id).first()
if not draft:
return {"error": "Draft not found"}
if author_id != draft.created_by and draft.authors.filter(Author.id == author_id).count() == 0:
return {"error": "You are not allowed to delete this draft"}
session.delete(draft)
session.commit()
return {"draft": draft}
def validate_html_content(html_content: str) -> tuple[bool, str]:
"""
Validates HTML content via trafilatura.
Args:
html_content: the HTML string to check
Returns:
tuple[bool, str]: (is_valid, error message)
Example:
>>> is_valid, error = validate_html_content("<p>Valid HTML</p>")
>>> is_valid
True
>>> error
''
>>> is_valid, error = validate_html_content("Invalid < HTML")
>>> is_valid
False
>>> 'Invalid HTML' in error
True
"""
if not html_content or not html_content.strip():
return False, "Content is empty"
try:
html_content = wrap_html_fragment(html_content)
extracted = trafilatura.extract(html_content)
if not extracted:
return False, "Invalid HTML structure or empty content"
return True, ""
except Exception as e:
logger.error(f"HTML validation error: {e}", exc_info=True)
return False, f"Invalid HTML content: {str(e)}"
@mutation.field("publish_draft")
@login_required
async def publish_draft(_, info, draft_id: int):
"""
Publishes a draft, creating a new Shout or updating an existing one.
Args:
draft_id (int): ID of the draft to publish
Returns:
dict: publication result with a shout, or an error message
"""
user_id = info.context.get("user_id")
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not user_id or not author_id:
return {"error": "Author ID is required"}
try:
with local_session() as session:
# Load the draft with all its relations
draft = (
session.query(Draft)
.options(
joinedload(Draft.topics),
joinedload(Draft.authors),
joinedload(Draft.publication)
)
.filter(Draft.id == draft_id)
.first()
)
if not draft:
return {"error": "Draft not found"}
# Validate the HTML in body
is_valid, error = validate_html_content(draft.body)
if not is_valid:
return {"error": f"Cannot publish draft: {error}"}
# Check whether a publication already exists for this draft
if draft.publication:
shout = draft.publication
# Update the existing publication
for field in ["body", "title", "subtitle", "lead", "cover", "cover_caption", "media", "lang", "seo"]:
if hasattr(draft, field):
setattr(shout, field, getattr(draft, field))
shout.updated_at = int(time.time())
shout.updated_by = author_id
else:
# Create a new publication
shout = create_shout_from_draft(session, draft, author_id)
now = int(time.time())
shout.created_at = now
shout.published_at = now
session.add(shout)
session.flush()  # obtain the new shout's ID
# Clear existing relations
session.query(ShoutAuthor).filter(ShoutAuthor.shout == shout.id).delete()
session.query(ShoutTopic).filter(ShoutTopic.shout == shout.id).delete()
# Add the authors
for author in (draft.authors or []):
sa = ShoutAuthor(shout=shout.id, author=author.id)
session.add(sa)
# Add the topics
for topic in (draft.topics or []):
st = ShoutTopic(
topic=topic.id,
shout=shout.id,
main=topic.main if hasattr(topic, "main") else False
)
session.add(st)
session.commit()
# Invalidate the cache (these helpers are awaited elsewhere in this module)
await invalidate_shouts_cache()
await invalidate_shout_related_cache(shout.id)
# Notify about the publication
await notify_shout(shout.id)
# Update the search index
search_service.index_shout(shout)
logger.info(f"Successfully published shout #{shout.id} from draft #{draft_id}")
logger.debug(f"Shout data: {shout.dict()}")
return {"shout": shout}
except Exception as e:
logger.error(f"Failed to publish draft {draft_id}: {e}", exc_info=True)
return {"error": f"Failed to publish draft: {str(e)}"}


@ -1,826 +0,0 @@
import time
import orjson
import trafilatura
from sqlalchemy import and_, desc, select
from sqlalchemy.orm import joinedload, selectinload
from sqlalchemy.sql.functions import coalesce
from cache.cache import (
cache_author,
cache_topic,
invalidate_shout_related_cache,
invalidate_shouts_cache,
)
from orm.author import Author
from orm.draft import Draft
from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic
from resolvers.follower import follow, unfollow
from resolvers.stat import get_with_stat
from services.auth import login_required
from services.db import local_session
from services.notify import notify_shout
from services.schema import mutation, query
from services.search import search_service
from utils.html_wrapper import wrap_html_fragment
from utils.logger import root_logger as logger
async def cache_by_id(entity, entity_id: int, cache_method):
"""Cache an entity by its ID using the provided cache method.
Args:
entity: The SQLAlchemy model class to query
entity_id (int): The ID of the entity to cache
cache_method: The caching function to use
Returns:
dict: The cached entity data if successful, None if entity not found
Example:
>>> async def test_cache():
... author = await cache_by_id(Author, 1, cache_author)
... assert author['id'] == 1
... assert 'name' in author
... return author
"""
caching_query = select(entity).filter(entity.id == entity_id)
result = get_with_stat(caching_query)
if not result or not result[0]:
logger.warning(f"{entity.__name__} with id {entity_id} not found")
return
x = result[0]
d = x.dict() # convert object to dictionary
await cache_method(d)  # cache_author / cache_topic are coroutines elsewhere in this module
return d
@query.field("get_my_shout")
@login_required
async def get_my_shout(_, info, shout_id: int):
"""Get a shout by ID if the requesting user has permission to view it.
DEPRECATED: use `load_drafts` instead
Args:
info: GraphQL resolver info containing context
shout_id (int): ID of the shout to retrieve
Returns:
dict: Contains either:
- error (str): Error message if retrieval failed
- shout (Shout): The requested shout if found and accessible
Permissions:
User must be:
- The shout creator
- Listed as an author
- Have editor role
Example:
>>> async def test_get_my_shout():
... context = {'user_id': '123', 'author': {'id': 1}, 'roles': []}
... info = type('Info', (), {'context': context})()
... result = await get_my_shout(None, info, 1)
... assert result['error'] is None
... assert result['shout'].id == 1
... return result
"""
user_id = info.context.get("user_id", "")
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
roles = info.context.get("roles", [])
shout = None
if not user_id or not author_id:
return {"error": "unauthorized", "shout": None}
with local_session() as session:
shout = (
session.query(Shout)
.filter(Shout.id == shout_id)
.options(joinedload(Shout.authors), joinedload(Shout.topics))
.filter(Shout.deleted_at.is_(None))
.first()
)
if not shout:
return {"error": "no shout found", "shout": None}
# Convert the media JSON into a list of MediaItem objects
if hasattr(shout, "media") and shout.media:
if isinstance(shout.media, str):
try:
shout.media = orjson.loads(shout.media)
except Exception as e:
logger.error(f"Error parsing shout media: {e}")
shout.media = []
if not isinstance(shout.media, list):
shout.media = [shout.media] if shout.media else []
else:
shout.media = []
logger.debug(f"got {len(shout.authors)} shout authors, created by {shout.created_by}")
is_editor = "editor" in roles
logger.debug(f"viewer is{'' if is_editor else ' not'} editor")
is_creator = author_id == shout.created_by
logger.debug(f"viewer is{'' if is_creator else ' not'} creator")
is_author = bool(list(filter(lambda x: x.id == int(author_id), [x for x in shout.authors])))
logger.debug(f"viewer is{'' if is_author else ' not'} author")
can_edit = is_editor or is_author or is_creator
if not can_edit:
return {"error": "forbidden", "shout": None}
logger.debug("got shout editor with data")
return {"error": None, "shout": shout}
@query.field("get_shouts_drafts")
@login_required
async def get_shouts_drafts(_, info):
# user_id = info.context.get("user_id")
author_dict = info.context.get("author")
if not author_dict:
return {"error": "author profile was not found"}
author_id = author_dict.get("id")
shouts = []
with local_session() as session:
if author_id:
q = (
select(Shout)
.options(joinedload(Shout.authors), joinedload(Shout.topics))
.filter(and_(Shout.deleted_at.is_(None), Shout.created_by == int(author_id)))
.filter(Shout.published_at.is_(None))
.order_by(desc(coalesce(Shout.updated_at, Shout.created_at)))
.group_by(Shout.id)
)
shouts = [shout for [shout] in session.execute(q).unique()]
return {"shouts": shouts}
# @mutation.field("create_shout")
# @login_required
async def create_shout(_, info, inp):
logger.info(f"Starting create_shout with input: {inp}")
user_id = info.context.get("user_id")
author_dict = info.context.get("author")
logger.debug(f"Context user_id: {user_id}, author: {author_dict}")
if not author_dict:
logger.error("Author profile not found in context")
return {"error": "author profile was not found"}
author_id = author_dict.get("id")
if user_id and author_id:
try:
with local_session() as session:
author_id = int(author_id)
current_time = int(time.time())
slug = inp.get("slug") or f"draft-{current_time}"
logger.info(f"Creating shout with input: {inp}")
# Create the publication without topics
body = inp.get("body", "")
lead = inp.get("lead", "")
body_html = wrap_html_fragment(body)
lead_html = wrap_html_fragment(lead)
body_text = trafilatura.extract(body_html)
lead_text = trafilatura.extract(lead_html)
body_teaser = ". ".join(body_text.strip()[:300].split(". ")[:-1]) if body_text else ""
seo = inp.get("seo", (lead_text.strip() if lead_text else "") or body_teaser)
new_shout = Shout(
slug=slug,
body=body,
seo=seo,
lead=lead,
layout=inp.get("layout", "article"),
title=inp.get("title", ""),
created_by=author_id,
created_at=current_time,
community=1,
)
# Ensure the slug is unique
logger.debug(f"Checking for existing slug: {slug}")
same_slug_shout = session.query(Shout).filter(Shout.slug == new_shout.slug).first()
c = 1
while same_slug_shout is not None:
logger.debug(f"Found duplicate slug, trying iteration {c}")
new_shout.slug = f"{slug}-{c}"
same_slug_shout = session.query(Shout).filter(Shout.slug == new_shout.slug).first()
c += 1
try:
logger.info("Creating new shout object")
session.add(new_shout)
session.commit()
logger.info(f"Created shout with ID: {new_shout.id}")
except Exception as e:
logger.error(f"Error creating shout object: {e}", exc_info=True)
return {"error": f"Database error: {str(e)}"}
# Link the author
try:
logger.debug(f"Linking author {author_id} to shout {new_shout.id}")
sa = ShoutAuthor(shout=new_shout.id, author=author_id)
session.add(sa)
except Exception as e:
logger.error(f"Error linking author: {e}", exc_info=True)
return {"error": f"Error linking author: {str(e)}"}
# Link the topics
input_topics = inp.get("topics", [])
if input_topics:
try:
logger.debug(f"Linking topics: {[t.slug for t in input_topics]}")
main_topic = inp.get("main_topic")
for topic in input_topics:
st = ShoutTopic(
topic=topic.id,
shout=new_shout.id,
main=(topic.slug == main_topic) if main_topic else False,
)
session.add(st)
logger.debug(f"Added topic {topic.slug} {'(main)' if st.main else ''}")
except Exception as e:
logger.error(f"Error linking topics: {e}", exc_info=True)
return {"error": f"Error linking topics: {str(e)}"}
try:
session.commit()
logger.info("Final commit successful")
except Exception as e:
logger.error(f"Error in final commit: {e}", exc_info=True)
return {"error": f"Error in final commit: {str(e)}"}
# Fetch the created publication
shout = session.query(Shout).filter(Shout.id == new_shout.id).first()
# Subscribe the author to it
try:
logger.debug("Following created shout")
await follow(None, info, "shout", shout.slug)
except Exception as e:
logger.warning(f"Error following shout: {e}", exc_info=True)
logger.info(f"Successfully created shout {shout.id}")
return {"shout": shout}
except Exception as e:
logger.error(f"Unexpected error in create_shout: {e}", exc_info=True)
return {"error": f"Unexpected error: {str(e)}"}
error_msg = "cant create shout" if user_id else "unauthorized"
logger.error(f"Create shout failed: {error_msg}")
return {"error": error_msg}
def patch_main_topic(session, main_topic_slug, shout):
"""Update the main topic for a shout."""
logger.info(f"Starting patch_main_topic for shout#{shout.id} with slug '{main_topic_slug}'")
logger.debug(f"Current shout topics: {[(t.topic.slug, t.main) for t in shout.topics]}")
with session.begin():
# Get the current main topic
old_main = (
session.query(ShoutTopic).filter(and_(ShoutTopic.shout == shout.id, ShoutTopic.main.is_(True))).first()
)
if old_main:
logger.info(f"Found current main topic: {old_main.topic.slug}")
else:
logger.info("No current main topic found")
# Find the new main topic
main_topic = session.query(Topic).filter(Topic.slug == main_topic_slug).first()
if not main_topic:
logger.error(f"Main topic with slug '{main_topic_slug}' not found")
return
logger.info(f"Found new main topic: {main_topic.slug} (id={main_topic.id})")
# Find the relation to the new main topic
new_main = (
session.query(ShoutTopic)
.filter(and_(ShoutTopic.shout == shout.id, ShoutTopic.topic == main_topic.id))
.first()
)
logger.debug(f"Found new main topic relation: {new_main is not None}")
if old_main and new_main and old_main is not new_main:
logger.info(f"Updating main topic flags: {old_main.topic.slug} -> {new_main.topic.slug}")
old_main.main = False
session.add(old_main)
new_main.main = True
session.add(new_main)
session.flush()
logger.info(f"Main topic updated for shout#{shout.id}")
else:
logger.warning(f"No changes needed for main topic (old={old_main is not None}, new={new_main is not None})")
def patch_topics(session, shout, topics_input):
"""Update the topics associated with a shout.
Args:
session: SQLAlchemy session
shout (Shout): The shout to update
topics_input (list): List of topic dicts with fields:
- id (int): Topic ID (<0 for new topics)
- slug (str): Topic slug
- title (str): Topic title (for new topics)
Side Effects:
- Creates new topics if needed
- Updates shout-topic associations
- Refreshes shout object with new topics
Example:
>>> def test_patch_topics():
... topics = [
... {'id': -1, 'slug': 'new-topic', 'title': 'New Topic'},
... {'id': 1, 'slug': 'existing-topic'}
... ]
... with local_session() as session:
... shout = session.query(Shout).first()
... patch_topics(session, shout, topics)
... assert len(shout.topics) == 2
... assert any(t.slug == 'new-topic' for t in shout.topics)
... return shout.topics
"""
logger.info(f"Starting patch_topics for shout#{shout.id}")
logger.info(f"Received topics_input: {topics_input}")
# Create new topics if any
new_topics_to_link = [Topic(**new_topic) for new_topic in topics_input if new_topic["id"] < 0]
if new_topics_to_link:
logger.info(f"Creating new topics: {[t.dict() for t in new_topics_to_link]}")
session.add_all(new_topics_to_link)
session.flush()
# Get the current links
current_links = session.query(ShoutTopic).filter(ShoutTopic.shout == shout.id).all()
logger.info(f"Current topic links: {[{t.topic: t.main} for t in current_links]}")
# Remove the old links
if current_links:
logger.info(f"Removing old topic links for shout#{shout.id}")
for link in current_links:
session.delete(link)
session.flush()
# Create the new links
for topic_input in topics_input:
topic_id = topic_input["id"]
if topic_id < 0:
topic = next(t for t in new_topics_to_link if t.slug == topic_input["slug"])
topic_id = topic.id
logger.info(f"Creating new topic link: shout#{shout.id} -> topic#{topic_id}")
new_link = ShoutTopic(shout=shout.id, topic=topic_id, main=False)
session.add(new_link)
session.flush()
# Refresh the relations on the shout object
session.refresh(shout)
logger.info(f"Successfully updated topics for shout#{shout.id}")
logger.info(f"Final shout topics: {[t.dict() for t in shout.topics]}")
# @mutation.field("update_shout")
# @login_required
async def update_shout(_, info, shout_id: int, shout_input=None, publish=False):
logger.info(f"Starting update_shout with id={shout_id}, publish={publish}")
logger.debug(f"Full shout_input: {shout_input}") # DraftInput
user_id = info.context.get("user_id")
roles = info.context.get("roles", [])
author_dict = info.context.get("author")
if not author_dict:
logger.error("Author profile not found")
return {"error": "author profile was not found"}
author_id = author_dict.get("id")
shout_input = shout_input or {}
current_time = int(time.time())
shout_id = shout_id or shout_input.get("id", shout_id)
slug = shout_input.get("slug")
if not user_id:
logger.error("Unauthorized update attempt")
return {"error": "unauthorized"}
try:
with local_session() as session:
if author_id:
logger.info(f"Processing update for shout#{shout_id} by author #{author_id}")
shout_by_id = (
session.query(Shout)
.options(joinedload(Shout.topics).joinedload(ShoutTopic.topic), joinedload(Shout.authors))
.filter(Shout.id == shout_id)
.first()
)
if not shout_by_id:
logger.error(f"shout#{shout_id} not found")
return {"error": "shout not found"}
logger.info(f"Found shout#{shout_id}")
# Log the current topics
current_topics = (
[{"id": t.id, "slug": t.slug, "title": t.title} for t in shout_by_id.topics]
if shout_by_id.topics
else []
)
logger.info(f"Current topics for shout#{shout_id}: {current_topics}")
if slug != shout_by_id.slug:
same_slug_shout = session.query(Shout).filter(Shout.slug == slug).first()
c = 1
while same_slug_shout is not None:
c += 1
slug = f"{slug}-{c}"
same_slug_shout = session.query(Shout).filter(Shout.slug == slug).first()
shout_input["slug"] = slug
logger.info(f"shout#{shout_id} slug patched")
if any(x.id == author_id for x in shout_by_id.authors) or "editor" in roles:
logger.info(f"Author #{author_id} has permission to edit shout#{shout_id}")
# topics patch
topics_input = shout_input.get("topics")
if topics_input:
logger.info(f"Received topics_input for shout#{shout_id}: {topics_input}")
try:
patch_topics(session, shout_by_id, topics_input)
logger.info(f"Successfully patched topics for shout#{shout_id}")
# Refresh the relations in the session after patch_topics
session.refresh(shout_by_id)
except Exception as e:
logger.error(f"Error patching topics: {e}", exc_info=True)
return {"error": f"Failed to update topics: {str(e)}"}
del shout_input["topics"]
for tpc in topics_input:
await cache_by_id(Topic, tpc["id"], cache_topic)
else:
logger.warning(f"No topics_input received for shout#{shout_id}")
# main topic
main_topic = shout_input.get("main_topic")
if main_topic:
logger.info(f"Updating main topic for shout#{shout_id} to {main_topic}")
patch_main_topic(session, main_topic, shout_by_id)
shout_input["updated_at"] = current_time
if publish:
logger.info(f"Publishing shout#{shout_id}")
shout_input["published_at"] = current_time
# Check that the author link exists
logger.info(f"Checking author link for shout#{shout_id} and author#{author_id}")
author_link = (
session.query(ShoutAuthor)
.filter(and_(ShoutAuthor.shout == shout_id, ShoutAuthor.author == author_id))
.first()
)
if not author_link:
logger.info(f"Adding missing author link for shout#{shout_id}")
sa = ShoutAuthor(shout=shout_id, author=author_id)
session.add(sa)
session.flush()
logger.info("Author link added successfully")
else:
logger.info("Author link already exists")
# Log the final state before saving
logger.info(f"Final shout_input for update: {shout_input}")
Shout.update(shout_by_id, shout_input)
session.add(shout_by_id)
try:
session.commit()
# Refresh the object after commit to load all relations
session.refresh(shout_by_id)
logger.info(f"Successfully committed updates for shout#{shout_id}")
except Exception as e:
logger.error(f"Commit failed: {e}", exc_info=True)
return {"error": f"Failed to save changes: {str(e)}"}
# Verify the topics after the update
updated_topics = (
[{"id": t.id, "slug": t.slug, "title": t.title} for t in shout_by_id.topics]
if shout_by_id.topics
else []
)
logger.info(f"Updated topics for shout#{shout_id}: {updated_topics}")
# Invalidate the cache after the update
try:
logger.info("Invalidating cache after shout update")
cache_keys = [
"feed",  # main feed
f"author_{author_id}",  # the author's publications
"random_top",  # random top shouts
"unrated",  # unrated shouts
]
# Add cache keys for the publication's topics
for topic in shout_by_id.topics:
cache_keys.append(f"topic_{topic.id}")
cache_keys.append(f"topic_shouts_{topic.id}")
await invalidate_shouts_cache(cache_keys)
await invalidate_shout_related_cache(shout_by_id, author_id)
# Refresh the topic and author caches
for topic in shout_by_id.topics:
await cache_by_id(Topic, topic.id, cache_topic)
for author in shout_by_id.authors:
await cache_author(author.dict())
logger.info("Cache invalidated successfully")
except Exception as cache_error:
logger.warning(f"Cache invalidation error: {cache_error}", exc_info=True)
if not publish:
await notify_shout(shout_by_id.dict(), "update")
else:
await notify_shout(shout_by_id.dict(), "published")
# search service indexing
search_service.index(shout_by_id)
for a in shout_by_id.authors:
await cache_by_id(Author, a.id, cache_author)
logger.info(f"shout#{shout_id} updated")
# Fetch the full shout data with relations
shout_with_relations = (
session.query(Shout)
.options(joinedload(Shout.topics).joinedload(ShoutTopic.topic), joinedload(Shout.authors))
.filter(Shout.id == shout_id)
.first()
)
# Build a dictionary with the base fields
shout_dict = shout_with_relations.dict()
# Explicitly add the related data
shout_dict["topics"] = (
[
{"id": topic.id, "slug": topic.slug, "title": topic.title}
for topic in shout_with_relations.topics
]
if shout_with_relations.topics
else []
)
# Add main_topic to the shout dictionary
shout_dict["main_topic"] = get_main_topic(shout_with_relations.topics)
shout_dict["authors"] = (
[
{"id": author.id, "name": author.name, "slug": author.slug}
for author in shout_with_relations.authors
]
if shout_with_relations.authors
else []
)
logger.info(f"Final shout data with relations: {shout_dict}")
logger.debug(
f"Loaded topics details: {[(t.topic.slug if t.topic else 'no-topic', t.main) for t in shout_with_relations.topics]}"
)
return {"shout": shout_dict, "error": None}
else:
logger.warning(f"Access denied: author #{author_id} cannot edit shout#{shout_id}")
return {"error": "access denied", "shout": None}
except Exception as exc:
logger.error(f"Unexpected error in update_shout: {exc}", exc_info=True)
logger.error(f"Failed input data: {shout_input}")
return {"error": "cant update shout"}
return {"error": "cant update shout"}
# @mutation.field("delete_shout")
# @login_required
async def delete_shout(_, info, shout_id: int):
user_id = info.context.get("user_id")
roles = info.context.get("roles", [])
author_dict = info.context.get("author")
if not author_dict:
return {"error": "author profile was not found"}
author_id = author_dict.get("id")
if user_id and author_id:
author_id = int(author_id)
with local_session() as session:
shout = session.query(Shout).filter(Shout.id == shout_id).first()
if not isinstance(shout, Shout):
return {"error": "invalid shout id"}
shout_dict = shout.dict()
# NOTE: only owner and editor can mark the shout as deleted
if shout_dict["created_by"] == author_id or "editor" in roles:
shout_dict["deleted_at"] = int(time.time())
Shout.update(shout, shout_dict)
session.add(shout)
session.commit()
for author in shout.authors:
await cache_by_id(Author, author.id, cache_author)
info.context["author"] = author.dict()
info.context["user_id"] = author.user
await unfollow(None, info, "shout", shout.slug)
for topic in shout.topics:
await cache_by_id(Topic, topic.id, cache_topic)
await notify_shout(shout_dict, "delete")
return {"error": None}
else:
return {"error": "access denied"}
def get_main_topic(topics):
"""Get the main topic from a list of ShoutTopic objects."""
logger.info(f"Starting get_main_topic with {len(topics) if topics else 0} topics")
logger.debug(
f"Topics data: {[(t.slug, getattr(t, 'main', False)) for t in topics] if topics else []}"
)
if not topics:
logger.warning("No topics provided to get_main_topic")
return {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True}
# Check whether topics is a list of ShoutTopic or Topic objects
if hasattr(topics[0], 'topic') and topics[0].topic:
# For ShoutTopic objects (old format)
# Find first main topic in original order
main_topic_rel = next((st for st in topics if getattr(st, 'main', False)), None)
logger.debug(
f"Found main topic relation: {main_topic_rel.topic.slug if main_topic_rel and main_topic_rel.topic else None}"
)
if main_topic_rel and main_topic_rel.topic:
result = {
"slug": main_topic_rel.topic.slug,
"title": main_topic_rel.topic.title,
"id": main_topic_rel.topic.id,
"is_main": True,
}
logger.info(f"Returning main topic: {result}")
return result
# If no main found but topics exist, return first
if topics and topics[0].topic:
logger.info(f"No main topic found, using first topic: {topics[0].topic.slug}")
result = {
"slug": topics[0].topic.slug,
"title": topics[0].topic.title,
"id": topics[0].topic.id,
"is_main": True,
}
return result
else:
# For Topic objects (new format from selectinload)
# After the switch to selectinload this is simply a list of Topic objects
if topics:
logger.info(f"Using first topic as main: {topics[0].slug}")
result = {
"slug": topics[0].slug,
"title": topics[0].title,
"id": topics[0].id,
"is_main": True,
}
return result
logger.warning("No valid topics found, returning default")
return {"slug": "notopic", "title": "no topic", "id": 0, "is_main": True}
@mutation.field("unpublish_shout")
@login_required
async def unpublish_shout(_, info, shout_id: int):
"""Снимает публикацию (shout) с публикации.
Предзагружает связанный черновик (draft) и его авторов/темы, чтобы избежать
ошибок при последующем доступе к ним в GraphQL.
Args:
shout_id: ID публикации для снятия с публикации
Returns:
dict: Снятая с публикации публикация или сообщение об ошибке
"""
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not author_id:
# Ideally there should be a permission check: is this author allowed to unpublish
return {"error": "Author ID is required"}
shout = None
with local_session() as session:
try:
# Load the Shout with all relations to build a proper response
shout = (
session.query(Shout)
.options(
joinedload(Shout.authors),
selectinload(Shout.topics)
)
.filter(Shout.id == shout_id)
.first()
)
if not shout:
logger.warning(f"Shout not found for unpublish: ID {shout_id}")
return {"error": "Shout not found"}
# If the publication has a linked draft, load it with its relationships
if shout.draft:
# Load the draft separately together with its relations
draft = (
session.query(Draft)
.options(
selectinload(Draft.authors),
selectinload(Draft.topics)
)
.filter(Draft.id == shout.draft)
.first()
)
# Manually attach the draft to the publication for API access
if draft:
shout.draft_obj = draft
# TODO: add an access-rights check if needed
# if author_id not in [a.id for a in shout.authors]:  # requires selectinload(Shout.authors) above
#     logger.warning(f"Author {author_id} denied unpublishing shout {shout_id}")
#     return {"error": "Access denied"}
# Remember the old slug and id to build the publication field
shout_slug = shout.slug
shout_id_for_publication = shout.id
# Unpublish (set published_at to None)
shout.published_at = None
session.commit()
# Build a complete dictionary for the response
shout_dict = shout.dict()
# Add the related data
shout_dict["topics"] = (
[
{"id": topic.id, "slug": topic.slug, "title": topic.title}
for topic in shout.topics
]
if shout.topics
else []
)
# Add main_topic
shout_dict["main_topic"] = get_main_topic(shout.topics)
# Add the authors
shout_dict["authors"] = (
[
{"id": author.id, "name": author.name, "slug": author.slug}
for author in shout.authors
]
if shout.authors
else []
)
# Important! Update the publication field to reflect the "unpublished" state
shout_dict["publication"] = {
"id": shout_id_for_publication,
"slug": shout_slug,
"published_at": None  # the key change: published_at becomes None
}
# Invalidate the cache
try:
cache_keys = [
"feed",  # main feed
f"author_{author_id}",  # the author's publications
"random_top",  # random top shouts
"unrated",  # unrated shouts
]
await invalidate_shout_related_cache(shout, author_id)
await invalidate_shouts_cache(cache_keys)
logger.info(f"Cache invalidated after unpublishing shout {shout_id}")
except Exception as cache_err:
logger.error(f"Failed to invalidate cache for unpublish shout {shout_id}: {cache_err}")
except Exception as e:
session.rollback()
logger.error(f"Failed to unpublish shout {shout_id}: {e}", exc_info=True)
return {"error": f"Failed to unpublish shout: {str(e)}"}
# Return the prepared dictionary instead of the ORM object
logger.info(f"Shout {shout_id} unpublished successfully by author {author_id}")
return {"shout": shout_dict}


@ -1,198 +0,0 @@
from typing import List
from sqlalchemy import and_, select
from orm.author import Author, AuthorFollower
from orm.shout import Shout, ShoutAuthor, ShoutReactionsFollower, ShoutTopic
from orm.topic import Topic, TopicFollower
from resolvers.reader import (
apply_options,
get_shouts_with_links,
has_field,
query_with_stat,
)
from services.auth import login_required
from services.db import local_session
from services.schema import query
from utils.logger import root_logger as logger
@query.field("load_shouts_coauthored")
@login_required
async def load_shouts_coauthored(_, info, options):
"""
Загрузка публикаций, написанных в соавторстве с пользователем.
:param info: Информаци о контексте GraphQL.
:param options: Опции фильтрации и сортировки.
:return: Список публикаций в соавтостве.
"""
author_id = info.context.get("author", {}).get("id")
if not author_id:
return []
q = query_with_stat(info)
q = q.filter(Shout.authors.any(id=author_id))
q, limit, offset = apply_options(q, options)
return get_shouts_with_links(info, q, limit, offset=offset)
@query.field("load_shouts_discussed")
@login_required
async def load_shouts_discussed(_, info, options):
"""
Загрузка публикаций, которые обсуждались пользователем.
:param info: Информация о контексте GraphQL.
:param options: Опции фильтрации и сортировки.
:return: Список публикаций, обсужденых пользователем.
"""
author_id = info.context.get("author", {}).get("id")
if not author_id:
return []
q = query_with_stat(info)
options["filters"]["commented"] = True
q, limit, offset = apply_options(q, options, author_id)
return get_shouts_with_links(info, q, limit, offset=offset)
def shouts_by_follower(info, follower_id: int, options):
"""
Загружает публикации, на которые подписан автор.
- по авторам
- по темам
- по реакциям
:param info: Информация о контексте GraphQL.
:param follower_id: Идентификатор автора.
:param options: Опции фильтрации и сортировки.
:return: Список публикаций.
"""
q = query_with_stat(info)
reader_followed_authors = select(AuthorFollower.author).where(AuthorFollower.follower == follower_id)
reader_followed_topics = select(TopicFollower.topic).where(TopicFollower.follower == follower_id)
reader_followed_shouts = select(ShoutReactionsFollower.shout).where(ShoutReactionsFollower.follower == follower_id)
followed_subquery = (
select(Shout.id)
.join(ShoutAuthor, ShoutAuthor.shout == Shout.id)
.join(ShoutTopic, ShoutTopic.shout == Shout.id)
.where(
ShoutAuthor.author.in_(reader_followed_authors)
| ShoutTopic.topic.in_(reader_followed_topics)
| Shout.id.in_(reader_followed_shouts)
)
.scalar_subquery()
)
q = q.filter(Shout.id.in_(followed_subquery))
q, limit, offset = apply_options(q, options)
shouts = get_shouts_with_links(info, q, limit, offset=offset)
return shouts
@query.field("load_shouts_followed_by")
async def load_shouts_followed_by(_, info, slug: str, options) -> List[Shout]:
"""
Загружает публикации, на которые подписан автор по slug.
:param info: Информация о контексте GraphQL.
:param slug: Slug автора.
:param options: Опции фильтрации и сортировки.
:return: Список публикаций.
"""
with local_session() as session:
author = session.query(Author).filter(Author.slug == slug).first()
if author:
follower_id = author.dict()["id"]
shouts = shouts_by_follower(info, follower_id, options)
return shouts
return []
@query.field("load_shouts_feed")
@login_required
async def load_shouts_feed(_, info, options) -> List[Shout]:
"""
Загружает публикации, на которые подписан авторизованный пользователь.
:param info: Информация о контексте GraphQL.
:param options: Опции фильтрации и сортировки.
:return: Список публикаций.
"""
author_id = info.context.get("author", {}).get("id")
return shouts_by_follower(info, author_id, options) if author_id else []
@query.field("load_shouts_authored_by")
async def load_shouts_authored_by(_, info, slug: str, options) -> List[Shout]:
"""
Загружает публикации, написанные автором по slug.
:param info: Информация о контексте GraphQL.
:param slug: Slug автора.
:param options: Опции фильтрации и сортировки.
:return: Список публикаций.
"""
with local_session() as session:
author = session.query(Author).filter(Author.slug == slug).first()
if author:
try:
author_id: int = author.dict()["id"]
q = (
query_with_stat(info)
if has_field(info, "stat")
else select(Shout).filter(and_(Shout.published_at.is_not(None), Shout.deleted_at.is_(None)))
)
q = q.filter(Shout.authors.any(id=author_id))
q, limit, offset = apply_options(q, options, author_id)
shouts = get_shouts_with_links(info, q, limit, offset=offset)
return shouts
except Exception as error:
logger.debug(error)
return []
@query.field("load_shouts_with_topic")
async def load_shouts_with_topic(_, info, slug: str, options) -> List[Shout]:
"""
Загружает публикации, связанные с темой по slug.
:param info: Информация о контексте GraphQL.
:param slug: Slug темы.
:param options: Опции фильтрации и сортировки.
:return: Список публикаций.
"""
with local_session() as session:
topic = session.query(Topic).filter(Topic.slug == slug).first()
if topic:
try:
topic_id: int = topic.dict()["id"]
q = (
query_with_stat(info)
if has_field(info, "stat")
else select(Shout).filter(and_(Shout.published_at.is_not(None), Shout.deleted_at.is_(None)))
)
q = q.filter(Shout.topics.any(id=topic_id))
q, limit, offset = apply_options(q, options)
shouts = get_shouts_with_links(info, q, limit, offset=offset)
return shouts
except Exception as error:
logger.debug(error)
return []
def apply_filters(q, filters):
"""
Применяет фильтры к запросу
"""
logger.info(f"Applying filters: {filters}")
if filters.get("published"):
q = q.filter(Shout.published_at.is_not(None))
logger.info("Added published filter")
if filters.get("topic"):
topic_slug = filters["topic"]
q = q.join(ShoutTopic).join(Topic).filter(Topic.slug == topic_slug)
logger.info(f"Added topic filter: {topic_slug}")
return q
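# A minimal sketch of composing apply_filters onto a Shout select. The two
# filter keys are the ones the function recognizes; the values are illustrative.
q = select(Shout)
q = apply_filters(q, {"published": True, "topic": "culture"})
# -> adds Shout.published_at IS NOT NULL and joins Topic on slug == "culture"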


@ -1,222 +0,0 @@
from typing import List
from graphql import GraphQLError
from sqlalchemy import select
from sqlalchemy.sql import and_
from cache.cache import (
cache_author,
cache_topic,
get_cached_follower_authors,
get_cached_follower_topics,
)
from orm.author import Author, AuthorFollower
from orm.community import Community, CommunityFollower
from orm.reaction import Reaction
from orm.shout import Shout, ShoutReactionsFollower
from orm.topic import Topic, TopicFollower
from resolvers.stat import get_with_stat
from services.auth import login_required
from services.db import local_session
from services.notify import notify_follower
from services.schema import mutation, query
from utils.logger import root_logger as logger
@mutation.field("follow")
@login_required
async def follow(_, info, what, slug="", entity_id=0):
logger.debug("Начало выполнения функции 'follow'")
user_id = info.context.get("user_id")
follower_dict = info.context.get("author")
logger.debug(f"follower: {follower_dict}")
if not user_id or not follower_dict:
return GraphQLError("unauthorized")
follower_id = follower_dict.get("id")
logger.debug(f"follower_id: {follower_id}")
entity_classes = {
"AUTHOR": (Author, AuthorFollower, get_cached_follower_authors, cache_author),
"TOPIC": (Topic, TopicFollower, get_cached_follower_topics, cache_topic),
"COMMUNITY": (Community, CommunityFollower, None, None), # Нет методов кэша для сообщества
"SHOUT": (Shout, ShoutReactionsFollower, None, None), # Нет методов кэша для shout
}
if what not in entity_classes:
logger.error(f"Неверный тип для следования: {what}")
return {"error": "invalid follow type"}
entity_class, follower_class, get_cached_follows_method, cache_method = entity_classes[what]
entity_type = what.lower()
entity_dict = None
try:
logger.debug("Попытка получить сущность из базы данных")
with local_session() as session:
entity_query = select(entity_class).filter(entity_class.slug == slug)
entities = get_with_stat(entity_query)
[entity] = entities
if not entity:
logger.warning(f"{what.lower()} не найден по slug: {slug}")
return {"error": f"{what.lower()} not found"}
if not entity_id and entity:
entity_id = entity.id
entity_dict = entity.dict()
logger.debug(f"entity_id: {entity_id}, entity_dict: {entity_dict}")
if entity_id:
logger.debug("Проверка существующей подписки")
with local_session() as session:
existing_sub = (
session.query(follower_class)
.filter(follower_class.follower == follower_id, getattr(follower_class, entity_type) == entity_id)
.first()
)
if existing_sub:
logger.info(f"Пользователь {follower_id} уже подписан на {what.lower()} с ID {entity_id}")
else:
logger.debug("Добавление новой записи в базу данных")
sub = follower_class(follower=follower_id, **{entity_type: entity_id})
logger.debug(f"Создан объект подписки: {sub}")
session.add(sub)
session.commit()
logger.info(f"Пользователь {follower_id} подписался на {what.lower()} с ID {entity_id}")
follows = None
if cache_method:
logger.debug("Обновление кэша")
await cache_method(entity_dict)
if get_cached_follows_method:
logger.debug("Получение подписок из кэша")
existing_follows = await get_cached_follows_method(follower_id)
follows = [*existing_follows, entity_dict] if not existing_sub else existing_follows
logger.debug("Обновлен список подписок")
if what == "AUTHOR" and not existing_sub:
logger.debug("Отправка уведомления автору о подписке")
await notify_follower(follower=follower_dict, author_id=entity_id, action="follow")
except Exception as exc:
logger.exception("Произошла ошибка в функции 'follow'")
return {"error": str(exc)}
return {f"{what.lower()}s": follows}
@mutation.field("unfollow")
@login_required
async def unfollow(_, info, what, slug="", entity_id=0):
logger.debug("Начало выполнения функции 'unfollow'")
user_id = info.context.get("user_id")
follower_dict = info.context.get("author")
logger.debug(f"follower: {follower_dict}")
if not user_id or not follower_dict:
logger.warning("Неавторизованный доступ при попытке отписаться")
return {"error": "unauthorized"}
follower_id = follower_dict.get("id")
logger.debug(f"follower_id: {follower_id}")
entity_classes = {
"AUTHOR": (Author, AuthorFollower, get_cached_follower_authors, cache_author),
"TOPIC": (Topic, TopicFollower, get_cached_follower_topics, cache_topic),
"COMMUNITY": (Community, CommunityFollower, None, None), # Нет методов кэша для сообщества
"SHOUT": (Shout, ShoutReactionsFollower, None, None), # Нет методов кэша для shout
}
if what not in entity_classes:
logger.error(f"Неверный тип для отписки: {what}")
return {"error": "invalid unfollow type"}
entity_class, follower_class, get_cached_follows_method, cache_method = entity_classes[what]
entity_type = what.lower()
follows = []
error = None
try:
logger.debug("Попытка получить сущность из базы данных")
with local_session() as session:
entity = session.query(entity_class).filter(entity_class.slug == slug).first()
logger.debug(f"Полученная сущность: {entity}")
if not entity:
logger.warning(f"{what.lower()} не найден по slug: {slug}")
return {"error": f"{what.lower()} not found"}
if entity and not entity_id:
entity_id = entity.id
logger.debug(f"entity_id: {entity_id}")
sub = (
session.query(follower_class)
.filter(
and_(
getattr(follower_class, "follower") == follower_id,
getattr(follower_class, entity_type) == entity_id,
)
)
.first()
)
logger.debug(f"Найдена подписка для удаления: {sub}")
if sub:
session.delete(sub)
session.commit()
logger.info(f"Пользователь {follower_id} отписался от {what.lower()} с ID {entity_id}")
if cache_method:
logger.debug("Обновление кэша после отписки")
await cache_method(entity.dict())
if get_cached_follows_method:
logger.debug("Получение подписок из кэша")
existing_follows = await get_cached_follows_method(follower_id)
follows = filter(lambda x: x["id"] != entity_id, existing_follows)
logger.debug("Обновлен список подписок")
if what == "AUTHOR":
logger.debug("Отправка уведомления автору об отписке")
await notify_follower(follower=follower_dict, author_id=entity_id, action="unfollow")
else:
return {"error": "following was not found", f"{entity_type}s": follows}
except Exception as exc:
logger.exception("Произошла ошибка в функции 'unfollow'")
import traceback
traceback.print_exc()
return {"error": str(exc)}
# logger.debug(f"Функция 'unfollow' завершена успешно с результатом: {entity_type}s={follows}, error={error}")
return {f"{entity_type}s": follows, "error": error}
@query.field("get_shout_followers")
def get_shout_followers(_, _info, slug: str = "", shout_id: int | None = None) -> List[Author]:
logger.debug("Начало выполнения функции 'get_shout_followers'")
followers = []
try:
with local_session() as session:
shout = None
if slug:
shout = session.query(Shout).filter(Shout.slug == slug).first()
logger.debug(f"Найден shout по slug: {slug} -> {shout}")
elif shout_id:
shout = session.query(Shout).filter(Shout.id == shout_id).first()
logger.debug(f"Найден shout по ID: {shout_id} -> {shout}")
if shout:
reactions = session.query(Reaction).filter(Reaction.shout == shout.id).all()
logger.debug(f"Полученные реакции для shout ID {shout.id}: {reactions}")
for r in reactions:
followers.append(r.created_by)
logger.debug(f"Добавлен follower: {r.created_by}")
except Exception as _exc:
import traceback
traceback.print_exc()
logger.exception("Произошла ошибка в функции 'get_shout_followers'")
return []
# logger.debug(f"Функция 'get_shout_followers' завершена с {len(followers)} подписчиками")
return followers

resolvers/inbox/chats.py Normal file

@ -0,0 +1,113 @@
import json
import uuid
from datetime import datetime, timezone
from auth.authenticate import login_required
from auth.credentials import AuthCredentials
from base.redis import redis
from base.resolvers import mutation
from validations.inbox import Chat
@mutation.field("updateChat")
@login_required
async def update_chat(_, info, chat_new: Chat):
"""
updating chat
requires info["request"].user.slug to be in chat["admins"]
:param info: GraphQLInfo with request
:param chat_new: dict with chat data
:return: Result { error chat }
"""
auth: AuthCredentials = info.context["request"].auth
chat_id = chat_new["id"]
chat = await redis.execute("GET", f"chats/{chat_id}")
if not chat:
return {"error": "chat not exist"}
chat = dict(json.loads(chat))
# TODO
if auth.user_id in chat["admins"]:
chat.update(
{
"title": chat_new.get("title", chat["title"]),
"description": chat_new.get("description", chat["description"]),
"updatedAt": int(datetime.now(tz=timezone.utc).timestamp()),
"admins": chat_new.get("admins", chat.get("admins") or []),
"users": chat_new.get("users", chat["users"]),
}
)
await redis.execute("SET", f"chats/{chat.id}", json.dumps(chat))
await redis.execute("COMMIT")
return {"error": None, "chat": chat}
@mutation.field("createChat")
@login_required
async def create_chat(_, info, title="", members=None):
auth: AuthCredentials = info.context["request"].auth
members = list(members or [])  # avoid a shared mutable default argument
chat = {}
print("create_chat members: %r" % members)
if auth.user_id not in members:
members.append(int(auth.user_id))
# reuse a chat created before, if one exists
if len(members) == 2 and title == "":
chat = None
print(members)
chatset1 = await redis.execute("SMEMBERS", f"chats_by_user/{members[0]}")
if not chatset1:
chatset1 = set([])
print(chatset1)
chatset2 = await redis.execute("SMEMBERS", f"chats_by_user/{members[1]}")
if not chatset2:
chatset2 = set([])
print(chatset2)
chatset = chatset1.intersection(chatset2)
print(chatset)
for c in chatset:
chat = await redis.execute("GET", f"chats/{c.decode('utf-8')}")
if chat:
chat = json.loads(chat)
if chat["title"] == "":
print("[inbox] createChat found old chat")
print(chat)
break
if chat:
return {"chat": chat, "error": "existed"}
chat_id = str(uuid.uuid4())
chat = {
"id": chat_id,
"users": members,
"title": title,
"createdBy": auth.user_id,
"createdAt": int(datetime.now(tz=timezone.utc).timestamp()),
"updatedAt": int(datetime.now(tz=timezone.utc).timestamp()),
"admins": members if (len(members) == 2 and title == "") else [],
}
for m in members:
await redis.execute("SADD", f"chats_by_user/{m}", chat_id)
await redis.execute("SET", f"chats/{chat_id}", json.dumps(chat))
await redis.execute("SET", f"chats/{chat_id}/next_message_id", str(0))
await redis.execute("COMMIT")
return {"error": None, "chat": chat}
@mutation.field("deleteChat")
@login_required
async def delete_chat(_, info, chat_id: str):
auth: AuthCredentials = info.context["request"].auth
chat = await redis.execute("GET", f"/chats/{chat_id}")
if chat:
chat = dict(json.loads(chat))
if auth.user_id in chat["admins"]:
await redis.execute("DEL", f"chats/{chat_id}")
await redis.execute("SREM", "chats_by_user/" + str(auth.user_id), chat_id)
await redis.execute("COMMIT")
else:
return {"error": "chat not exist"}

resolvers/inbox/load.py Normal file

@ -0,0 +1,138 @@
import json
from auth.authenticate import login_required
from auth.credentials import AuthCredentials
from base.orm import local_session
from base.redis import redis
from base.resolvers import query
from orm.user import User
from resolvers.zine.profile import followed_authors
from .unread import get_unread_counter
# from datetime import datetime, timedelta, timezone
async def load_messages(chat_id: str, limit: int = 5, offset: int = 0, ids=[]):
"""load :limit messages for :chat_id with :offset"""
messages = []
message_ids = []
if ids:
message_ids += ids
try:
if limit:
mids = await redis.lrange(f"chats/{chat_id}/message_ids", offset, offset + limit)
mids = [mid.decode("utf-8") for mid in mids]
message_ids += mids
except Exception as e:
print(e)
if message_ids:
message_keys = [f"chats/{chat_id}/messages/{mid}" for mid in message_ids]
messages = await redis.mget(*message_keys)
messages = [json.loads(msg.decode("utf-8")) for msg in messages]
replies = []
for m in messages:
rt = m.get("replyTo")
if rt:
rt = int(rt)
if rt not in message_ids:
replies.append(rt)
if replies:
messages += await load_messages(chat_id, limit=0, ids=replies)
return messages
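# A usage sketch for load_messages: it pages ids from the
# chats/<id>/message_ids list and recursively pulls any replyTo targets that
# fell outside the page (the limit=0 recursion loads by explicit ids), so
# every parent ends up present. The chat id is illustrative.
async def show_thread(chat_id: str):
    page = await load_messages(chat_id, limit=20, offset=0)
    by_id = {m["id"]: m for m in page}
    for m in page:
        parent = m.get("replyTo")
        if parent is not None:
            print(m["id"], "replies to", by_id[int(parent)]["id"])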
@query.field("loadChats")
@login_required
async def load_chats(_, info, limit: int = 50, offset: int = 0):
"""load :limit chats of current user with :offset"""
auth: AuthCredentials = info.context["request"].auth
cids = await redis.execute("SMEMBERS", "chats_by_user/" + str(auth.user_id))
if cids:
cids = list(cids)[offset : offset + limit]
if not cids:
print("[inbox.load] no chats were found")
cids = []
onliners = await redis.execute("SMEMBERS", "users-online")
if not onliners:
onliners = []
chats = []
for cid in cids:
cid = cid.decode("utf-8")
c = await redis.execute("GET", "chats/" + cid)
if c:
c = dict(json.loads(c))
c["messages"] = await load_messages(cid, 5, 0)
c["unread"] = await get_unread_counter(cid, auth.user_id)
with local_session() as session:
c["members"] = []
for uid in c["users"]:
a = session.query(User).where(User.id == uid).first()
if a:
c["members"].append(
{
"id": a.id,
"slug": a.slug,
"userpic": a.userpic,
"name": a.name,
"lastSeen": a.lastSeen,
"online": a.id in onliners,
}
)
chats.append(c)
return {"chats": chats, "error": None}
@query.field("loadMessagesBy")
@login_required
async def load_messages_by(_, info, by, limit: int = 10, offset: int = 0):
"""load :limit messages of :chat_id with :offset"""
auth: AuthCredentials = info.context["request"].auth
userchats = await redis.execute("SMEMBERS", "chats_by_user/" + str(auth.user_id))
userchats = [c.decode("utf-8") for c in userchats]
# print('[inbox] userchats: %r' % userchats)
if userchats:
# print('[inbox] loading messages by...')
messages = []
by_chat = by.get("chat")
if by_chat in userchats:
chat = await redis.execute("GET", f"chats/{by_chat}")
# print(chat)
if not chat:
return {"messages": [], "error": "chat not exist"}
# everyone's messages in filtered chat
messages = await load_messages(by_chat, limit, offset)
return {"messages": sorted(list(messages), key=lambda m: m["createdAt"]), "error": None}
else:
return {"error": "Cannot access messages of this chat"}
@query.field("loadRecipients")
async def load_recipients(_, info, limit=50, offset=0):
chat_users = []
auth: AuthCredentials = info.context["request"].auth
onliners = await redis.execute("SMEMBERS", "users-online")
if not onliners:
onliners = []
try:
chat_users += await followed_authors(auth.user_id)
limit = limit - len(chat_users)
except Exception:
pass
with local_session() as session:
chat_users += session.query(User).where(User.emailConfirmed).limit(limit).offset(offset)
members = []
for a in chat_users:
members.append(
{
"id": a.id,
"slug": a.slug,
"userpic": a.userpic,
"name": a.name,
"lastSeen": a.lastSeen,
"online": a.id in onliners,
}
)
return {"members": members, "error": None}

resolvers/inbox/messages.py Normal file

@ -0,0 +1,129 @@
import json
from datetime import datetime, timezone
from auth.authenticate import login_required
from auth.credentials import AuthCredentials
from base.redis import redis
from base.resolvers import mutation
from services.following import FollowingManager, FollowingResult
@mutation.field("createMessage")
@login_required
async def create_message(_, info, chat: str, body: str, replyTo=None):
"""create message with :body for :chat_id replying to :replyTo optionally"""
auth: AuthCredentials = info.context["request"].auth
chat = await redis.execute("GET", f"chats/{chat}")
if not chat:
return {"error": "chat is not exist"}
else:
chat_dict = dict(json.loads(chat))
message_id = await redis.execute("GET", f"chats/{chat_dict['id']}/next_message_id")
message_id = int(message_id)
new_message = {
"chatId": chat_dict["id"],
"id": message_id,
"author": auth.user_id,
"body": body,
"createdAt": int(datetime.now(tz=timezone.utc).timestamp()),
}
if replyTo:
new_message["replyTo"] = replyTo
chat_dict["updatedAt"] = new_message["createdAt"]
await redis.execute("SET", f"chats/{chat_dict['id']}", json.dumps(chat))
print(f"[inbox] creating message {new_message}")
await redis.execute(
"SET", f"chats/{chat_dict['id']}/messages/{message_id}", json.dumps(new_message)
)
await redis.execute("LPUSH", f"chats/{chat_dict['id']}/message_ids", str(message_id))
await redis.execute("SET", f"chats/{chat_dict['id']}/next_message_id", str(message_id + 1))
users = chat_dict["users"]
for user_slug in users:
await redis.execute(
"LPUSH", f"chats/{chat_dict['id']}/unread/{user_slug}", str(message_id)
)
result = FollowingResult("NEW", "chat", new_message)
await FollowingManager.push("chat", result)
return {"message": new_message, "error": None}
@mutation.field("updateMessage")
@login_required
async def update_message(_, info, chat_id: str, message_id: int, body: str):
auth: AuthCredentials = info.context["request"].auth
chat = await redis.execute("GET", f"chats/{chat_id}")
if not chat:
return {"error": "chat not exist"}
message = await redis.execute("GET", f"chats/{chat_id}/messages/{message_id}")
if not message:
return {"error": "message not exist"}
message = json.loads(message)
if message["author"] != auth.user_id:
return {"error": "access denied"}
message["body"] = body
message["updatedAt"] = int(datetime.now(tz=timezone.utc).timestamp())
await redis.execute("SET", f"chats/{chat_id}/messages/{message_id}", json.dumps(message))
result = FollowingResult("UPDATED", "chat", message)
await FollowingManager.push("chat", result)
return {"message": message, "error": None}
@mutation.field("deleteMessage")
@login_required
async def delete_message(_, info, chat_id: str, message_id: int):
auth: AuthCredentials = info.context["request"].auth
chat = await redis.execute("GET", f"chats/{chat_id}")
if not chat:
return {"error": "chat not exist"}
chat = json.loads(chat)
message = await redis.execute("GET", f"chats/{chat_id}/messages/{str(message_id)}")
if not message:
return {"error": "message not exist"}
message = json.loads(message)
if message["author"] != auth.user_id:
return {"error": "access denied"}
await redis.execute("LREM", f"chats/{chat_id}/message_ids", 0, str(message_id))
await redis.execute("DEL", f"chats/{chat_id}/messages/{str(message_id)}")
users = chat["users"]
for user_id in users:
await redis.execute("LREM", f"chats/{chat_id}/unread/{user_id}", 0, str(message_id))
result = FollowingResult("DELETED", "chat", message)
await FollowingManager.push("chat", result)
return {}
@mutation.field("markAsRead")
@login_required
async def mark_as_read(_, info, chat_id: str, messages: list[int]):
auth: AuthCredentials = info.context["request"].auth
chat = await redis.execute("GET", f"chats/{chat_id}")
if not chat:
return {"error": "chat not exist"}
chat = json.loads(chat)
users = set(chat["users"])
if auth.user_id not in users:
return {"error": "access denied"}
for message_id in messages:
await redis.execute("LREM", f"chats/{chat_id}/unread/{auth.user_id}", 0, str(message_id))
return {"error": None}

resolvers/inbox/search.py Normal file

@ -0,0 +1,96 @@
import json
from datetime import datetime, timedelta, timezone
from auth.authenticate import login_required
from auth.credentials import AuthCredentials
from base.orm import local_session
from base.redis import redis
from base.resolvers import query
from orm.user import AuthorFollower, User
from resolvers.inbox.load import load_messages
@query.field("searchRecipients")
@login_required
async def search_recipients(_, info, query: str, limit: int = 50, offset: int = 0):
result = []
# TODO: maybe redis scan?
auth: AuthCredentials = info.context["request"].auth
# chats_by_user/<id> is a Redis set (see create_chat), so read it with SMEMBERS
talk_before = await redis.execute("SMEMBERS", f"chats_by_user/{auth.user_id}")
if talk_before:
talk_before = [c.decode("utf-8") for c in talk_before][offset : offset + limit]
for chat_id in talk_before:
chat = await redis.execute("GET", f"chats/{chat_id}")
if chat:
members = dict(json.loads(chat)).get("users", [])
for member in members:
if str(member).startswith(query):
if member not in result:
result.append(member)
more_amount = limit - len(result)
with local_session() as session:
# followings
result += (
session.query(AuthorFollower.author)
.join(User, User.id == AuthorFollower.follower)
.where(User.slug.startswith(query))
.offset(offset + len(result))
.limit(more_amount)
)
more_amount = limit
# followers
result += (
session.query(AuthorFollower.follower)
.join(User, User.id == AuthorFollower.author)
.where(User.slug.startswith(query))
.offset(offset + len(result))
.limit(offset + len(result) + limit)
)
return {"members": list(result), "error": None}
@query.field("searchMessages")
@login_required
async def search_user_chats(by, messages, user_id: int, limit, offset):
cids = set([])
cids.union(set(await redis.execute("SMEMBERS", "chats_by_user/" + str(user_id))))
messages = []
by_author = by.get("author")
if by_author:
# all author's messages
cids.union(set(await redis.execute("SMEMBERS", f"chats_by_user/{by_author}")))
# author's messages in filtered chat
messages.union(set(filter(lambda m: m["author"] == by_author, list(messages))))
for c in cids:
c = c.decode("utf-8")
messages = await load_messages(c, limit, offset)
body_like = by.get("body")
if body_like:
# search in all messages in all user's chats
for c in cids:
# FIXME: use redis scan here
c = c.decode("utf-8")
mmm = await load_messages(c, limit, offset)
for m in mmm:
if body_like in m["body"]:
messages.add(m)
else:
# search in chat's messages
messages.extend(filter(lambda m: body_like in m["body"], list(messages)))
days = by.get("days")
if days:
messages.extend(
filter(
list(messages),
key=lambda m: (
datetime.now(tz=timezone.utc) - int(m["createdAt"]) < timedelta(days=by["days"])
),
)
)
return {"messages": messages, "error": None}

10
resolvers/inbox/unread.py Normal file
View File

@ -0,0 +1,10 @@
from base.redis import redis
async def get_unread_counter(chat_id: str, user_id: int):
try:
# chat_id is already a str here; no .decode() needed
unread = await redis.execute("LLEN", f"chats/{chat_id}/unread/{user_id}")
return unread or 0
except Exception:
return 0
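# The unread counter is just the length of a per-user Redis list:
# createMessage LPUSHes the new id onto chats/<chat>/unread/<user> for each
# member, markAsRead LREMs it, and get_unread_counter LLENs what is left.
# A condensed view of that lifecycle, assuming the same redis wrapper.
async def unread_lifecycle(chat_id: str, user_id: int, message_id: int):
    key = f"chats/{chat_id}/unread/{user_id}"
    await redis.execute("LPUSH", key, str(message_id))  # on new message
    await redis.execute("LREM", key, 0, str(message_id))  # on read
    return await redis.execute("LLEN", key)  # current badge count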


@ -0,0 +1,89 @@
from sqlalchemy import and_, desc, select, update
from auth.authenticate import login_required
from auth.credentials import AuthCredentials
from base.orm import local_session
from base.resolvers import mutation, query
from orm import Notification
@query.field("loadNotifications")
@login_required
async def load_notifications(_, info, params=None):
if params is None:
params = {}
auth: AuthCredentials = info.context["request"].auth
user_id = auth.user_id
limit = params.get("limit", 50)
offset = params.get("offset", 0)
q = (
select(Notification)
.where(Notification.user == user_id)
.order_by(desc(Notification.createdAt))
.limit(limit)
.offset(offset)
)
notifications = []
with local_session() as session:
total_count = session.query(Notification).where(Notification.user == user_id).count()
total_unread_count = (
session.query(Notification)
.where(and_(Notification.user == user_id, Notification.seen == False)) # noqa: E712
.count()
)
for [notification] in session.execute(q):
notification.type = notification.type.name
notifications.append(notification)
return {
"notifications": notifications,
"totalCount": total_count,
"totalUnreadCount": total_unread_count,
}
@mutation.field("markNotificationAsRead")
@login_required
async def mark_notification_as_read(_, info, notification_id: int):
auth: AuthCredentials = info.context["request"].auth
user_id = auth.user_id
with local_session() as session:
notification = (
session.query(Notification)
.where(and_(Notification.id == notification_id, Notification.user == user_id))
.one()
)
notification.seen = True
session.commit()
return {}
@mutation.field("markAllNotificationsAsRead")
@login_required
async def mark_all_notifications_as_read(_, info):
auth: AuthCredentials = info.context["request"].auth
user_id = auth.user_id
statement = (
update(Notification)
.where(and_(Notification.user == user_id, Notification.seen == False)) # noqa: E712
.values(seen=True)
)
with local_session() as session:
try:
session.execute(statement)
session.commit()
except Exception as e:
session.rollback()
print(f"[mark_all_notifications_as_read] error: {str(e)}")
return {}


@ -1,316 +0,0 @@
import time
from typing import List, Tuple
import orjson
from sqlalchemy import and_, select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import aliased
from sqlalchemy.sql import not_
from orm.author import Author
from orm.notification import (
Notification,
NotificationAction,
NotificationEntity,
NotificationSeen,
)
from orm.shout import Shout
from services.auth import login_required
from services.db import local_session
from services.schema import mutation, query
from utils.logger import root_logger as logger
def query_notifications(author_id: int, after: int = 0) -> Tuple[int, int, List[Tuple[Notification, bool]]]:
notification_seen_alias = aliased(NotificationSeen)
q = select(Notification, notification_seen_alias.viewer.label("seen")).outerjoin(
NotificationSeen,
and_(
NotificationSeen.viewer == author_id,
NotificationSeen.notification == Notification.id,
),
)
if after:
q = q.filter(Notification.created_at > after)
q = q.group_by(notification_seen_alias.notification, Notification.created_at)
with local_session() as session:
total = (
session.query(Notification)
.filter(
and_(
Notification.action == NotificationAction.CREATE.value,
Notification.created_at > after,
)
)
.count()
)
unread = (
session.query(Notification)
.filter(
and_(
Notification.action == NotificationAction.CREATE.value,
Notification.created_at > after,
not_(Notification.seen),
)
)
.count()
)
notifications_result = session.execute(q)
notifications = []
for n, seen in notifications_result:
notifications.append((n, seen))
return total, unread, notifications
def group_notification(thread, authors=None, shout=None, reactions=None, entity="follower", action="follow"):
reactions = reactions or []
authors = authors or []
return {
"thread": thread,
"authors": authors,
"updated_at": int(time.time()),
"shout": shout,
"reactions": reactions,
"entity": entity,
"action": action,
}
def get_notifications_grouped(author_id: int, after: int = 0, limit: int = 10, offset: int = 0):
"""
Retrieves notifications for a given author.
Args:
author_id (int): The ID of the author for whom notifications are retrieved.
after (int, optional): If provided, only notifications created after this timestamp are considered.
limit (int, optional): The maximum number of groups to retrieve.
offset (int, optional): Number of groups to skip from the start of the result.
Returns:
Dict[str, NotificationGroup], int, int: A dictionary where keys are thread IDs
and values are NotificationGroup objects, unread and total amounts.
This function queries the database to retrieve notifications for the specified author, considering optional filters.
The result is a dictionary where each key is a thread ID, and the corresponding value is a NotificationGroup
containing information about the notifications within that thread.
NotificationGroup structure:
{
entity: str, # Type of entity (e.g., 'reaction', 'shout', 'follower').
updated_at: int, # Timestamp of the latest update in the thread.
shout: Optional[NotificationShout]
reactions: List[int], # List of reaction ids within the thread.
authors: List[NotificationAuthor], # List of authors involved in the thread.
}
"""
total, unread, notifications = query_notifications(author_id, after)
groups_by_thread = {}
groups_amount = 0
for notification, seen in notifications:
if (groups_amount + offset) >= limit:
break
payload = orjson.loads(str(notification.payload))
if str(notification.entity) == NotificationEntity.SHOUT.value:
shout = payload
shout_id = shout.get("id")
shout_author_id = shout.get("created_by")  # do not shadow the author_id parameter
thread_id = f"shout-{shout_id}"
with local_session() as session:
author = session.query(Author).filter(Author.id == shout_author_id).first()
shout = session.query(Shout).filter(Shout.id == shout_id).first()
if author and shout:
author = author.dict()
shout = shout.dict()
group = group_notification(
thread_id,
shout=shout,
authors=[author],
action=str(notification.action),
entity=str(notification.entity),
)
groups_by_thread[thread_id] = group
groups_amount += 1
elif str(notification.entity) == NotificationEntity.REACTION.value:
reaction = payload
if not isinstance(reaction, dict):
raise ValueError("reaction data is not consistent")
shout_id = reaction.get("shout")
reaction_author_id = reaction.get("created_by", 0)  # do not shadow the author_id parameter
if shout_id and reaction_author_id:
with local_session() as session:
author = session.query(Author).filter(Author.id == reaction_author_id).first()
shout = session.query(Shout).filter(Shout.id == shout_id).first()
if shout and author:
author = author.dict()
shout = shout.dict()
reply_id = reaction.get("reply_to")
thread_id = f"shout-{shout_id}"
if reply_id and reaction.get("kind", "").lower() == "comment":
thread_id += f"{reply_id}"
existing_group = groups_by_thread.get(thread_id)
if existing_group:
existing_group["seen"] = False
existing_group["authors"].append(author_id)
existing_group["reactions"] = existing_group["reactions"] or []
existing_group["reactions"].append(reaction)
groups_by_thread[thread_id] = existing_group
else:
group = group_notification(
thread_id,
authors=[author],
shout=shout,
reactions=[reaction],
entity=str(notification.entity),
action=str(notification.action),
)
if group:
groups_by_thread[thread_id] = group
groups_amount += 1
elif str(notification.entity) == "follower":
thread_id = "followers"
follower = payload  # payload was already deserialized above; parsing it again would fail
group = groups_by_thread.get(thread_id)
if group:
if str(notification.action) == "follow":
group["authors"].append(follower)
elif str(notification.action) == "unfollow":
follower_id = follower.get("id")
for author in group["authors"]:
if author.get("id") == follower_id:
group["authors"].remove(author)
break
else:
group = group_notification(
thread_id,
authors=[follower],
entity=str(notification.entity),
action=str(notification.action),
)
groups_amount += 1
groups_by_thread[thread_id] = group
return groups_by_thread, unread, total
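For reference, the grouped result that load_notifications serializes below has this shape; the values are invented for illustration and follow the NotificationGroup structure documented in the docstring above:

example_groups_by_thread = {
    "shout-12:44": {
        "thread": "shout-12:44",
        "authors": [{"id": 7, "slug": "anna"}],
        "updated_at": 1706112000,
        "shout": {"id": 12, "title": "On editing"},
        "reactions": [{"id": 99, "kind": "COMMENT", "shout": 12, "reply_to": 44}],
        "entity": "reaction",
        "action": "create",
    },
    "followers": {
        "thread": "followers",
        "authors": [{"id": 3, "slug": "pavel"}],
        "updated_at": 1706112000,
        "shout": None,
        "reactions": [],
        "entity": "follower",
        "action": "follow",
    },
}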
@query.field("load_notifications")
@login_required
async def load_notifications(_, info, after: int, limit: int = 50, offset=0):
author_dict = info.context.get("author")
author_id = author_dict.get("id")
error = None
total = 0
unread = 0
notifications = []
try:
if author_id:
groups, unread, total = get_notifications_grouped(author_id, after, limit)
notifications = sorted(groups.values(), key=lambda group: group["updated_at"], reverse=True)  # groups are dicts
except Exception as e:
error = str(e)  # serialize the exception for the GraphQL payload
logger.error(e)
return {
"notifications": notifications,
"total": total,
"unread": unread,
"error": error,
}
@mutation.field("notification_mark_seen")
@login_required
async def notification_mark_seen(_, info, notification_id: int):
author_id = info.context.get("author", {}).get("id")
if author_id:
with local_session() as session:
try:
ns = NotificationSeen(notification=notification_id, viewer=author_id)
session.add(ns)
session.commit()
except SQLAlchemyError as e:
session.rollback()
logger.error(f"seen mutation failed: {e}")
return {"error": "cant mark as read"}
return {"error": None}
@mutation.field("notifications_seen_after")
@login_required
async def notifications_seen_after(_, info, after: int):
# TODO: use latest loaded notification_id as input offset parameter
error = None
try:
author_id = info.context.get("author", {}).get("id")
if author_id:
with local_session() as session:
nnn = session.query(Notification).filter(Notification.created_at > after).all()
for n in nnn:
try:
ns = NotificationSeen(notification=n.id, viewer=author_id)
session.add(ns)
session.commit()
except SQLAlchemyError:
session.rollback()
except Exception as e:
logger.error(e)
error = "can't mark as read"
return {"error": error}
@mutation.field("notifications_seen_thread")
@login_required
async def notifications_seen_thread(_, info, thread: str, after: int):
error = None
author_id = info.context.get("author", {}).get("id")
if author_id:
[shout_id, reply_to_id] = thread.split(":")
with local_session() as session:
# TODO: handle new follower and new shout notifications
new_reaction_notifications = (
session.query(Notification)
.filter(
Notification.action == "create",
Notification.entity == "reaction",
Notification.created_at > after,
)
.all()
)
removed_reaction_notifications = (
session.query(Notification)
.filter(
Notification.action == "delete",
Notification.entity == "reaction",
Notification.created_at > after,
)
.all()
)
exclude = set()
for nr in removed_reaction_notifications:
reaction = orjson.loads(str(nr.payload))
reaction_id = reaction.get("id")
exclude.add(reaction_id)
for n in new_reaction_notifications:
reaction = orjson.loads(str(n.payload))
reaction_id = reaction.get("id")
# compare as strings: thread.split(":") yields strings while payload values may be ints
if (
reaction_id not in exclude
and str(reaction.get("shout")) == shout_id
and str(reaction.get("reply_to")) == reply_to_id
):
try:
ns = NotificationSeen(notification=n.id, viewer=author_id)
session.add(ns)
session.commit()
except Exception as e:
logger.warning(e)  # logger.warn is a deprecated alias
session.rollback()
else:
error = "You are not logged in"
return {"error": error}


@@ -1,49 +0,0 @@
from sqlalchemy import and_
from orm.rating import is_negative, is_positive
from orm.reaction import Reaction, ReactionKind
from orm.shout import Shout
from services.db import local_session
from utils.diff import apply_diff, get_diff
def handle_proposing(kind: ReactionKind, reply_to: int, shout_id: int):
with local_session() as session:
if is_positive(kind):
replied_reaction = (
session.query(Reaction).filter(Reaction.id == reply_to, Reaction.shout == shout_id).first()
)
if replied_reaction and replied_reaction.kind is ReactionKind.PROPOSE.value and replied_reaction.quote:
# patch all the proposals' quotes
proposals = (
session.query(Reaction)
.filter(
and_(
Reaction.shout == shout_id,
Reaction.kind == ReactionKind.PROPOSE.value,
)
)
.all()
)
# patch shout's body
shout = session.query(Shout).filter(Shout.id == shout_id).first()
body = replied_reaction.quote
Shout.update(shout, {"body": body})  # pass a mapping, not a set literal
session.add(shout)
session.commit()
# the reaction carries a quote -> refresh all proposals
# for the corresponding Shout
for proposal in proposals:
if proposal.quote:
proposal_diff = get_diff(shout.body, proposal.quote)
proposal_dict = proposal.dict()
proposal_dict["quote"] = apply_diff(replied_reaction.quote, proposal_diff)
Reaction.update(proposal, proposal_dict)
session.add(proposal)
if is_negative(kind):
# TODO: rejection logic
pass
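handle_proposing re-bases every open proposal onto the accepted quote via utils.diff. The contract assumed of that module, inferred from its usage here rather than from its source, is a plain diff/patch round-trip:

from utils.diff import apply_diff, get_diff

# assumed round-trip: a diff applied to its source text reproduces the target text
old_body = "The quick brown fox"
new_body = "The quick red fox"
diff = get_diff(old_body, new_body)
assert apply_diff(old_body, diff) == new_body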


@@ -1,337 +0,0 @@
from sqlalchemy import and_, case, func, select, true
from sqlalchemy.orm import aliased
from orm.author import Author, AuthorRating
from orm.reaction import Reaction, ReactionKind
from orm.shout import Shout
from services.auth import login_required
from services.db import local_session
from services.schema import mutation, query
from utils.logger import root_logger as logger
@query.field("get_my_rates_comments")
@login_required
async def get_my_rates_comments(_, info, comments: list[int]) -> list[dict]:
"""
Получение реакций пользователя на комментарии
Args:
info: Контекст запроса
comments: Список ID комментариев
Returns:
list[dict]: Список словарей с реакциями пользователя на комментарии
Каждый словарь содержит:
- comment_id: ID комментария
- my_rate: Тип реакции (LIKE/DISLIKE)
"""
author_dict = info.context.get("author") if info.context else None
author_id = author_dict.get("id") if author_dict else None
if not author_id:
return []  # return an empty list rather than a dict with an error
# subquery for the current user's reactions
rated_query = (
select(Reaction.reply_to.label("comment_id"), Reaction.kind.label("my_rate"))  # the id of the comment reacted to
.where(
and_(
Reaction.reply_to.in_(comments),
Reaction.created_by == author_id,
Reaction.deleted_at.is_(None),
Reaction.kind.in_([ReactionKind.LIKE.value, ReactionKind.DISLIKE.value]),
)
)
.order_by(Reaction.reply_to, Reaction.created_at.desc())
.distinct(Reaction.reply_to)  # latest reaction per comment, not per shout
)
with local_session() as session:
comments_result = session.execute(rated_query).all()
return [{"comment_id": row.comment_id, "my_rate": row.my_rate} for row in comments_result]
@query.field("get_my_rates_shouts")
@login_required
async def get_my_rates_shouts(_, info, shouts):
"""
Получение реакций пользователя на публикации
"""
author_dict = info.context.get("author") if info.context else None
author_id = author_dict.get("id") if author_dict else None
if not author_id:
return []
with local_session() as session:
try:
stmt = (
select(Reaction)
.where(
and_(
Reaction.shout.in_(shouts),
Reaction.reply_to.is_(None),
Reaction.created_by == author_id,
Reaction.deleted_at.is_(None),
Reaction.kind.in_([ReactionKind.LIKE.value, ReactionKind.DISLIKE.value]),
)
)
.order_by(Reaction.shout, Reaction.created_at.desc())
.distinct(Reaction.shout)
)
result = session.execute(stmt).all()
return [
{
"shout_id": row[0].shout, # Получаем shout_id из объекта Reaction
"my_rate": row[0].kind, # Получаем kind (my_rate) из объекта Reaction
}
for row in result
]
except Exception as e:
logger.error(f"Error in get_my_rates_shouts: {e}")
return []
@mutation.field("rate_author")
@login_required
async def rate_author(_, info, rated_slug, value):
info.context["user_id"]
rater_id = info.context.get("author", {}).get("id")
with local_session() as session:
rater_id = int(rater_id)
rated_author = session.query(Author).filter(Author.slug == rated_slug).first()
if rater_id and rated_author:
rating: AuthorRating = (
session.query(AuthorRating)
.filter(
and_(
AuthorRating.rater == rater_id,
AuthorRating.author == rated_author.id,
)
)
.first()
)
if rating:
rating.plus = value > 0
session.add(rating)
session.commit()
return {}
else:
try:
rating = AuthorRating(rater=rater_id, author=rated_author.id, plus=value > 0)
session.add(rating)
session.commit()
except Exception as err:
return {"error": err}
return {}
def count_author_comments_rating(session, author_id) -> int:
replied_alias = aliased(Reaction)
replies_likes = (
session.query(replied_alias)
.join(Reaction, replied_alias.id == Reaction.reply_to)
.where(
and_(
replied_alias.created_by == author_id,
replied_alias.kind == ReactionKind.COMMENT.value,
)
)
.filter(replied_alias.kind == ReactionKind.LIKE.value)
.count()
) or 0
replies_dislikes = (
session.query(replied_alias)
.join(Reaction, replied_alias.id == Reaction.reply_to)
.where(
and_(
replied_alias.created_by == author_id,
replied_alias.kind == ReactionKind.COMMENT.value,
)
)
.filter(replied_alias.kind == ReactionKind.DISLIKE.value)
.count()
) or 0
return replies_likes - replies_dislikes
def count_author_shouts_rating(session, author_id) -> int:
shouts_likes = (
session.query(Reaction, Shout)
.join(Shout, Shout.id == Reaction.shout)
.filter(
and_(
Shout.authors.any(id=author_id),
Reaction.kind == ReactionKind.LIKE.value,
)
)
.count()
or 0
)
shouts_dislikes = (
session.query(Reaction, Shout)
.join(Shout, Shout.id == Reaction.shout)
.filter(
and_(
Shout.authors.any(id=author_id),
Reaction.kind == ReactionKind.DISLIKE.value,
)
)
.count()
or 0
)
return shouts_likes - shouts_dislikes
def get_author_rating_old(session, author: Author):
likes_count = (
session.query(AuthorRating).filter(and_(AuthorRating.author == author.id, AuthorRating.plus.is_(True))).count()
)
dislikes_count = (
session.query(AuthorRating)
.filter(and_(AuthorRating.author == author.id, AuthorRating.plus.is_not(True)))
.count()
)
return likes_count - dislikes_count
def get_author_rating_shouts(session, author: Author) -> int:
q = (
select(
func.coalesce(
func.sum(
case(
(Reaction.kind == ReactionKind.LIKE.value, 1),
(Reaction.kind == ReactionKind.DISLIKE.value, -1),
else_=0,
)
),
0,
).label("shouts_rating")
)
.select_from(Reaction)
.outerjoin(Shout, Shout.authors.any(id=author.id))
.outerjoin(
Reaction,
and_(
Reaction.reply_to.is_(None),
Reaction.shout == Shout.id,
Reaction.deleted_at.is_(None),
),
)
)
result = session.execute(q).scalar()
return result
def get_author_rating_comments(session, author: Author) -> int:
replied_comment = aliased(Reaction)
q = (
select(
func.coalesce(
func.sum(
case(
(Reaction.kind == ReactionKind.LIKE.value, 1),
(Reaction.kind == ReactionKind.DISLIKE.value, -1),
else_=0,
)
),
0,
).label("shouts_rating")
)
.select_from(Reaction)
.outerjoin(
Reaction,
and_(
replied_comment.kind == ReactionKind.COMMENT.value,
replied_comment.created_by == author.id,
Reaction.kind.in_([ReactionKind.LIKE.value, ReactionKind.DISLIKE.value]),
Reaction.reply_to == replied_comment.id,
Reaction.deleted_at.is_(None),
),
)
)
result = session.execute(q).scalar()
return result
def add_author_rating_columns(q, group_list):
# NOTE: method is not used
# old karma
q = q.outerjoin(AuthorRating, AuthorRating.author == Author.id)
q = q.add_columns(func.sum(case((AuthorRating.plus == true(), 1), else_=-1)).label("rating"))
# by shouts rating
shout_reaction = aliased(Reaction)
shouts_rating_subq = (
select(
Author.id,
func.coalesce(
func.sum(
case(
(shout_reaction.kind == ReactionKind.LIKE.value, 1),
(shout_reaction.kind == ReactionKind.DISLIKE.value, -1),
else_=0,
)
),
0,
).label("shouts_rating"),
)
.select_from(shout_reaction)
.outerjoin(Shout, Shout.authors.any(id=Author.id))
.outerjoin(
shout_reaction,
and_(
shout_reaction.reply_to.is_(None),
shout_reaction.shout == Shout.id,
shout_reaction.deleted_at.is_(None),
),
)
.group_by(Author.id)
.subquery()
)
q = q.outerjoin(shouts_rating_subq, Author.id == shouts_rating_subq.c.id)
q = q.add_columns(shouts_rating_subq.c.shouts_rating)
group_list = [shouts_rating_subq.c.shouts_rating]
# by comments
replied_comment = aliased(Reaction)
reaction_2 = aliased(Reaction)
comments_subq = (
select(
Author.id,
func.coalesce(
func.sum(
case(
(reaction_2.kind == ReactionKind.LIKE.value, 1),
(reaction_2.kind == ReactionKind.DISLIKE.value, -1),
else_=0,
)
),
0,
).label("comments_rating"),
)
.select_from(reaction_2)
.outerjoin(
replied_comment,
and_(
replied_comment.kind == ReactionKind.COMMENT.value,
replied_comment.created_by == Author.id,
reaction_2.kind.in_([ReactionKind.LIKE.value, ReactionKind.DISLIKE.value]),
reaction_2.reply_to == replied_comment.id,
reaction_2.deleted_at.is_(None),
),
)
.group_by(Author.id)
.subquery()
)
q = q.outerjoin(comments_subq, Author.id == comments_subq.c.id)
q = q.add_columns(comments_subq.c.comments_rating)
group_list.extend([comments_subq.c.comments_rating])
return q, group_list
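Although the note above marks add_author_rating_columns as unused, its intended call shape would be roughly as follows; a sketch, with the surrounding query setup assumed:

from sqlalchemy import select

q = select(Author)
group_list: list = []
q, group_list = add_author_rating_columns(q, group_list)
q = q.group_by(Author.id, *group_list)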

Some files were not shown because too many files have changed in this diff.