fmt

2024-02-21 10:27:16 +03:00
parent 4f26812340
commit 7cf702eb98
35 changed files with 1059 additions and 825 deletions


@@ -6,33 +6,36 @@ from dogpile.cache import make_region
 from settings import ADMIN_SECRET, AUTH_URL
 from services.logger import root_logger as logger


 async def request_data(gql, headers=None):
     if headers is None:
-        headers = {'Content-Type': 'application/json'}
+        headers = {"Content-Type": "application/json"}
     try:
         async with httpx.AsyncClient() as client:
             response = await client.post(AUTH_URL, json=gql, headers=headers)
             if response.status_code == 200:
                 data = response.json()
-                errors = data.get('errors')
+                errors = data.get("errors")
                 if errors:
-                    logger.error(f'HTTP Errors: {errors}')
+                    logger.error(f"HTTP Errors: {errors}")
                 else:
                     return data
     except Exception as e:
         # Handling and logging exceptions during authentication check
-        logger.error(f'request_data error: {e}')
+        logger.error(f"request_data error: {e}")
     return None


 # Создание региона кэша с TTL 30 секунд
-region = make_region().configure('dogpile.cache.memory', expiration_time=30)
+region = make_region().configure("dogpile.cache.memory", expiration_time=30)


 # Функция-ключ для кэширования
 def auth_cache_key(req):
-    token = req.headers.get('Authorization')
+    token = req.headers.get("Authorization")
     return f"auth_token:{token}"


 # Декоратор для кэширования запроса проверки токена
 def cache_auth_request(f):
     @wraps(f)
@@ -41,41 +44,43 @@ def cache_auth_request(f):
         cache_key = auth_cache_key(req)
         result = region.get(cache_key)
         if result is None:
             [user_id, user_roles] = await f(*args, **kwargs)
             if user_id:
                 region.set(cache_key, [user_id, user_roles])
         return result

     return decorated_function


 # Измененная функция проверки аутентификации с кэшированием
 @cache_auth_request
 async def check_auth(req):
-    token = req.headers.get('Authorization')
-    user_id = ''
+    token = req.headers.get("Authorization")
+    user_id = ""
     user_roles = []
     if token:
         try:
             # Logging the authentication token
-            logger.debug(f'{token}')
-            query_name = 'validate_jwt_token'
-            operation = 'ValidateToken'
+            logger.debug(f"{token}")
+            query_name = "validate_jwt_token"
+            operation = "ValidateToken"
             variables = {
-                'params': {
-                    'token_type': 'access_token',
-                    'token': token,
+                "params": {
+                    "token_type": "access_token",
+                    "token": token,
                 }
             }
             gql = {
-                'query': f'query {operation}($params: ValidateJWTTokenInput!) {{ {query_name}(params: $params) {{ is_valid claims }} }}',
-                'variables': variables,
-                'operationName': operation,
+                "query": f"query {operation}($params: ValidateJWTTokenInput!) {{ {query_name}(params: $params) {{ is_valid claims }} }}",
+                "variables": variables,
+                "operationName": operation,
             }
             data = await request_data(gql)
             if data:
-                user_data = data.get('data', {}).get(query_name, {}).get('claims', {})
-                user_id = user_data.get('sub')
-                user_roles = user_data.get('allowed_roles')
+                user_data = data.get("data", {}).get(query_name, {}).get("claims", {})
+                user_id = user_data.get("sub")
+                user_roles = user_data.get("allowed_roles")
         except Exception as e:
             import traceback
@@ -87,41 +92,41 @@ async def check_auth(req):
 async def add_user_role(user_id):
-    logger.info(f'add author role for user_id: {user_id}')
-    query_name = '_update_user'
-    operation = 'UpdateUserRoles'
+    logger.info(f"add author role for user_id: {user_id}")
+    query_name = "_update_user"
+    operation = "UpdateUserRoles"
     headers = {
-        'Content-Type': 'application/json',
-        'x-authorizer-admin-secret': ADMIN_SECRET,
+        "Content-Type": "application/json",
+        "x-authorizer-admin-secret": ADMIN_SECRET,
     }
-    variables = {'params': {'roles': 'author, reader', 'id': user_id}}
+    variables = {"params": {"roles": "author, reader", "id": user_id}}
     gql = {
-        'query': f'mutation {operation}($params: UpdateUserInput!) {{ {query_name}(params: $params) {{ id roles }} }}',
-        'variables': variables,
-        'operationName': operation,
+        "query": f"mutation {operation}($params: UpdateUserInput!) {{ {query_name}(params: $params) {{ id roles }} }}",
+        "variables": variables,
+        "operationName": operation,
     }
     data = await request_data(gql, headers)
     if data:
-        user_id = data.get('data', {}).get(query_name, {}).get('id')
+        user_id = data.get("data", {}).get(query_name, {}).get("id")
         return user_id


 def login_required(f):
     @wraps(f)
     async def decorated_function(*args, **kwargs):
-        user_id = ''
+        user_id = ""
         user_roles = []
         info = args[1]
         try:
-            req = info.context.get('request')
+            req = info.context.get("request")
             [user_id, user_roles] = await check_auth(req)
         except Exception as e:
             logger.error(f"Failed to authenticate user: {e}")
         if user_id:
-            logger.info(f' got {user_id} roles: {user_roles}')
-            info.context['user_id'] = user_id.strip()
-            info.context['roles'] = user_roles
+            logger.info(f" got {user_id} roles: {user_roles}")
+            info.context["user_id"] = user_id.strip()
+            info.context["roles"] = user_roles
         return await f(*args, **kwargs)

     return decorated_function
@@ -130,7 +135,7 @@ def login_required(f):
 def auth_request(f):
     @wraps(f)
     async def decorated_function(*args, **kwargs):
-        user_id = ''
+        user_id = ""
         user_roles = []
         req = {}
         try:
@@ -142,9 +147,9 @@ def auth_request(f):
             traceback.print_exc()
             logger.error(f"Failed to authenticate user: {args} {e}")
         if user_id:
-            logger.info(f' got {user_id} roles: {user_roles}')
-            req['user_id'] = user_id.strip()
-            req['roles'] = user_roles
+            logger.info(f" got {user_id} roles: {user_roles}")
+            req["user_id"] = user_id.strip()
+            req["roles"] = user_roles
         return await f(*args, **kwargs)

     return decorated_function
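
For context: cache_auth_request above memoizes token checks in a dogpile.cache memory region with a 30-second TTL, keyed by the Authorization header. Below is a minimal standalone sketch of that pattern, not part of this commit; expensive_validate and its return value are hypothetical stand-ins, and the miss check uses the library's NO_VALUE sentinel.

from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE

# In-memory region with a 30-second TTL, mirroring the region configured above
region = make_region().configure("dogpile.cache.memory", expiration_time=30)


def expensive_validate(token):
    # Hypothetical stand-in for the GraphQL validate_jwt_token request
    return ["user-1", ["reader"]] if token else None


def cached_validation(token):
    key = f"auth_token:{token}"
    result = region.get(key)  # returns the NO_VALUE sentinel on a miss
    if result is NO_VALUE:
        result = expensive_validate(token)
        if result:
            region.set(key, result)
    return result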


@@ -13,32 +13,33 @@ from services.logger import root_logger as logger
 from settings import DB_URL

 # Создание региона кэша с TTL 300 секунд
-cache_region = make_region().configure(
-    'dogpile.cache.memory',
-    expiration_time=300
-)
+cache_region = make_region().configure("dogpile.cache.memory", expiration_time=300)

 # Подключение к базе данных SQLAlchemy
 engine = create_engine(DB_URL, echo=False, pool_size=10, max_overflow=20)

-T = TypeVar('T')
+T = TypeVar("T")
 REGISTRY: Dict[str, type] = {}
 Base = declarative_base()


 # Перехватчики для журнала запросов SQLAlchemy
-@event.listens_for(Engine, 'before_cursor_execute')
+@event.listens_for(Engine, "before_cursor_execute")
 def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
-    conn.info.setdefault('query_start_time', []).append(time.time())
+    conn.info.setdefault("query_start_time", []).append(time.time())


-@event.listens_for(Engine, 'after_cursor_execute')
+@event.listens_for(Engine, "after_cursor_execute")
 def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
-    total = time.time() - conn.info['query_start_time'].pop(-1)
-    stars = '*' * math.floor(total*1000)
+    total = time.time() - conn.info["query_start_time"].pop(-1)
+    stars = "*" * math.floor(total * 1000)
     if stars:
-        logger.debug(f'{statement}\n{stars} {total*1000} s\n')
+        logger.debug(f"{statement}\n{stars} {total*1000} s\n")


-def local_session(src=''):
+def local_session(src=""):
     return Session(bind=engine, expire_on_commit=False)


 class Base(declarative_base()):
     __table__: Table
     __tablename__: str
@@ -46,7 +47,7 @@ class Base(declarative_base()):
     __init__: Callable
     __allow_unmapped__ = True
     __abstract__ = True
-    __table_args__ = {'extend_existing': True}
+    __table_args__ = {"extend_existing": True}

     id = Column(Integer, primary_key=True)
@@ -55,12 +56,12 @@ class Base(declarative_base()):
     def dict(self) -> Dict[str, Any]:
         column_names = self.__table__.columns.keys()
-        if '_sa_instance_state' in column_names:
-            column_names.remove('_sa_instance_state')
+        if "_sa_instance_state" in column_names:
+            column_names.remove("_sa_instance_state")
         try:
             return {c: getattr(self, c) for c in column_names}
         except Exception as e:
-            logger.error(f'Error occurred while converting object to dictionary: {e}')
+            logger.error(f"Error occurred while converting object to dictionary: {e}")
             return {}

     def update(self, values: Dict[str, Any]) -> None:
@@ -68,6 +69,7 @@ class Base(declarative_base()):
             if hasattr(self, key):
                 setattr(self, key, value)


 # Декоратор для кэширования методов
 def cache_method(cache_key: str):
     def decorator(f):
@@ -82,5 +84,7 @@ def cache_method(cache_key: str):
             result = f(*args, **kwargs)
             cache_region.set(key, result)
             return result

         return decorated_function

     return decorator
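
For context, the before_cursor_execute/after_cursor_execute listeners above time every SQL statement via conn.info. The following is a self-contained sketch of that hook pattern, not part of this commit, assuming an in-memory SQLite engine and a throwaway Item table (both hypothetical).

import time

from sqlalchemy import Column, Integer, create_engine, event
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Session, declarative_base

SketchBase = declarative_base()


class Item(SketchBase):
    __tablename__ = "item"
    id = Column(Integer, primary_key=True)


@event.listens_for(Engine, "before_cursor_execute")
def _before(conn, cursor, statement, parameters, context, executemany):
    # Remember when this statement started
    conn.info.setdefault("query_start_time", []).append(time.time())


@event.listens_for(Engine, "after_cursor_execute")
def _after(conn, cursor, statement, parameters, context, executemany):
    # Pop the matching start time and report elapsed milliseconds
    total = time.time() - conn.info["query_start_time"].pop(-1)
    print(f"{statement}\n{total * 1000:.2f} ms")


engine = create_engine("sqlite://", echo=False)
SketchBase.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Item())
    session.commit()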


@@ -29,19 +29,19 @@ def apply_diff(original, diff):
     The modified string.
     """
     result = []
-    pattern = re.compile(r'^(\+|-) ')
+    pattern = re.compile(r"^(\+|-) ")

     for line in diff:
         match = pattern.match(line)
         if match:
             op = match.group(1)
             content = line[2:]
-            if op == '+':
+            if op == "+":
                 result.append(content)
-            elif op == '-':
+            elif op == "-":
                 # Ignore deleted lines
                 pass
         else:
             result.append(line)

-    return ' '.join(result)
+    return " ".join(result)


@@ -3,19 +3,19 @@ import colorlog
# Define the color scheme
color_scheme = {
'DEBUG': 'light_black',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
"DEBUG": "light_black",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bg_white",
}
# Define secondary log colors
secondary_colors = {
'log_name': {'DEBUG': 'blue'},
'asctime': {'DEBUG': 'cyan'},
'process': {'DEBUG': 'purple'},
'module': {'DEBUG': 'light_black,bg_blue'},
"log_name": {"DEBUG": "blue"},
"asctime": {"DEBUG": "cyan"},
"process": {"DEBUG": "purple"},
"module": {"DEBUG": "light_black,bg_blue"},
}
# Define the log format string
@@ -23,30 +23,30 @@ fmt_string = "%(log_color)s%(levelname)s: %(log_color)s[%(module)s]%(reset)s %(w
 # Define formatting configuration
 fmt_config = {
-    'log_colors': color_scheme,
-    'secondary_log_colors': secondary_colors,
-    'style': '%',
-    'reset': True
+    "log_colors": color_scheme,
+    "secondary_log_colors": secondary_colors,
+    "style": "%",
+    "reset": True,
 }


 class MultilineColoredFormatter(colorlog.ColoredFormatter):
     def format(self, record):
         # Check if the message is multiline
-        if record.getMessage() and '\n' in record.getMessage():
+        if record.getMessage() and "\n" in record.getMessage():
             # Split the message into lines
-            lines = record.getMessage().split('\n')
+            lines = record.getMessage().split("\n")
             formatted_lines = []
             for line in lines:
                 # Format each line with the provided format
                 formatted_lines.append(super().format(record))
             # Join the formatted lines
-            return '\n'.join(formatted_lines)
+            return "\n".join(formatted_lines)
         else:
             # If not multiline or no message, use the default formatting
             return super().format(record)


 # Create a MultilineColoredFormatter object for colorized logging
 formatter = MultilineColoredFormatter(fmt_string, **fmt_config)
@@ -55,8 +55,7 @@ stream = logging.StreamHandler()
 stream.setFormatter(formatter)


-def get_colorful_logger(name='main'):
+def get_colorful_logger(name="main"):
     # Create and configure the logger
     logger = logging.getLogger(name)
     logger.setLevel(logging.DEBUG)
@@ -64,6 +63,7 @@ def get_colorful_logger(name='main'):
     return logger


 # Set up the root logger with the same formatting
 root_logger = logging.getLogger()
 root_logger.setLevel(logging.DEBUG)


@@ -3,43 +3,43 @@ import json
 from services.rediscache import redis


-async def notify_reaction(reaction, action: str = 'create'):
-    channel_name = 'reaction'
-    data = {'payload': reaction, 'action': action}
+async def notify_reaction(reaction, action: str = "create"):
+    channel_name = "reaction"
+    data = {"payload": reaction, "action": action}
     try:
         await redis.publish(channel_name, json.dumps(data))
     except Exception as e:
-        print(f'[services.notify] Failed to publish to channel {channel_name}: {e}')
+        print(f"[services.notify] Failed to publish to channel {channel_name}: {e}")


-async def notify_shout(shout, action: str = 'update'):
-    channel_name = 'shout'
-    data = {'payload': shout, 'action': action}
+async def notify_shout(shout, action: str = "update"):
+    channel_name = "shout"
+    data = {"payload": shout, "action": action}
     try:
         await redis.publish(channel_name, json.dumps(data))
     except Exception as e:
-        print(f'[services.notify] Failed to publish to channel {channel_name}: {e}')
+        print(f"[services.notify] Failed to publish to channel {channel_name}: {e}")


-async def notify_follower(follower: dict, author_id: int, action: str = 'follow'):
-    channel_name = f'follower:{author_id}'
+async def notify_follower(follower: dict, author_id: int, action: str = "follow"):
+    channel_name = f"follower:{author_id}"
     try:
         # Simplify dictionary before publishing
-        simplified_follower = {k: follower[k] for k in ['id', 'name', 'slug', 'pic']}
-        data = {'payload': simplified_follower, 'action': action}
+        simplified_follower = {k: follower[k] for k in ["id", "name", "slug", "pic"]}
+        data = {"payload": simplified_follower, "action": action}
         # Convert data to JSON string
         json_data = json.dumps(data)
         # Ensure the data is not empty before publishing
         if not json_data:
-            raise ValueError('Empty data to publish.')
+            raise ValueError("Empty data to publish.")
         # Use the 'await' keyword when publishing
         await redis.publish(channel_name, json_data)
     except Exception as e:
         # Log the error and re-raise it
-        print(f'[services.notify] Failed to publish to channel {channel_name}: {e}')
+        print(f"[services.notify] Failed to publish to channel {channel_name}: {e}")
         raise


@@ -4,7 +4,6 @@ from services.logger import root_logger as logger
 from settings import REDIS_URL


 class RedisCache:
     def __init__(self, uri=REDIS_URL):
         self._uri: str = uri
@@ -21,7 +20,7 @@ class RedisCache:
     async def execute(self, command, *args, **kwargs):
         if self._client:
             try:
-                logger.debug(f'{command} {args} {kwargs}')
+                logger.debug(f"{command} {args} {kwargs}")
                 r = await self._client.execute_command(command, *args, **kwargs)
                 logger.debug(type(r))
                 logger.debug(r)
@@ -52,4 +51,4 @@ class RedisCache:
 redis = RedisCache()

-__all__ = ['redis']
+__all__ = ["redis"]


@@ -7,67 +7,69 @@ from opensearchpy import OpenSearch
 from services.logger import root_logger as logger
 from services.rediscache import redis

-ELASTIC_HOST = os.environ.get('ELASTIC_HOST', '').replace('https://', '')
-ELASTIC_USER = os.environ.get('ELASTIC_USER', '')
-ELASTIC_PASSWORD = os.environ.get('ELASTIC_PASSWORD', '')
-ELASTIC_PORT = os.environ.get('ELASTIC_PORT', 9200)
-ELASTIC_AUTH = f'{ELASTIC_USER}:{ELASTIC_PASSWORD}' if ELASTIC_USER else ''
-ELASTIC_URL = os.environ.get('ELASTIC_URL', f'https://{ELASTIC_AUTH}@{ELASTIC_HOST}:{ELASTIC_PORT}')
+ELASTIC_HOST = os.environ.get("ELASTIC_HOST", "").replace("https://", "")
+ELASTIC_USER = os.environ.get("ELASTIC_USER", "")
+ELASTIC_PASSWORD = os.environ.get("ELASTIC_PASSWORD", "")
+ELASTIC_PORT = os.environ.get("ELASTIC_PORT", 9200)
+ELASTIC_AUTH = f"{ELASTIC_USER}:{ELASTIC_PASSWORD}" if ELASTIC_USER else ""
+ELASTIC_URL = os.environ.get(
+    "ELASTIC_URL", f"https://{ELASTIC_AUTH}@{ELASTIC_HOST}:{ELASTIC_PORT}"
+)
 REDIS_TTL = 86400  # 1 day in seconds

 index_settings = {
-    'settings': {
-        'index': {
-            'number_of_shards': 1,
-            'auto_expand_replicas': '0-all',
+    "settings": {
+        "index": {
+            "number_of_shards": 1,
+            "auto_expand_replicas": "0-all",
         },
-        'analysis': {
-            'analyzer': {
-                'ru': {
-                    'tokenizer': 'standard',
-                    'filter': ['lowercase', 'ru_stop', 'ru_stemmer'],
+        "analysis": {
+            "analyzer": {
+                "ru": {
+                    "tokenizer": "standard",
+                    "filter": ["lowercase", "ru_stop", "ru_stemmer"],
                 }
             },
-            'filter': {
-                'ru_stemmer': {
-                    'type': 'stemmer',
-                    'language': 'russian',
+            "filter": {
+                "ru_stemmer": {
+                    "type": "stemmer",
+                    "language": "russian",
                 },
-                'ru_stop': {
-                    'type': 'stop',
-                    'stopwords': '_russian_',
+                "ru_stop": {
+                    "type": "stop",
+                    "stopwords": "_russian_",
                 },
             },
         },
     },
-    'mappings': {
-        'properties': {
-            'body': {'type': 'text', 'analyzer': 'ru'},
-            'title': {'type': 'text', 'analyzer': 'ru'},
+    "mappings": {
+        "properties": {
+            "body": {"type": "text", "analyzer": "ru"},
+            "title": {"type": "text", "analyzer": "ru"},
             # 'author': {'type': 'text'},
         }
     },
 }

-expected_mapping = index_settings['mappings']
+expected_mapping = index_settings["mappings"]


 class SearchService:
-    def __init__(self, index_name='search_index'):
+    def __init__(self, index_name="search_index"):
         self.index_name = index_name
         self.manager = Manager()
         self.client = None

         # Используем менеджер для создания Lock и Value
         self.lock = self.manager.Lock()
-        self.initialized_flag = self.manager.Value('i', 0)
+        self.initialized_flag = self.manager.Value("i", 0)

         # Only initialize the instance if it's not already initialized
         if not self.initialized_flag.value and ELASTIC_HOST:
             try:
                 self.client = OpenSearch(
-                    hosts=[{'host': ELASTIC_HOST, 'port': ELASTIC_PORT}],
+                    hosts=[{"host": ELASTIC_HOST, "port": ELASTIC_PORT}],
                     http_compress=True,
                     http_auth=(ELASTIC_USER, ELASTIC_PASSWORD),
                     use_ssl=True,
@@ -76,46 +78,50 @@ class SearchService:
                     ssl_show_warn=False,
                     # ca_certs = ca_certs_path
                 )
-                logger.info(' Клиент OpenSearch.org подключен')
+                logger.info(" Клиент OpenSearch.org подключен")

                 if self.lock.acquire(blocking=False):
                     try:
                         self.check_index()
                     finally:
                         self.lock.release()
                 else:
-                    logger.debug(' проверка пропущена')
+                    logger.debug(" проверка пропущена")
             except Exception as exc:
-                logger.error(f' {exc}')
+                logger.error(f" {exc}")
                 self.client = None

     def info(self):
         if isinstance(self.client, OpenSearch):
-            logger.info(' Поиск подключен') # : {self.client.info()}')
+            logger.info(" Поиск подключен") # : {self.client.info()}')
         else:
-            logger.info(' * Задайте переменные среды для подключения к серверу поиска')
+            logger.info(" * Задайте переменные среды для подключения к серверу поиска")

     def delete_index(self):
         if self.client:
-            logger.debug(f' Удаляем индекс {self.index_name}')
+            logger.debug(f" Удаляем индекс {self.index_name}")
             self.client.indices.delete(index=self.index_name, ignore_unavailable=True)

     def create_index(self):
         if self.client:
             if self.lock.acquire(blocking=False):
                 try:
-                    logger.debug(f' Создаём новый индекс: {self.index_name} ')
-                    self.client.indices.create(index=self.index_name, body=index_settings)
+                    logger.debug(f" Создаём новый индекс: {self.index_name} ")
+                    self.client.indices.create(
+                        index=self.index_name, body=index_settings
+                    )
                     self.client.indices.close(index=self.index_name)
                     self.client.indices.open(index=self.index_name)
                 finally:
                     self.lock.release()
             else:
-                logger.debug(' ..')
+                logger.debug(" ..")

     def put_mapping(self):
         if self.client:
-            logger.debug(f' Разметка индекации {self.index_name}')
-            self.client.indices.put_mapping(index=self.index_name, body=expected_mapping)
+            logger.debug(f" Разметка индекации {self.index_name}")
+            self.client.indices.put_mapping(
+                index=self.index_name, body=expected_mapping
+            )

     def check_index(self):
         if self.client:
@@ -136,34 +142,36 @@ class SearchService:
                 finally:
                     self.lock.release()
             else:
-                logger.debug(' ..')
+                logger.debug(" ..")

     def index(self, shout):
         if self.client:
             id_ = str(shout.id)
-            logger.debug(f' Индексируем пост {id_}')
+            logger.debug(f" Индексируем пост {id_}")
             self.client.index(index=self.index_name, id=id_, body=shout.dict())

     async def search(self, text, limit, offset):
-        logger.debug(f' Ищем: {text}')
+        logger.debug(f" Ищем: {text}")
         search_body = {
-            'query': {'match': {'_all': text}},
+            "query": {"match": {"_all": text}},
         }
         if self.client:
-            search_response = self.client.search(index=self.index_name, body=search_body, size=limit, from_=offset)
-            hits = search_response['hits']['hits']
+            search_response = self.client.search(
+                index=self.index_name, body=search_body, size=limit, from_=offset
+            )
+            hits = search_response["hits"]["hits"]

             results = [
                 {
-                    **hit['_source'],
-                    'score': hit['_score'],
+                    **hit["_source"],
+                    "score": hit["_score"],
                 }
                 for hit in hits
             ]

             # Use Redis as cache with TTL
-            redis_key = f'search:{text}'
-            await redis.execute('SETEX', redis_key, REDIS_TTL, json.dumps(results))
+            redis_key = f"search:{text}"
+            await redis.execute("SETEX", redis_key, REDIS_TTL, json.dumps(results))

         return []


@@ -22,9 +22,9 @@ def start_sentry():
             integrations=[
                 StarletteIntegration(),
                 AriadneIntegration(),
-                SqlalchemyIntegration()
-            ]
+                SqlalchemyIntegration(),
+            ],
         )
     except Exception as e:
-        print('[services.sentry] init error')
+        print("[services.sentry] init error")
         print(e)


@@ -4,7 +4,7 @@ from services.rediscache import redis
 async def get_unread_counter(chat_id: str, author_id: int) -> int:
-    r = await redis.execute('LLEN', f'chats/{chat_id}/unread/{author_id}')
+    r = await redis.execute("LLEN", f"chats/{chat_id}/unread/{author_id}")
     if isinstance(r, str):
         return int(r)
     elif isinstance(r, int):
@@ -14,7 +14,7 @@ async def get_unread_counter(chat_id: str, author_id: int) -> int:
 async def get_total_unread_counter(author_id: int) -> int:
-    chats_set = await redis.execute('SMEMBERS', f'chats_by_author/{author_id}')
+    chats_set = await redis.execute("SMEMBERS", f"chats_by_author/{author_id}")
     s = 0
     if isinstance(chats_set, str):
         chats_set = json.loads(chats_set)


@@ -7,7 +7,12 @@ from typing import Dict
 # ga
 from google.analytics.data_v1beta import BetaAnalyticsDataClient
-from google.analytics.data_v1beta.types import DateRange, Dimension, Metric, RunReportRequest
+from google.analytics.data_v1beta.types import (
+    DateRange,
+    Dimension,
+    Metric,
+    RunReportRequest,
+)

 from orm.author import Author
 from orm.shout import Shout, ShoutAuthor, ShoutTopic
@@ -15,9 +20,9 @@ from orm.topic import Topic
 from services.db import local_session
 from services.logger import root_logger as logger

-GOOGLE_KEYFILE_PATH = os.environ.get('GOOGLE_KEYFILE_PATH', '/dump/google-service.json')
-GOOGLE_PROPERTY_ID = os.environ.get('GOOGLE_PROPERTY_ID', '')
-VIEWS_FILEPATH = '/dump/views.json'
+GOOGLE_KEYFILE_PATH = os.environ.get("GOOGLE_KEYFILE_PATH", "/dump/google-service.json")
+GOOGLE_PROPERTY_ID = os.environ.get("GOOGLE_PROPERTY_ID", "")
+VIEWS_FILEPATH = "/dump/views.json"


 class ViewedStorage:
@@ -37,30 +42,32 @@ class ViewedStorage:
"""Подключение к клиенту Google Analytics с использованием аутентификации"""
self = ViewedStorage
async with self.lock:
os.environ.setdefault('GOOGLE_APPLICATION_CREDENTIALS', GOOGLE_KEYFILE_PATH)
os.environ.setdefault("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_KEYFILE_PATH)
if GOOGLE_KEYFILE_PATH and os.path.isfile(GOOGLE_KEYFILE_PATH):
# Using a default constructor instructs the client to use the credentials
# specified in GOOGLE_APPLICATION_CREDENTIALS environment variable.
self.analytics_client = BetaAnalyticsDataClient()
logger.info(' * Клиент Google Analytics успешно авторизован')
logger.info(" * Клиент Google Analytics успешно авторизован")
# Загрузка предварительно подсчитанных просмотров из файла JSON
self.load_precounted_views()
if os.path.exists(VIEWS_FILEPATH):
file_timestamp = os.path.getctime(VIEWS_FILEPATH)
self.start_date = datetime.fromtimestamp(file_timestamp).strftime('%Y-%m-%d')
now_date = datetime.now().strftime('%Y-%m-%d')
self.start_date = datetime.fromtimestamp(file_timestamp).strftime(
"%Y-%m-%d"
)
now_date = datetime.now().strftime("%Y-%m-%d")
if now_date == self.start_date:
logger.info(' * Данные актуализованы!')
logger.info(" * Данные актуализованы!")
else:
logger.info(f' * Миграция проводилась: {self.start_date}')
logger.info(f" * Миграция проводилась: {self.start_date}")
# Запуск фоновой задачи
asyncio.create_task(self.worker())
else:
logger.info(' * Пожалуйста, добавьте ключевой файл Google Analytics')
logger.info(" * Пожалуйста, добавьте ключевой файл Google Analytics")
self.disabled = True
@staticmethod
@@ -68,28 +75,32 @@ class ViewedStorage:
"""Загрузка предварительно подсчитанных просмотров из файла JSON"""
self = ViewedStorage
try:
with open(VIEWS_FILEPATH, 'r') as file:
with open(VIEWS_FILEPATH, "r") as file:
precounted_views = json.load(file)
self.views_by_shout.update(precounted_views)
logger.info(f' * {len(precounted_views)} публикаций с просмотрами успешно загружены.')
logger.info(
f" * {len(precounted_views)} публикаций с просмотрами успешно загружены."
)
except Exception as e:
logger.error(f'Ошибка загрузки предварительно подсчитанных просмотров: {e}')
logger.error(f"Ошибка загрузки предварительно подсчитанных просмотров: {e}")
@staticmethod
async def update_pages():
"""Запрос всех страниц от Google Analytics, отсортированных по количеству просмотров"""
self = ViewedStorage
logger.info(' ⎧ Обновление данных просмотров от Google Analytics ---')
logger.info(" ⎧ Обновление данных просмотров от Google Analytics ---")
if not self.disabled:
try:
start = time.time()
async with self.lock:
if self.analytics_client:
request = RunReportRequest(
property=f'properties/{GOOGLE_PROPERTY_ID}',
dimensions=[Dimension(name='pagePath')],
metrics=[Metric(name='screenPageViews')],
date_ranges=[DateRange(start_date=self.start_date, end_date='today')],
property=f"properties/{GOOGLE_PROPERTY_ID}",
dimensions=[Dimension(name="pagePath")],
metrics=[Metric(name="screenPageViews")],
date_ranges=[
DateRange(start_date=self.start_date, end_date="today")
],
)
response = self.analytics_client.run_report(request)
if response and isinstance(response.rows, list):
@@ -102,21 +113,23 @@ class ViewedStorage:
                                 # Извлечение путей страниц из ответа Google Analytics
                                 if isinstance(row.dimension_values, list):
                                     page_path = row.dimension_values[0].value
-                                    slug = page_path.split('discours.io/')[-1]
+                                    slug = page_path.split("discours.io/")[-1]
                                     views_count = int(row.metric_values[0].value)

                                     # Обновление данных в хранилище
-                                    self.views_by_shout[slug] = self.views_by_shout.get(slug, 0)
+                                    self.views_by_shout[slug] = self.views_by_shout.get(
+                                        slug, 0
+                                    )
                                     self.views_by_shout[slug] += views_count
                                     self.update_topics(slug)

                                     # Запись путей страниц для логирования
                                     slugs.add(slug)

-                logger.info(f' ⎪ Собрано страниц: {len(slugs)} ')
+                logger.info(f" ⎪ Собрано страниц: {len(slugs)} ")
                 end = time.time()
-                logger.info(' ⎪ Обновление страниц заняло %fs ' % (end - start))
+                logger.info(" ⎪ Обновление страниц заняло %fs " % (end - start))

             except Exception as error:
                 logger.error(error)
@@ -165,12 +178,20 @@ class ViewedStorage:
             # Обновление тем и авторов с использованием вспомогательной функции
             for [_shout_topic, topic] in (
-                session.query(ShoutTopic, Topic).join(Topic).join(Shout).where(Shout.slug == shout_slug).all()
+                session.query(ShoutTopic, Topic)
+                .join(Topic)
+                .join(Shout)
+                .where(Shout.slug == shout_slug)
+                .all()
             ):
                 update_groups(self.shouts_by_topic, topic.slug, shout_slug)

             for [_shout_topic, author] in (
-                session.query(ShoutAuthor, Author).join(Author).join(Shout).where(Shout.slug == shout_slug).all()
+                session.query(ShoutAuthor, Author)
+                .join(Author)
+                .join(Shout)
+                .where(Shout.slug == shout_slug)
+                .all()
             ):
                 update_groups(self.shouts_by_author, author.slug, shout_slug)
@@ -188,15 +209,18 @@ class ViewedStorage:
                 failed = 0
             except Exception as _exc:
                 failed += 1
-                logger.info(' - Обновление не удалось #%d, ожидание 10 секунд' % failed)
+                logger.info(" - Обновление не удалось #%d, ожидание 10 секунд" % failed)
                 if failed > 3:
-                    logger.info(' - Больше не пытаемся обновить')
+                    logger.info(" - Больше не пытаемся обновить")
                     break
             if failed == 0:
                 when = datetime.now(timezone.utc) + timedelta(seconds=self.period)
                 t = format(when.astimezone().isoformat())
-                logger.info(' ⎩ Следующее обновление: %s' % (t.split('T')[0] + ' ' + t.split('T')[1].split('.')[0]))
+                logger.info(
+                    " ⎩ Следующее обновление: %s"
+                    % (t.split("T")[0] + " " + t.split("T")[1].split(".")[0])
+                )
                 await asyncio.sleep(self.period)
             else:
                 await asyncio.sleep(10)
-                logger.info(' - Попытка снова обновить данные')
+                logger.info(" - Попытка снова обновить данные")


@@ -15,22 +15,26 @@ class WebhookEndpoint(HTTPEndpoint):
         try:
             data = await request.json()
             if data:
-                auth = request.headers.get('Authorization')
+                auth = request.headers.get("Authorization")
                 if auth:
-                    if auth == os.environ.get('WEBHOOK_SECRET'):
-                        user_id: str = data['user']['id']
-                        name: str = data['user']['given_name']
-                        slug: str = data['user']['email'].split('@')[0]
-                        slug: str = re.sub('[^0-9a-z]+', '-', slug.lower())
+                    if auth == os.environ.get("WEBHOOK_SECRET"):
+                        user_id: str = data["user"]["id"]
+                        name: str = data["user"]["given_name"]
+                        slug: str = data["user"]["email"].split("@")[0]
+                        slug: str = re.sub("[^0-9a-z]+", "-", slug.lower())
                         with local_session() as session:
-                            author = session.query(Author).filter(Author.slug == slug).first()
+                            author = (
+                                session.query(Author)
+                                .filter(Author.slug == slug)
+                                .first()
+                            )
                             if author:
-                                slug = slug + '-' + user_id.split('-').pop()
+                                slug = slug + "-" + user_id.split("-").pop()
                             await create_author(user_id, slug, name)

-            return JSONResponse({'status': 'success'})
+            return JSONResponse({"status": "success"})
         except Exception as e:
             import traceback

             traceback.print_exc()
-            return JSONResponse({'status': 'error', 'message': str(e)}, status_code=500)
+            return JSONResponse({"status": "error", "message": str(e)}, status_code=500)