sync-precache

Untone 2023-12-19 18:31:31 +03:00
parent a0d111c50d
commit b141c26e80
5 changed files with 21 additions and 28 deletions

View File

@@ -51,5 +51,6 @@ async def shutdown():
     await redis.disconnect()

+get_all_authors()

 app = Starlette(debug=True, on_startup=[start_up], on_shutdown=[shutdown])
 app.mount("/", GraphQL(schema, debug=True))

View File

@@ -13,6 +13,7 @@ starlette = "^0.34.0"
 uvicorn = "^0.24"
 itsdangerous = "^2.1.2"
 aiohttp = "^3.9.1"
+requests = "^2.31.0"

 [tool.poetry.group.dev.dependencies]
 setuptools = "^69.0.2"

View File

@@ -2,12 +2,6 @@ from resolvers.chats import create_chat, delete_chat, update_chat
 from resolvers.load import load_chats, load_messages_by, load_recipients
 from resolvers.messages import create_message, delete_message, mark_as_read, update_message
 from resolvers.search import search_messages, search_recipients
-from services.core import get_all_authors
-import asyncio
-
-print("[resolvers] init precache: getting all authors...")
-asyncio.create_task(get_all_authors())

 __all__ = [
     # inbox
View File

@@ -116,7 +116,7 @@ async def load_recipients(_, _info, limit=50, offset=0):
     """load possible chat participants"""
     onliners: List[int] = (await redis.execute("SMEMBERS", "authors-online")) or []
     r = []
-    my_followings: List[ChatMember] = await get_my_followed()
+    my_followings: List[ChatMember] = get_my_followed()
     if len(my_followings) < limit:
         my_followings = my_followings + list(authors_by_id.values())[offset : limit - len(my_followings)]
         my_followings = list(set(my_followings))

View File

@@ -1,6 +1,6 @@
 from typing import Any, List

-from aiohttp import ClientSession
+import requests

 from models.member import ChatMember
 from settings import API_BASE
@@ -9,19 +9,19 @@ authors_by_user = {}
 authors_by_id = {}

-async def _request_endpoint(query_name, body) -> Any:
-    async with ClientSession() as session:
-        async with session.post(API_BASE, headers=headers, json=body) as response:
-            print(f"[services.core] {query_name} response: <{response.status}> {(await response.text())[:65]}..")
-            if response.status == 200:
-                r = await response.json()
-                if r:
-                    return r.get("data", {}).get(query_name, {})
-    return []
+def _request_endpoint(query_name, body) -> Any:
+    response = requests.post(API_BASE, headers=headers, json=body)
+    print(f"[services.core] {query_name} response: <{response.status_code}> {response.text[:65]}..")
+    if response.status_code == 200:
+        r = response.json()
+        if r:
+            return r.get("data", {}).get(query_name, {})
+    return []

-async def get_all_authors() -> List[ChatMember]:
+def get_all_authors() -> List[ChatMember]:
     if len(authors_by_user.keys()) == 0:
         print("[services.core] precaching authors...")
         query_name = "get_authors_all"
         # Check if authors are already cached
@@ -34,20 +34,17 @@ async def get_all_authors() -> List[ChatMember]:
         }

         # Make a request to load authors
-        authors = await _request_endpoint(query_name, gql)
+        authors = _request_endpoint(query_name, gql)

-        for a in authors:
-            if a["user"] in authors_by_user:
-                print("DOUBLE DOUBLE")
-                print(a)
-            authors_by_user[a["user"]] = a
-            authors_by_id[a["id"]] = a
-        print(f"[main] {len(authors)} authors precached")
+        for a in list(authors):
+            authors_by_user.__setitem__(a["user"], a)
+            authors_by_id.__setitem__(a["id"], a)
+        print(f"[services.core] {len(authors)} authors precached")

     return list(authors_by_id.values())

-async def get_my_followed() -> List[ChatMember]:
+def get_my_followed() -> List[ChatMember]:
     query_name = "get_my_followed"
     gql = {
@@ -55,4 +52,4 @@ async def get_my_followed() -> List[ChatMember]:
         "variables": None,
     }

-    return await _request_endpoint(query_name, gql) or []
+    return _request_endpoint(query_name, gql) or []
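
For quick manual testing of the new synchronous path, a minimal standalone sketch; the endpoint URL, headers, and GraphQL query text below are illustrative assumptions, not values from this diff (the real ones come from settings.API_BASE and the gql bodies in services/core.py):

# Standalone sketch -- CORE_API and the query string are assumptions for illustration.
from typing import Any

import requests

CORE_API = "https://core.example.org/graphql"  # assumed stand-in for settings.API_BASE
headers = {"Content-Type": "application/json"}


def _request_endpoint(query_name: str, body: dict) -> Any:
    # Blocking HTTP call; unlike the aiohttp version there is no event loop involved,
    # so it can run at import/startup time before the ASGI app starts serving.
    response = requests.post(CORE_API, headers=headers, json=body, timeout=10)
    if response.status_code == 200:
        r = response.json()
        if r:
            return r.get("data", {}).get(query_name, {})
    return []


if __name__ == "__main__":
    # Illustrative payload shape mirroring the gql dicts in services/core.py.
    gql = {"query": "query { get_authors_all { id slug user } }", "variables": None}
    authors = _request_endpoint("get_authors_all", gql) or []
    print(f"precached {len(authors)} authors")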