Compare commits

dev...feat/sv-se (71 commits)

SHA1:
e1d1096674, 82870a4e47, 80b909d801, 1ada0a02f9, 44aef147b5, 2bebfbd4df, f19248184a, 7df9361daa,
e38a1c1338, 1281157d93, 0018749905, c344fcee2d, a1a61a6731, 8d6ad2c84f, beba1992e9, b0296d7747,
98e3dff35e, 3782a9dffb, 93c00b3dd1, fac43e5997, e7facf8d87, 3062a2b7de, c0406dbbf2, ab4610575f,
5425dbf832, a10db2d38a, 83e70856cd, 11654dba68, ec9465ad40, 4d965fb27b, e382cc1ea5, 83d61ca76d,
106222b0e0, c533241d1e, 78326047bf, bc4ec79240, a0db5707c4, ecc443c3ad, 9a02ca74ad, 9ebb81cbd3,
0bc55977ac, ff3a4debce, ae85b32f69, 34a354e9e3, e405fb527b, 7f36f93d92, f089a32394, 1fd623a660,
88012f1b8c, 6e284640c0, 077cb46482, 60a13a9097, 316375bf18, fb820f67fd, f1d9f4e036, ebb67eb311,
50a8c24ead, eb4b9363ab, 19c5028a0c, 57e1e8e6bd, 385057ffcd, 90699768ff, ad0ca75aa9, 39242d5e6c,
24cca7f2cb, a9c7ac49d6, f249752db5, c0b2116da2, 59e71c8144, e6a416383d, d55448398d
GitHub Actions deploy workflow

@@ -29,7 +29,16 @@ jobs:
          if: github.ref == 'refs/heads/dev'
          uses: dokku/github-action@master
          with:
            branch: 'dev'
            branch: 'main'
            force: true
            git_remote_url: 'ssh://dokku@v2.discours.io:22/core'
            ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}

      - name: Push to dokku for staging branch
        if: github.ref == 'refs/heads/staging'
        uses: dokku/github-action@master
        with:
          branch: 'dev'
          git_remote_url: 'ssh://dokku@staging.discours.io:22/core'
          ssh_private_key: ${{ secrets.SSH_PRIVATE_KEY }}
          git_push_flags: '--force'
.gitignore (vendored, 4 changes)

@@ -128,6 +128,9 @@ dmypy.json
.idea
temp.*

# Debug
DEBUG.log

discours.key
discours.crt
discours.pem

@@ -162,3 +165,4 @@ views.json
*.crt
*cache.json
.cursor
.devcontainer/
Dockerfile

@@ -3,6 +3,7 @@ FROM python:slim
RUN apt-get update && apt-get install -y \
    postgresql-client \
    curl \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
cache/precache.py (vendored, 12 changes)

@@ -77,11 +77,15 @@ async def precache_topics_followers(topic_id: int, session):

async def precache_data():
    logger.info("precaching...")
    logger.debug("Entering precache_data")
    try:
        key = "authorizer_env"
        logger.debug(f"Fetching existing hash for key '{key}' from Redis")
        # cache reset
        value = await redis.execute("HGETALL", key)
        logger.debug(f"Fetched value for '{key}': {value}")
        await redis.execute("FLUSHDB")
        logger.debug("Redis database flushed")
        logger.info("redis: FLUSHDB")

        # Convert the dictionary into a list of arguments for HSET

@@ -97,21 +101,27 @@ async def precache_data():
        await redis.execute("HSET", key, *value)
        logger.info(f"redis hash '{key}' was restored")

        logger.info("Beginning topic precache phase")
        with local_session() as session:
            # topics
            q = select(Topic).where(Topic.community == 1)
            topics = get_with_stat(q)
            logger.info(f"Found {len(topics)} topics to precache")
            for topic in topics:
                topic_dict = topic.dict() if hasattr(topic, "dict") else topic
                logger.debug(f"Precaching topic id={topic_dict.get('id')}")
                await cache_topic(topic_dict)
                logger.debug(f"Cached topic id={topic_dict.get('id')}")
                await asyncio.gather(
                    precache_topics_followers(topic_dict["id"], session),
                    precache_topics_authors(topic_dict["id"], session),
                )
                logger.debug(f"Finished precaching followers and authors for topic id={topic_dict.get('id')}")
            logger.info(f"{len(topics)} topics and their followings precached")

            # authors
            authors = get_with_stat(select(Author).where(Author.user.is_not(None)))
            logger.info(f"Found {len(authors)} authors to precache")
            logger.info(f"{len(authors)} authors found in database")
            for author in authors:
                if isinstance(author, Author):

@@ -119,10 +129,12 @@ async def precache_data():
                    author_id = profile.get("id")
                    user_id = profile.get("user", "").strip()
                    if author_id and user_id:
                        logger.debug(f"Precaching author id={author_id}")
                        await cache_author(profile)
                        await asyncio.gather(
                            precache_authors_followers(author_id, session), precache_authors_follows(author_id, session)
                        )
                        logger.debug(f"Finished precaching followers and follows for author id={author_id}")
                    else:
                        logger.error(f"fail caching {author}")
            logger.info(f"{len(authors)} authors and their followings precached")
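The hunk above rebuilds the cache by snapshotting the `authorizer_env` hash, flushing Redis, and writing the hash back. Below is a minimal sketch of that "reset the cache but keep one hash" pattern, assuming the stock redis-py asyncio client rather than the project's own `redis.execute` wrapper; names and values are illustrative.

```python
# Minimal sketch: flush a Redis database while preserving one hash.
# Uses redis-py's asyncio client, not the project's redis wrapper.
import asyncio

from redis.asyncio import Redis


async def flush_preserving_hash(r: Redis, key: str = "authorizer_env") -> None:
    # Read the hash before wiping the database
    saved = await r.hgetall(key)

    # Drop everything in the current logical database
    await r.flushdb()

    # Restore the single hash, if it had any fields
    if saved:
        await r.hset(key, mapping=saved)


async def main() -> None:
    r = Redis(host="localhost", port=6379, decode_responses=True)
    await r.hset("authorizer_env", mapping={"ENV": "dev", "CLIENT_ID": "example"})
    await flush_preserving_hash(r)
    print(await r.hgetall("authorizer_env"))  # the hash survives the flush
    await r.aclose()


if __name__ == "__main__":
    asyncio.run(main())
```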
main.py (61 changes)

@@ -17,7 +17,7 @@ from cache.revalidator import revalidation_manager
from services.exception import ExceptionHandlerMiddleware
from services.redis import redis
from services.schema import create_all_tables, resolvers
from services.search import search_service
from services.search import search_service, initialize_search_index
from services.viewed import ViewedStorage
from services.webhook import WebhookEndpoint, create_webhook_endpoint
from settings import DEV_SERVER_PID_FILE_NAME, MODE

@@ -34,24 +34,79 @@ async def start():
        f.write(str(os.getpid()))
    print(f"[main] process started in {MODE} mode")


async def check_search_service():
    """Check if search service is available and log result"""
    info = await search_service.info()
    if info.get("status") in ["error", "unavailable"]:
        print(f"[WARNING] Search service unavailable: {info.get('message', 'unknown reason')}")
    else:
        print(f"[INFO] Search service is available: {info}")


# Helper to run precache with timeout and catch errors
async def precache_with_timeout():
    try:
        await asyncio.wait_for(precache_data(), timeout=60)
    except asyncio.TimeoutError:
        print("[precache] Precache timed out after 60 seconds")
    except Exception as e:
        print(f"[precache] Error during precache: {e}")


# indexing DB data
# async def indexing():
#     from services.db import fetch_all_shouts
#     all_shouts = await fetch_all_shouts()
#     await initialize_search_index(all_shouts)
async def lifespan(_app):
    try:
        print("[lifespan] Starting application initialization")
        create_all_tables()

        # schedule precaching in background with timeout and error handling
        asyncio.create_task(precache_with_timeout())

        await asyncio.gather(
            redis.connect(),
            precache_data(),
            ViewedStorage.init(),
            create_webhook_endpoint(),
            search_service.info(),
            check_search_service(),
            start(),
            revalidation_manager.start(),
        )
        print("[lifespan] Basic initialization complete")

        # Add a delay before starting the intensive search indexing
        print("[lifespan] Waiting for system stabilization before search indexing...")
        await asyncio.sleep(10)  # 10-second delay to let the system stabilize

        # Start search indexing as a background task with lower priority
        asyncio.create_task(initialize_search_index_background())

        yield
    finally:
        print("[lifespan] Shutting down application services")
        tasks = [redis.disconnect(), ViewedStorage.stop(), revalidation_manager.stop()]
        await asyncio.gather(*tasks, return_exceptions=True)
        print("[lifespan] Shutdown complete")


# Initialize search index in the background
async def initialize_search_index_background():
    """Run search indexing as a background task with low priority"""
    try:
        print("[search] Starting background search indexing process")
        from services.db import fetch_all_shouts

        # Get total count first (optional)
        all_shouts = await fetch_all_shouts()
        total_count = len(all_shouts) if all_shouts else 0
        print(f"[search] Fetched {total_count} shouts for background indexing")

        # Start the indexing process with the fetched shouts
        print("[search] Beginning background search index initialization...")
        await initialize_search_index(all_shouts)
        print("[search] Background search index initialization complete")
    except Exception as e:
        print(f"[search] Error in background search indexing: {str(e)}")


# Create the GraphQL instance
graphql_app = GraphQL(schema, debug=True)
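The lifespan changes combine two asyncio patterns: a startup job wrapped in asyncio.wait_for so it cannot hang the boot sequence, and a heavy indexing job deferred with asyncio.create_task after a stabilization delay. A self-contained sketch of those two patterns follows; the task bodies, names, and delays are illustrative stand-ins, not the application's real ones.

```python
import asyncio


async def slow_warmup() -> None:
    # Stand-in for precache_data(): any long-running startup job
    await asyncio.sleep(2)
    print("[warmup] done")


async def warmup_with_timeout(timeout: float = 60.0) -> None:
    # Bound the warmup so a hung dependency cannot block startup forever
    try:
        await asyncio.wait_for(slow_warmup(), timeout=timeout)
    except asyncio.TimeoutError:
        print(f"[warmup] timed out after {timeout} seconds")
    except Exception as e:
        print(f"[warmup] error: {e}")


async def heavy_background_job() -> None:
    # Stand-in for initialize_search_index_background(): deferred, low-priority work
    print("[index] starting")
    await asyncio.sleep(1)
    print("[index] finished")


async def lifespan_demo() -> None:
    # Kick off the bounded warmup without awaiting it
    warmup_task = asyncio.create_task(warmup_with_timeout(timeout=5))

    # ... the rest of initialization would run here (redis.connect(), etc.) ...
    print("[lifespan] basic initialization complete")

    # Give the service time to stabilize, then start the heavy job in the background
    await asyncio.sleep(0.5)
    index_task = asyncio.create_task(heavy_background_job())

    # Keep references so the tasks are not garbage-collected before finishing
    await asyncio.gather(warmup_task, index_task)


if __name__ == "__main__":
    asyncio.run(lifespan_demo())
```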
orm/shout.py (28 changes)

@@ -71,6 +71,34 @@ class ShoutAuthor(Base):

class Shout(Base):
    """
    A publication in the system.

    Attributes:
        body (str)
        slug (str)
        cover (str) : "Cover image url"
        cover_caption (str) : "Cover image alt caption"
        lead (str)
        title (str)
        subtitle (str)
        layout (str)
        media (dict)
        authors (list[Author])
        topics (list[Topic])
        reactions (list[Reaction])
        lang (str)
        version_of (int)
        oid (str)
        seo (str) : JSON
        draft (int)
        created_at (int)
        updated_at (int)
        published_at (int)
        featured_at (int)
        deleted_at (int)
        created_by (int)
        updated_by (int)
        deleted_by (int)
        community (int)
    """

    __tablename__ = "shout"
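The docstring documents the model's attributes, including integer epoch timestamps and nullable published_at/deleted_at markers. A minimal, hypothetical sketch of a declarative model shaped that way is shown below; it is not the project's actual Shout model, only an illustration of the documented attribute conventions.

```python
# Hypothetical sketch of a declarative model with the documented attribute shapes:
# integer epoch timestamps and nullable published_at / deleted_at columns.
import time

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class ShoutSketch(Base):
    __tablename__ = "shout_sketch"

    id = Column(Integer, primary_key=True)
    slug = Column(String, unique=True)
    title = Column(String, nullable=True)
    body = Column(String, default="")
    layout = Column(String, default="article")

    # Epoch seconds, as the docstring's "(int)" timestamps suggest
    created_at = Column(Integer, default=lambda: int(time.time()))
    updated_at = Column(Integer, nullable=True)
    published_at = Column(Integer, nullable=True)  # None means "not published yet"
    deleted_at = Column(Integer, nullable=True)    # None means "not deleted"
```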
Python requirements

@@ -13,6 +13,10 @@ starlette
gql
ariadne
granian

# NLP and search
httpx

orjson
pydantic
trafilatura
Resolver package exports

@@ -8,6 +8,7 @@ from resolvers.author import (  # search_authors,
    get_author_id,
    get_authors_all,
    load_authors_by,
    load_authors_search,
    update_author,
)
from resolvers.community import get_communities_all, get_community

@@ -73,6 +74,7 @@ __all__ = [
    "get_author_follows_authors",
    "get_authors_all",
    "load_authors_by",
    "load_authors_search",
    "update_author",
    ## "search_authors",
    # community
Author resolvers

@@ -20,6 +20,7 @@ from services.auth import login_required
from services.db import local_session
from services.redis import redis
from services.schema import mutation, query
from services.search import search_service
from utils.logger import root_logger as logger

DEFAULT_COMMUNITIES = [1]

@@ -301,6 +302,46 @@ async def load_authors_by(_, _info, by, limit, offset):
    return await get_authors_with_stats(limit, offset, by)


@query.field("load_authors_search")
async def load_authors_search(_, info, text: str, limit: int = 10, offset: int = 0):
    """
    Resolver for searching authors by text. Works with the txt-ai search endpoint.

    Args:
        text: Search text
        limit: Maximum number of authors to return
        offset: Offset for pagination

    Returns:
        list: List of authors matching the search criteria
    """

    # Get author IDs from search engine (already sorted by relevance)
    search_results = await search_service.search_authors(text, limit, offset)

    if not search_results:
        return []

    author_ids = [result.get("id") for result in search_results if result.get("id")]
    if not author_ids:
        return []

    # Fetch full author objects from DB
    with local_session() as session:
        # Simple query to get authors by IDs - no need for stats here
        authors_query = select(Author).filter(Author.id.in_(author_ids))
        db_authors = session.execute(authors_query).scalars().all()

        if not db_authors:
            return []

        # Create a dictionary for quick lookup
        authors_dict = {str(author.id): author for author in db_authors}

        # Keep the order from search results (maintains the relevance sorting)
        ordered_authors = [authors_dict[author_id] for author_id in author_ids if author_id in authors_dict]

        return ordered_authors


def get_author_id_from(slug="", user=None, author_id=None):
    try:
        author_id = None
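For reference, here is one way a client might call the new load_authors_search field over plain HTTP. The endpoint URL and the selected Author fields (id, slug) are illustrative assumptions; only the field name and its arguments come from the resolver and schema in this changeset.

```python
# Hedged usage sketch: querying load_authors_search with httpx.
import asyncio

import httpx

AUTHORS_SEARCH_QUERY = """
query AuthorsSearch($text: String!, $limit: Int, $offset: Int) {
  load_authors_search(text: $text, limit: $limit, offset: $offset) {
    id
    slug
  }
}
"""


async def search_authors(endpoint: str, text: str, limit: int = 10, offset: int = 0) -> list:
    async with httpx.AsyncClient(timeout=10.0) as client:
        resp = await client.post(
            endpoint,
            json={"query": AUTHORS_SEARCH_QUERY, "variables": {"text": text, "limit": limit, "offset": offset}},
        )
        resp.raise_for_status()
        payload = resp.json()
        if payload.get("errors"):
            raise RuntimeError(payload["errors"])
        return payload["data"]["load_authors_search"]


if __name__ == "__main__":
    # Placeholder endpoint; replace with the real GraphQL URL
    print(asyncio.run(search_authors("http://localhost:8000/", "tolstoy")))
```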
Reader (shout) resolvers

@@ -10,7 +10,7 @@ from orm.shout import Shout, ShoutAuthor, ShoutTopic
from orm.topic import Topic
from services.db import json_array_builder, json_builder, local_session
from services.schema import query
from services.search import search_text
from services.search import search_text, get_search_count
from services.viewed import ViewedStorage
from utils.logger import root_logger as logger


@@ -187,12 +187,10 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
    """
    shouts = []
    try:
        # logger.info(f"Starting get_shouts_with_links with limit={limit}, offset={offset}")
        q = q.limit(limit).offset(offset)

        with local_session() as session:
            shouts_result = session.execute(q).all()
            # logger.info(f"Got {len(shouts_result) if shouts_result else 0} shouts from query")

            if not shouts_result:
                logger.warning("No shouts found in query result")

@@ -203,7 +201,6 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
                shout = None
                if hasattr(row, "Shout"):
                    shout = row.Shout
                    # logger.debug(f"Processing shout#{shout.id} at index {idx}")
                if shout:
                    shout_id = int(f"{shout.id}")
                    shout_dict = shout.dict()

@@ -231,20 +228,16 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
                    topics = None
                    if has_field(info, "topics") and hasattr(row, "topics"):
                        topics = orjson.loads(row.topics) if isinstance(row.topics, str) else row.topics
                        # logger.debug(f"Shout#{shout_id} topics: {topics}")
                        shout_dict["topics"] = topics

                    if has_field(info, "main_topic"):
                        main_topic = None
                        if hasattr(row, "main_topic"):
                            # logger.debug(f"Raw main_topic for shout#{shout_id}: {row.main_topic}")
                            main_topic = (
                                orjson.loads(row.main_topic) if isinstance(row.main_topic, str) else row.main_topic
                            )
                            # logger.debug(f"Parsed main_topic for shout#{shout_id}: {main_topic}")

                        if not main_topic and topics and len(topics) > 0:
                            # logger.info(f"No main_topic found for shout#{shout_id}, using first topic from list")
                            main_topic = {
                                "id": topics[0]["id"],
                                "title": topics[0]["title"],

@@ -252,10 +245,8 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
                                "is_main": True,
                            }
                        elif not main_topic:
                            logger.warning(f"No main_topic and no topics found for shout#{shout_id}")
                            main_topic = {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True}
                        shout_dict["main_topic"] = main_topic
                        # logger.debug(f"Final main_topic for shout#{shout_id}: {main_topic}")

                    if has_field(info, "authors") and hasattr(row, "authors"):
                        shout_dict["authors"] = (

@@ -282,7 +273,6 @@ def get_shouts_with_links(info, q, limit=20, offset=0):
        logger.error(f"Fatal error in get_shouts_with_links: {e}", exc_info=True)
        raise
    finally:
        logger.info(f"Returning {len(shouts)} shouts from get_shouts_with_links")
        return shouts


@@ -401,33 +391,49 @@ async def load_shouts_search(_, info, text, options):
    """
    limit = options.get("limit", 10)
    offset = options.get("offset", 0)
    if isinstance(text, str) and len(text) > 2:
        results = await search_text(text, limit, offset)
        scores = {}
        hits_ids = []
        for sr in results:
            shout_id = sr.get("id")
            if shout_id:
                shout_id = str(shout_id)
                scores[shout_id] = sr.get("score")
                hits_ids.append(shout_id)

        q = (
            query_with_stat(info)
            if has_field(info, "stat")
            else select(Shout).filter(and_(Shout.published_at.is_not(None), Shout.deleted_at.is_(None)))
        )
    if isinstance(text, str) and len(text) > 2:
        # Get search results with pagination
        results = await search_text(text, limit, offset)

        if not results:
            logger.info(f"No search results found for '{text}'")
            return []

        # Extract IDs in the order from the search engine
        hits_ids = [str(sr.get("id")) for sr in results if sr.get("id")]

        # Query DB for only the IDs in the current page
        q = query_with_stat(info)
        q = q.filter(Shout.id.in_(hits_ids))
        q = apply_filters(q, options)
        q = apply_sorting(q, options)
        shouts = get_shouts_with_links(info, q, limit, offset)
        for shout in shouts:
            shout.score = scores[f"{shout.id}"]
        shouts.sort(key=lambda x: x.score, reverse=True)
        return shouts
        q = apply_filters(q, options.get("filters", {}))

        shouts = get_shouts_with_links(info, q, len(hits_ids), 0)

        # Reorder shouts to match the order from hits_ids
        shouts_dict = {str(shout['id']): shout for shout in shouts}
        ordered_shouts = [shouts_dict[shout_id] for shout_id in hits_ids if shout_id in shouts_dict]

        return ordered_shouts
    return []


@query.field("get_search_results_count")
async def get_search_results_count(_, info, text):
    """
    Returns the total count of search results for a search query.

    :param _: Root query object (unused)
    :param info: GraphQL context information
    :param text: Search query text
    :return: Total count of results
    """
    if isinstance(text, str) and len(text) > 2:
        count = await get_search_count(text)
        return {"count": count}
    return {"count": 0}


@query.field("load_shouts_unrated")
async def load_shouts_unrated(_, info, options):
    """
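The rewritten load_shouts_search no longer attaches scores and re-sorts the ORM objects; it preserves the search engine's ranking by looking rows up in a dict keyed by id. The core of that reordering step, extracted into a small standalone helper with made-up sample data:

```python
# Keep database rows in the order the search engine returned them.
# `ranked_ids` is the relevance-ordered list of ids from the search backend;
# `rows` is whatever the database handed back, in arbitrary order.
def order_by_relevance(ranked_ids: list, rows: list) -> list:
    by_id = {str(row["id"]): row for row in rows}
    return [by_id[i] for i in ranked_ids if i in by_id]


if __name__ == "__main__":
    hits = ["42", "7", "13"]  # ids as ranked by the search engine
    db_rows = [{"id": 7, "title": "b"}, {"id": 13, "title": "c"}, {"id": 42, "title": "a"}]
    print(order_by_relevance(hits, db_rows))  # rows come back in relevance order
```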
GraphQL schema: Query

@@ -4,7 +4,7 @@ type Query {
  get_author_id(user: String!): Author
  get_authors_all: [Author]
  load_authors_by(by: AuthorsBy!, limit: Int, offset: Int): [Author]
  # search_authors(what: String!): [Author]
  load_authors_search(text: String!, limit: Int, offset: Int): [Author!] # Search for authors by name or bio

  # community
  get_community: Community

@@ -33,6 +33,7 @@ type Query {
  get_shout(slug: String, shout_id: Int): Shout
  load_shouts_by(options: LoadShoutsOptions): [Shout]
  load_shouts_search(text: String!, options: LoadShoutsOptions): [SearchResult]
  get_search_results_count(text: String!): CountResult!
  load_shouts_bookmarked(options: LoadShoutsOptions): [Shout]

  # rating
@ -213,6 +213,7 @@ type CommonResult {
|
|||
}
|
||||
|
||||
type SearchResult {
|
||||
id: Int!
|
||||
slug: String!
|
||||
title: String!
|
||||
cover: String
|
||||
|
@ -280,3 +281,7 @@ type MyRateComment {
|
|||
my_rate: ReactionKind
|
||||
}
|
||||
|
||||
type CountResult {
|
||||
count: Int!
|
||||
}
|
||||
|
||||
|
|
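Together, load_shouts_search and get_search_results_count let a client paginate search results while showing a total count. A hedged client-side sketch using the gql library (already present in the requirements) follows; the endpoint URL and the selected SearchResult fields are illustrative assumptions, and the aiohttp transport requires the gql[aiohttp] extra.

```python
# Hedged sketch: fetch one page of search results plus the total count.
import asyncio

from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport  # needs gql[aiohttp]

SEARCH_PAGE = gql(
    """
    query SearchPage($text: String!, $options: LoadShoutsOptions) {
      load_shouts_search(text: $text, options: $options) { id slug title }
      get_search_results_count(text: $text) { count }
    }
    """
)


async def search_page(endpoint: str, text: str, limit: int = 10, offset: int = 0) -> dict:
    transport = AIOHTTPTransport(url=endpoint)
    async with Client(transport=transport, fetch_schema_from_transport=False) as session:
        return await session.execute(
            SEARCH_PAGE,
            variable_values={"text": text, "options": {"limit": limit, "offset": offset}},
        )


if __name__ == "__main__":
    # Placeholder endpoint, for illustration only
    data = asyncio.run(search_page("http://localhost:8000/", "culture"))
    total = data["get_search_results_count"]["count"]
    page = data["load_shouts_search"]
    print(f"{len(page)} of {total} results")
```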
services/db.py

@@ -19,7 +19,7 @@ from sqlalchemy import (
    inspect,
    text,
)
from sqlalchemy.orm import Session, configure_mappers, declarative_base
from sqlalchemy.orm import Session, configure_mappers, declarative_base, joinedload
from sqlalchemy.sql.schema import Table

from settings import DB_URL

@@ -259,3 +259,32 @@ def get_json_builder():

# Use them in the code
json_builder, json_array_builder, json_cast = get_json_builder()

# Fetch all shouts, with authors preloaded
# This function is used for search indexing

async def fetch_all_shouts(session=None):
    """Fetch all published shouts for search indexing with authors preloaded"""
    from orm.shout import Shout

    close_session = False
    if session is None:
        session = local_session()
        close_session = True

    try:
        # Fetch only published and non-deleted shouts with authors preloaded
        query = session.query(Shout).options(
            joinedload(Shout.authors)
        ).filter(
            Shout.published_at.is_not(None),
            Shout.deleted_at.is_(None)
        )
        shouts = query.all()
        return shouts
    except Exception as e:
        logger.error(f"Error fetching shouts for search indexing: {e}")
        return []
    finally:
        if close_session:
            session.close()
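fetch_all_shouts eager-loads authors with joinedload so the indexer can read each shout's authors without issuing one extra query per row (the classic N+1 problem). A self-contained demonstration of that eager-loading pattern with toy models, not the project's ORM:

```python
# Self-contained sketch of the joinedload pattern used by fetch_all_shouts:
# eager-load a relationship so serializing rows does not trigger per-row queries.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, joinedload, relationship

Base = declarative_base()


class Article(Base):
    __tablename__ = "article"
    id = Column(Integer, primary_key=True)
    title = Column(String)
    author_id = Column(Integer, ForeignKey("writer.id"))
    author = relationship("Writer")


class Writer(Base):
    __tablename__ = "writer"
    id = Column(Integer, primary_key=True)
    name = Column(String)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Writer(id=1, name="anna"))
    session.add(Article(id=1, title="hello", author_id=1))
    session.commit()

    # joinedload fetches articles and their authors in a single SELECT
    articles = session.query(Article).options(joinedload(Article.author)).all()
    docs = [{"id": a.id, "title": a.title, "author": a.author.name} for a in articles]
    print(docs)
```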
Authorizer GraphQL client

@@ -29,12 +29,19 @@ async def request_graphql_data(gql, url=AUTH_URL, headers=None):
        async with httpx.AsyncClient() as client:
            response = await client.post(url, json=gql, headers=headers)
            if response.status_code == 200:
                data = response.json()
                errors = data.get("errors")
                if errors:
                    logger.error(f"{url} response: {data}")
                # Check if the response has content before parsing
                if response.content and len(response.content.strip()) > 0:
                    try:
                        data = response.json()
                        errors = data.get("errors")
                        if errors:
                            logger.error(f"{url} response: {data}")
                        else:
                            return data
                    except Exception as json_err:
                        logger.error(f"JSON decode error: {json_err}, Response content: {response.text[:100]}")
                else:
                    return data
                    logger.error(f"{url}: Response is empty")
            else:
                logger.error(f"{url}: {response.status_code} {response.text}")
    except Exception as _e:
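The change above guards against empty or non-JSON bodies before calling response.json(). The same defensive pattern as a standalone helper; the example endpoint and query are placeholders, not the project's real ones.

```python
# Parse a GraphQL-over-HTTP response defensively: check the status code, make sure
# the body is non-empty, and catch JSON decode errors instead of letting them bubble up.
import asyncio
from typing import Optional

import httpx


async def post_graphql(url: str, payload: dict) -> Optional[dict]:
    async with httpx.AsyncClient(timeout=10.0) as client:
        response = await client.post(url, json=payload)

    if response.status_code != 200:
        print(f"{url}: HTTP {response.status_code}")
        return None

    if not response.content or not response.content.strip():
        print(f"{url}: empty response body")
        return None

    try:
        data = response.json()
    except ValueError as json_err:  # json.JSONDecodeError is a subclass of ValueError
        print(f"JSON decode error: {json_err}, body starts with: {response.text[:100]!r}")
        return None

    if data.get("errors"):
        print(f"{url} returned errors: {data['errors']}")
        return None
    return data


if __name__ == "__main__":
    # Placeholder endpoint and query, for illustration only
    result = asyncio.run(post_graphql("http://localhost:8000/graphql", {"query": "{ __typename }"}))
    print(result)
```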
services/search.py (1080 changes)

File diff suppressed because it is too large.