diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 2365f893..00000000 --- a/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.8 - -EXPOSE 8080 - -RUN /usr/local/bin/python -m pip install --upgrade pip - -WORKDIR /usr/src/app - -COPY requirements.txt ./ - -RUN set -ex && pip install -r requirements.txt - -COPY . . - -CMD ["python", "server.py"] diff --git a/Pipfile b/Pipfile deleted file mode 100644 index d51ca80f..00000000 --- a/Pipfile +++ /dev/null @@ -1,29 +0,0 @@ -[[source]] -url = "https://pypi.org/simple" -verify_ssl = true -name = "pypi" - -[packages] -aioredis = "*" -ariadne = "*" -starlette = "*" -uvicorn = "*" -pydantic = "*" -passlib = "*" -PyJWT = ">=2.4.0" -SQLAlchemy = "*" -itsdangerous = "*" -httpx = ">=0.23.0" -psycopg2-binary = "*" -Authlib = "*" -bson = "*" -python-frontmatter = "*" -bs4 = "*" -transliterate = "*" -psycopg2 = "*" -requests = "*" - -[dev-packages] - -[requires] -python_version = "3.8" diff --git a/migrate.py b/migrate.py deleted file mode 100644 index 838d3485..00000000 --- a/migrate.py +++ /dev/null @@ -1,312 +0,0 @@ -''' cmd managed migration ''' -from datetime import datetime -import json -import subprocess -import sys - -from click import prompt -# from migration.export import export_email_subscriptions -from migration.export import export_mdx, export_slug -from migration.tables.users import migrate as migrateUser -from migration.tables.users import migrate_2stage as migrateUser_2stage -from migration.tables.content_items import get_shout_slug, migrate as migrateShout -from migration.tables.topics import migrate as migrateTopic -from migration.tables.comments import migrate as migrateComment -from migration.tables.comments import migrate_2stage as migrateComment_2stage -from orm.reaction import Reaction - -TODAY = datetime.strftime(datetime.now(), '%Y%m%d') - -OLD_DATE = '2016-03-05 22:22:00.350000' - - -def users_handle(storage): - ''' migrating users first ''' - counter = 0 - id_map = {} - print('[migration] migrating %d users' % (len(storage['users']['data']))) - for entry in storage['users']['data']: - oid = entry['_id'] - user = migrateUser(entry) - storage['users']['by_oid'][oid] = user # full - del user['password'] - del user['notifications'] - del user['emailConfirmed'] - del user['username'] - del user['email'] - storage['users']['by_slug'][user['slug']] = user # public - id_map[user['oid']] = user['slug'] - counter += 1 - ce = 0 - for entry in storage['users']['data']: - ce += migrateUser_2stage(entry, id_map) - return storage - - -def topics_handle(storage): - ''' topics from categories and tags ''' - counter = 0 - for t in (storage['topics']['tags'] + storage['topics']['cats']): - if t['slug'] in storage['replacements']: - t['slug'] = storage['replacements'][t['slug']] - topic = migrateTopic(t) - storage['topics']['by_oid'][t['_id']] = topic - storage['topics']['by_slug'][t['slug']] = topic - counter += 1 - else: - print('[migration] topic ' + t['slug'] + ' ignored') - for oldslug, newslug in storage['replacements'].items(): - if oldslug != newslug and oldslug in storage['topics']['by_slug']: - oid = storage['topics']['by_slug'][oldslug]['_id'] - del storage['topics']['by_slug'][oldslug] - storage['topics']['by_oid'][oid] = storage['topics']['by_slug'][newslug] - print('[migration] ' + str(counter) + ' topics migrated') - print('[migration] ' + str(len(storage['topics'] - ['by_oid'].values())) + ' topics by oid') - print('[migration] ' + str(len(storage['topics'] - ['by_slug'].values())) + ' topics by slug') - # 
raise Exception - return storage - - -def shouts_handle(storage, args): - ''' migrating content items one by one ''' - counter = 0 - discours_author = 0 - pub_counter = 0 - for entry in storage['shouts']['data']: - # slug - slug = get_shout_slug(entry) - - # single slug mode - if '-' in args and slug not in args: continue - - # migrate - shout = migrateShout(entry, storage) - storage['shouts']['by_oid'][entry['_id']] = shout - storage['shouts']['by_slug'][shout['slug']] = shout - # shouts.topics - if not shout['topics']: print('[migration] no topics!') - - # with author - author = shout['authors'][0].slug - if author == 'discours': discours_author += 1 - # print('[migration] ' + shout['slug'] + ' with author ' + author) - - if entry.get('published'): - if 'mdx' in args: export_mdx(shout) - pub_counter += 1 - - # print main counter - counter += 1 - line = str(counter) + ': ' + shout['slug'] + " @" + author - print(line) - - print('[migration] ' + str(counter) + ' content items were migrated') - print('[migration] ' + str(pub_counter) + ' have been published') - print('[migration] ' + str(discours_author) + ' authored by @discours') - return storage - - -def comments_handle(storage): - id_map = {} - ignored_counter = 0 - missed_shouts = {} - for oldcomment in storage['reactions']['data']: - if not oldcomment.get('deleted'): - reaction = migrateComment(oldcomment, storage) - if type(reaction) == str: - missed_shouts[reaction] = oldcomment - elif type(reaction) == Reaction: - reaction = reaction.dict() - id = reaction['id'] - oid = reaction['oid'] - id_map[oid] = id - else: - ignored_counter += 1 - - for reaction in storage['reactions']['data']: migrateComment_2stage( - reaction, id_map) - print('[migration] ' + str(len(id_map)) + ' comments migrated') - print('[migration] ' + str(ignored_counter) + ' comments ignored') - print('[migration] ' + str(len(missed_shouts.keys())) + - ' commented shouts missed') - missed_counter = 0 - for missed in missed_shouts.values(): - missed_counter += len(missed) - print('[migration] ' + str(missed_counter) + ' comments dropped') - return storage - - -def bson_handle(): - # decode bson # preparing data - from migration import bson2json - bson2json.json_tables() - - -def export_one(slug, storage): - topics_handle(storage) - users_handle(storage) - shouts_handle(storage, ['-', slug]) - export_slug(slug, storage) - - -def all_handle(storage, args): - print('[migration] handle everything') - users_handle(storage) - topics_handle(storage) - shouts_handle(storage, args) - comments_handle(storage) - # export_email_subscriptions() - print('[migration] done!') - - -def data_load(): - storage = { - 'content_items': { - 'by_oid': {}, - 'by_slug': {}, - }, - 'shouts': { - 'by_oid': {}, - 'by_slug': {}, - 'data': [] - }, - 'reactions': { - 'by_oid': {}, - 'by_slug': {}, - 'by_content': {}, - 'data': [] - }, - 'topics': { - 'by_oid': {}, - 'by_slug': {}, - 'cats': [], - 'tags': [], - }, - 'users': { - 'by_oid': {}, - 'by_slug': {}, - 'data': [] - }, - 'replacements': json.loads(open('migration/tables/replacements.json').read()) - } - users_data = [] - tags_data = [] - cats_data = [] - comments_data = [] - content_data = [] - try: - users_data = json.loads(open('migration/data/users.json').read()) - print('[migration] ' + str(len(users_data)) + ' users ') - tags_data = json.loads(open('migration/data/tags.json').read()) - storage['topics']['tags'] = tags_data - print('[migration] ' + str(len(tags_data)) + ' tags ') - cats_data = json.loads( -
open('migration/data/content_item_categories.json').read()) - storage['topics']['cats'] = cats_data - print('[migration] ' + str(len(cats_data)) + ' cats ') - comments_data = json.loads(open('migration/data/comments.json').read()) - storage['reactions']['data'] = comments_data - print('[migration] ' + str(len(comments_data)) + ' comments ') - content_data = json.loads(open('migration/data/content_items.json').read()) - storage['shouts']['data'] = content_data - print('[migration] ' + str(len(content_data)) + ' content items ') - # fill out storage - for x in users_data: - storage['users']['by_oid'][x['_id']] = x - # storage['users']['by_slug'][x['slug']] = x - # no user.slug yet - print('[migration] ' + str(len(storage['users'] - ['by_oid'].keys())) + ' users by oid') - for x in tags_data: - storage['topics']['by_oid'][x['_id']] = x - storage['topics']['by_slug'][x['slug']] = x - for x in cats_data: - storage['topics']['by_oid'][x['_id']] = x - storage['topics']['by_slug'][x['slug']] = x - print('[migration] ' + str(len(storage['topics'] - ['by_slug'].keys())) + ' topics by slug') - for item in content_data: - slug = get_shout_slug(item) - storage['content_items']['by_slug'][slug] = item - storage['content_items']['by_oid'][item['_id']] = item - print('[migration] ' + str(len(content_data)) + ' content items') - for x in comments_data: - storage['reactions']['by_oid'][x['_id']] = x - cid = x['contentItem'] - storage['reactions']['by_content'][cid] = x - ci = storage['content_items']['by_oid'].get(cid, {}) - if 'slug' in ci: storage['reactions']['by_slug'][ci['slug']] = x - print('[migration] ' + str(len(storage['reactions'] - ['by_content'].keys())) + ' with comments') - except Exception as e: raise e - storage['users']['data'] = users_data - storage['topics']['tags'] = tags_data - storage['topics']['cats'] = cats_data - storage['shouts']['data'] = content_data - storage['reactions']['data'] = comments_data - return storage - - -def mongo_download(url): - print('[migration] mongodb url: ' + url) - open('migration/data/mongodb.url', 'w').write(url) - logname = 'migration/data/mongo-' + TODAY + '.log' - subprocess.call([ - 'mongodump', - '--uri', url, - '--forceTableScan', - ], stdout=open(logname, 'w')) - - -def create_pgdump(): - # pg_dump -d discoursio > 20220714-pgdump.sql - subprocess.call( - [ 'pg_dump', 'postgres://localhost:5432/discoursio', '-f', 'migration/data/' + TODAY + '-pgdump.sql'], - stderr = subprocess.STDOUT - ) - # scp 20220714-pgdump.sql root@build.discours.io:/root/discours-backend/. - subprocess.call([ - 'scp', - 'migration/data/' + TODAY + '-pgdump.sql', - 'root@build.discours.io:/root/discours-backend/.'
- ]) - print('[migration] pg_dump up') - - -def handle_auto(): - print('[migration] no command given, auto mode') - import os - if os.path.isfile('migration/data/mongo-' + TODAY + '.log'): - url=open('migration/data/mongodb.url', 'r').read() - if not url: - url=prompt('provide mongo url:') - open('migration/data/mongodb.url', 'w').write(url) - mongo_download(url) - bson_handle() - all_handle(data_load(), sys.argv) - create_pgdump() - -def migrate(): - import sys - - if len(sys.argv) > 1: - cmd=sys.argv[1] - print('[migration] command: ' + cmd) - if cmd == 'mongodb': - mongo_download(sys.argv[2]) - elif cmd == 'bson': - bson_handle() - else: - storage=data_load() - if cmd == '-': export_one(sys.argv[2], storage) - else: all_handle(storage, sys.argv) - elif len(sys.argv) == 1: - handle_auto() - else: - print('[migration] usage: python ./migration <command>') - print('[migration] commands: mongodb, bson, all, all mdx, - <slug>') - -if __name__ == '__main__': - migrate() diff --git a/migration/.DS_Store b/migration/.DS_Store deleted file mode 100644 index 7dc3eb8a..00000000 Binary files a/migration/.DS_Store and /dev/null differ diff --git a/migration/README.md b/migration/README.md deleted file mode 100644 index 12e5ee6a..00000000 --- a/migration/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# discours-migration - -First, put the `data` into this folder. - -## Install - -```sh -pipenv install -r requirements.txt -``` - -## Using - -Put the unpacked mongodump into the `data` folder, then work from -`pipenv shell && python` -#### get old data jsons - -```py -import bson2json - -bson2json.json_tables() # creates all the needed JSON data files from the bson mongodump -``` - -#### migrate all - -```sh -pipenv install -pipenv run python migrate.py all -``` -#### or migrate all with mdx exports - -```sh -pipenv install -pipenv run python migrate.py all mdx -``` - -Note: this will create db entries, and it is not tolerant to existing unique -emails.
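Worth noting alongside the README above: running with no command falls back to auto mode, per `handle_auto()` in migrate.py — it reuses the saved `migration/data/mongodb.url` (prompting for one if it is missing), fetches and decodes the dump, migrates everything, then makes a postgres dump:

```sh
pipenv run python migrate.py
```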
- -#### or one shout by slug - -```sh -pipenv run python migrate.py - -``` diff --git a/migration/__init__.py b/migration/__init__.py deleted file mode 100644 index e2750039..00000000 --- a/migration/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__all__ = ["tables", "bson2json", "html2md"] \ No newline at end of file diff --git a/migration/bson2json.py b/migration/bson2json.py deleted file mode 100644 index ba2802db..00000000 --- a/migration/bson2json.py +++ /dev/null @@ -1,28 +0,0 @@ -import os -import bson -import json - -from migration.utils import DateTimeEncoder - -def json_tables(): - print('[migration] unpack dump/discours/*.bson to migration/data/*.json') - data = { - "content_items": [], - "content_item_categories": [], - "tags": [], - "email_subscriptions": [], - "users": [], - "comments": [] - } - for table in data.keys(): - lc = [] - with open('dump/discours/'+table+'.bson', 'rb') as f: - bs = f.read() - f.close() - base = 0 - while base < len(bs): - base, d = bson.decode_document(bs, base) - lc.append(d) - data[table] = lc - open(os.getcwd() + '/migration/data/'+table+'.json', 'w').write(json.dumps(lc,cls=DateTimeEncoder)) - diff --git a/migration/export.py b/migration/export.py deleted file mode 100644 index 1b5ef994..00000000 --- a/migration/export.py +++ /dev/null @@ -1,106 +0,0 @@ - -from datetime import datetime -import json -import os -import frontmatter -from migration.extract import extract_html, prepare_body -# from migration.tables.users import migrate_email_subscription -from migration.utils import DateTimeEncoder - -OLD_DATE = '2016-03-05 22:22:00.350000' -EXPORT_DEST = '../discoursio-web/data/' -parentDir = '/'.join(os.getcwd().split('/')[:-1]) -contentDir = parentDir + '/discoursio-web/content/' -ts = datetime.now() - -def get_metadata(r): - authors = [] - for a in r['authors']: - authors.append({ # a short version for public listings - 'slug': a.slug or 'discours', - 'name': a.name or 'Дискурс', - 'userpic': a.userpic or 'https://discours.io/static/img/discours.png' - }) - metadata = {} - metadata['title'] = r.get('title', '').replace('{', '(').replace('}', ')') - metadata['authors'] = authors - metadata['createdAt'] = r.get('createdAt', ts) - metadata['layout'] = r['layout'] - metadata['topics'] = [topic for topic in r['topics']] - metadata['topics'].sort() - if r.get('cover', False): metadata['cover'] = r.get('cover') - return metadata - -def export_mdx(r): - # print('[export] mdx %s' % r['slug']) - content = '' - metadata = get_metadata(r) - content = frontmatter.dumps(frontmatter.Post(r['body'], **metadata)) - ext = 'mdx' - filepath = contentDir + r['slug'] - bc = bytes(content,'utf-8').decode('utf-8','ignore') - open(filepath + '.' 
+ ext, 'w').write(bc) - -def export_body(shout, storage): - entry = storage['content_items']['by_oid'][shout['oid']] - if entry: - shout['body'] = prepare_body(entry) - export_mdx(shout) - print('[export] html for %s' % shout['slug']) - body = extract_html(entry) - open(contentDir + shout['slug'] + '.html', 'w').write(body) - else: - raise Exception('no content_items entry found') - -def export_slug(slug, storage): - shout = storage['shouts']['by_slug'][slug] - shout = storage['shouts']['by_slug'].get(slug) - assert shout, '[export] no shout found by slug: %s ' % slug - author = shout['authors'][0] - assert author, '[export] no author error' - export_body(shout, storage) - -def export_email_subscriptions(): - email_subscriptions_data = json.loads(open('migration/data/email_subscriptions.json').read()) - for data in email_subscriptions_data: - # migrate_email_subscription(data) - pass - print('[migration] ' + str(len(email_subscriptions_data)) + ' email subscriptions exported') - -def export_shouts(storage): - # update what was just migrated or load json again - if len(storage['users']['by_slugs'].keys()) == 0: - storage['users']['by_slugs'] = json.loads(open(EXPORT_DEST + 'authors.json').read()) - print('[migration] ' + str(len(storage['users']['by_slugs'].keys())) + ' exported authors ') - if len(storage['shouts']['by_slugs'].keys()) == 0: - storage['shouts']['by_slugs'] = json.loads(open(EXPORT_DEST + 'articles.json').read()) - print('[migration] ' + str(len(storage['shouts']['by_slugs'].keys())) + ' exported articles ') - for slug in storage['shouts']['by_slugs'].keys(): export_slug(slug, storage) - -def export_json(export_articles = {}, export_authors = {}, export_topics = {}, export_comments = {}): - open(EXPORT_DEST + 'authors.json', 'w').write(json.dumps(export_authors, - cls=DateTimeEncoder, - indent=4, - sort_keys=True, - ensure_ascii=False)) - print('[migration] ' + str(len(export_authors.items())) + ' authors exported') - open(EXPORT_DEST + 'topics.json', 'w').write(json.dumps(export_topics, - cls=DateTimeEncoder, - indent=4, - sort_keys=True, - ensure_ascii=False)) - print('[migration] ' + str(len(export_topics.keys())) + ' topics exported') - - open(EXPORT_DEST + 'articles.json', 'w').write(json.dumps(export_articles, - cls=DateTimeEncoder, - indent=4, - sort_keys=True, - ensure_ascii=False)) - print('[migration] ' + str(len(export_articles.items())) + ' articles exported') - open(EXPORT_DEST + 'comments.json', 'w').write(json.dumps(export_comments, - cls=DateTimeEncoder, - indent=4, - sort_keys=True, - ensure_ascii=False)) - print('[migration] ' + str(len(export_comments.items())) + ' exported articles with comments') - diff --git a/migration/extract.py b/migration/extract.py deleted file mode 100644 index 55cc5d0c..00000000 --- a/migration/extract.py +++ /dev/null @@ -1,283 +0,0 @@ -import os -import re -import base64 -import sys -from migration.html2text import html2text - -TOOLTIP_REGEX = r'(\/\/\/(.+)\/\/\/)' -contentDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'discoursio-web', 'content') -s3 = 'https://discours-io.s3.amazonaws.com/' -cdn = 'https://assets.discours.io' - -def replace_tooltips(body): - # FIXME: if you prefer regexp - newbody = body - matches = list(re.finditer(TOOLTIP_REGEX, body, re.IGNORECASE | re.MULTILINE))[1:] - for match in matches: - newbody = body.replace(match.group(1), '') # FIXME: doesn't work - if len(matches) > 0: - print('[extract] found %d tooltips' % len(matches)) - return newbody - - -def 
place_tooltips(body): - parts = body.split('&&&') - l = len(parts) - newparts = list(parts) - placed = False - if l & 1: - if l > 1: - i = 1 - print('[extract] found %d tooltips' % (l-1)) - for part in parts[1:]: - if i & 1: - placed = True - if 'a class="footnote-url" href=' in part: - print('[extract] footnote: ' + part) - fn = 'a class="footnote-url" href="' - link = part.split(fn,1)[1].split('"', 1)[0] - extracted_part = part.split(fn,1)[0] + ' ' + part.split('/', 1)[-1] - newparts[i] = '' + extracted_part + '' - else: - newparts[i] = '%s' % part - # print('[extract] ' + newparts[i]) - else: - # print('[extract] ' + part[:10] + '..') - newparts[i] = part - i += 1 - return (''.join(newparts), placed) - -IMG_REGEX = r"\!\[(.*?)\]\((data\:image\/(png|jpeg|jpg);base64\,((?:[A-Za-z\d+\/]{4})*(?:[A-Za-z\d+\/]{3}=|[A-Za-z\d+\/]{2}==)))\)" -public = '../discoursio-web/public' -cache = {} - - -def reextract_images(body, oid): - # FIXME: if you prefer regexp - matches = list(re.finditer(IMG_REGEX, body, re.IGNORECASE | re.MULTILINE))[1:] - i = 0 - for match in matches: - print('[extract] image ' + match.group(1)) - ext = match.group(3) - name = oid + str(i) - link = public + '/upload/image-' + name + '.' + ext - img = match.group(4) - title = match.group(1) # FIXME: this is not the title - if img not in cache: - content = base64.b64decode(img + '==') - print(str(len(img)) + ' image bytes been written') - open('../' + link, 'wb').write(content) - cache[img] = name - i += 1 - else: - print('[extract] image cached ' + cache[img]) - body.replace(str(match), '![' + title + '](' + cdn + link + ')') # FIXME: this does not work - return body - -IMAGES = { - 'data:image/png': 'png', - 'data:image/jpg': 'jpg', - 'data:image/jpeg': 'jpg', -} - -b64 = ';base64,' - -def extract_imageparts(bodyparts, prefix): - # recursive loop - newparts = list(bodyparts) - for current in bodyparts: - i = bodyparts.index(current) - for mime in IMAGES.keys(): - if mime == current[-len(mime):] and (i + 1 < len(bodyparts)): - print('[extract] ' + mime) - next = bodyparts[i+1] - ext = IMAGES[mime] - b64end = next.index(')') - b64encoded = next[:b64end] - name = prefix + '-' + str(len(cache)) - link = '/upload/image-' + name + '.' + ext - print('[extract] name: ' + name) - print('[extract] link: ' + link) - print('[extract] %d bytes' % len(b64encoded)) - if b64encoded not in cache: - try: - content = base64.b64decode(b64encoded + '==') - open(public + link, 'wb').write(content) - print('[extract] ' +str(len(content)) + ' image bytes been written') - cache[b64encoded] = name - except: - raise Exception - # raise Exception('[extract] error decoding image %r' %b64encoded) - else: - print('[extract] cached link ' + cache[b64encoded]) - name = cache[b64encoded] - link = cdn + '/upload/image-' + name + '.' + ext - newparts[i] = current[:-len(mime)] + current[-len(mime):] + link + next[-b64end:] - newparts[i+1] = next[:-b64end] - break - return extract_imageparts(newparts[i] + newparts[i+1] + b64.join(bodyparts[i+2:]), prefix) \ - if len(bodyparts) > (i + 1) else ''.join(newparts) - -def extract_dataimages(parts, prefix): - newparts = list(parts) - for part in parts: - i = parts.index(part) - if part.endswith(']('): - [ext, rest] = parts[i+1].split(b64) - name = prefix + '-' + str(len(cache)) - if ext == '/jpeg': ext = 'jpg' - else: ext = ext.replace('/', '') - link = '/upload/image-' + name + '.' 
+ ext - print('[extract] filename: ' + link) - b64end = rest.find(')') - if b64end !=-1: - b64encoded = rest[:b64end] - print('[extract] %d text bytes' % len(b64encoded)) - # write if not cached - if b64encoded not in cache: - try: - content = base64.b64decode(b64encoded + '==') - open(public + link, 'wb').write(content) - print('[extract] ' +str(len(content)) + ' image bytes') - cache[b64encoded] = name - except: - raise Exception - # raise Exception('[extract] error decoding image %r' %b64encoded) - else: - print('[extract] 0 image bytes, cached for ' + cache[b64encoded]) - name = cache[b64encoded] - - # update link with CDN - link = cdn + '/upload/image-' + name + '.' + ext - - # patch newparts - newparts[i+1] = link + rest[b64end:] - else: - raise Exception('cannot find the end of base64 encoded string') - else: - print('[extract] dataimage skipping part ' + str(i)) - continue - return ''.join(newparts) - -di = 'data:image' - -def extract_images(body, oid): - newbody = '' - body = body\ - .replace('\n! []('+di, '\n ![]('+di)\ - .replace('\n[]('+di, '\n![]('+di)\ - .replace(' []('+di, ' ![]('+di) - parts = body.split(di) - i = 0 - if len(parts) > 1: newbody = extract_dataimages(parts, oid) - else: newbody = body - return newbody - - -def cleanup(body): - newbody = body\ - .replace('<', '').replace('>', '')\ - .replace('{', '(').replace('}', ')')\ - .replace('…', '...')\ - .replace(' __ ', ' ')\ - .replace('_ _', ' ')\ - .replace('****', '')\ - .replace('\u00a0', ' ')\ - .replace('\u02c6', '^')\ - .replace('\u00a0',' ')\ - .replace('\ufeff', '')\ - .replace('\u200b', '')\ - .replace('\u200c', '')\ - # .replace('\u2212', '-') - return newbody - -def extract(body, oid): - newbody = body - if newbody: - newbody = extract_images(newbody, oid) - if not newbody: raise Exception('extract_images error') - newbody = cleanup(newbody) - if not newbody: raise Exception('cleanup error') - newbody, placed = place_tooltips(newbody) - if not newbody: raise Exception('place_tooltips error') - if placed: - newbody = 'import Tooltip from \'$/components/Article/Tooltip\'\n\n' + newbody - return newbody - -def prepare_body(entry): - # body modifications - body = '' - kind = entry.get('type') - addon = '' - if kind == 'Video': - addon = '' - for m in entry.get('media', []): - if 'youtubeId' in m: addon += '\n' - elif 'vimeoId' in m: addon += '\n' - else: - print('[extract] media is not supported') - print(m) - body = 'import * as Social from \'solid-social\'\n\n' + addon - - elif kind == 'Music': - addon = '' - for m in entry.get('media', []): - artist = m.get('performer') - trackname = '' - if artist: trackname += artist + ' - ' - if 'title' in m: trackname += m.get('title','') - addon += '\n' - body = 'import MusicPlayer from \'$/components/Article/MusicPlayer\'\n\n' + addon - - body_orig = extract_html(entry) - if body_orig: body += extract(html2text(body_orig), entry['_id']) - if not body: print('[extract] empty MDX body') - return body - -def extract_html(entry): - body_orig = entry.get('body') or '' - media = entry.get('media', []) - kind = entry.get('type') or '' - print('[extract] kind: ' + kind) - mbodies = set([]) - if media: - # print('[extract] media is found') - for m in media: - mbody = m.get('body', '') - addon = '' - if kind == 'Literature': - mbody = m.get('literatureBody') or m.get('body', '') - elif kind == 'Image': - cover = '' - if 'thumborId' in entry: cover = cdn + '/unsafe/1600x/' + entry['thumborId'] - if not cover: - if 'image' in entry: cover = entry['image'].get('url', '') - if 
'cloudinary' in cover: cover = '' - # else: print('[extract] cover: ' + cover) - title = m.get('title','').replace('\n', ' ').replace('&nbsp;', ' ') - u = m.get('thumborId') or cover or '' - if title: addon += '<h4>' + title + '</h4>
\n' - if not u.startswith('http'): u = s3 + u - if not u: print('[extract] no image url for ' + str(m)) - if 'cloudinary' in u: u = 'img/lost.svg' - if u != cover or (u == cover and media.index(m) == 0): - addon += '\"'+\n' - if addon: - body_orig += addon - # print('[extract] item addon: ' + addon) - # if addon: print('[extract] addon: %s' % addon) - if mbody and mbody not in mbodies: - mbodies.add(mbody) - body_orig += mbody - if len(list(mbodies)) != len(media): - print('[extract] %d/%d media item bodies appended' % (len(list(mbodies)),len(media))) - # print('[extract] media items body: \n' + body_orig) - if not body_orig: - for up in entry.get('bodyHistory', []) or []: - body_orig = up.get('text', '') or '' - if body_orig: - print('[extract] got html body from history') - break - if not body_orig: print('[extract] empty HTML body') - # body_html = str(BeautifulSoup(body_orig, features="html.parser")) - return body_orig \ No newline at end of file diff --git a/migration/html2text/__init__.py b/migration/html2text/__init__.py deleted file mode 100644 index 26810d42..00000000 --- a/migration/html2text/__init__.py +++ /dev/null @@ -1,1041 +0,0 @@ -"""html2text: Turn HTML into equivalent Markdown-structured text.""" - -import html.entities -import html.parser -import re -import string -import urllib.parse as urlparse -from textwrap import wrap -from typing import Dict, List, Optional, Tuple, Union - -from . import config -from .elements import AnchorElement, ListElement -from .typing import OutCallback -from .utils import ( - dumb_css_parser, - element_style, - escape_md, - escape_md_section, - google_fixed_width_font, - google_has_height, - google_list_style, - google_text_emphasis, - hn, - list_numbering_start, - pad_tables_in_text, - skipwrap, - unifiable_n, -) - -__version__ = (2020, 1, 16) - - -# TODO: -# Support decoded entities with UNIFIABLE. - - -class HTML2Text(html.parser.HTMLParser): - def __init__( - self, - out: Optional[OutCallback] = None, - baseurl: str = "", - bodywidth: int = config.BODY_WIDTH, - ) -> None: - """ - Input parameters: - out: possible custom replacement for self.outtextf (which - appends lines of text). 
- baseurl: base URL of the document we process - """ - super().__init__(convert_charrefs=False) - - # Config options - self.split_next_td = False - self.td_count = 0 - self.table_start = False - self.unicode_snob = config.UNICODE_SNOB # covered in cli - self.escape_snob = config.ESCAPE_SNOB # covered in cli - self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH - self.body_width = bodywidth # covered in cli - self.skip_internal_links = config.SKIP_INTERNAL_LINKS # covered in cli - self.inline_links = config.INLINE_LINKS # covered in cli - self.protect_links = config.PROTECT_LINKS # covered in cli - self.google_list_indent = config.GOOGLE_LIST_INDENT # covered in cli - self.ignore_links = config.IGNORE_ANCHORS # covered in cli - self.ignore_mailto_links = config.IGNORE_MAILTO_LINKS # covered in cli - self.ignore_images = config.IGNORE_IMAGES # covered in cli - self.images_as_html = config.IMAGES_AS_HTML # covered in cli - self.images_to_alt = config.IMAGES_TO_ALT # covered in cli - self.images_with_size = config.IMAGES_WITH_SIZE # covered in cli - self.ignore_emphasis = config.IGNORE_EMPHASIS # covered in cli - self.bypass_tables = config.BYPASS_TABLES # covered in cli - self.ignore_tables = config.IGNORE_TABLES # covered in cli - self.google_doc = False # covered in cli - self.ul_item_mark = "*" # covered in cli - self.emphasis_mark = "_" # covered in cli - self.strong_mark = "**" - self.single_line_break = config.SINGLE_LINE_BREAK # covered in cli - self.use_automatic_links = config.USE_AUTOMATIC_LINKS # covered in cli - self.hide_strikethrough = False # covered in cli - self.mark_code = config.MARK_CODE - self.wrap_list_items = config.WRAP_LIST_ITEMS # covered in cli - self.wrap_links = config.WRAP_LINKS # covered in cli - self.wrap_tables = config.WRAP_TABLES - self.pad_tables = config.PAD_TABLES # covered in cli - self.default_image_alt = config.DEFAULT_IMAGE_ALT # covered in cli - self.tag_callback = None - self.open_quote = config.OPEN_QUOTE # covered in cli - self.close_quote = config.CLOSE_QUOTE # covered in cli - self.header_id = None - self.span_highlight = False - self.span_lead = False - - if out is None: - self.out = self.outtextf - else: - self.out = out - - # empty list to store output characters before they are "joined" - self.outtextlist = [] # type: List[str] - - self.quiet = 0 - self.p_p = 0 # number of newline character to print before next output - self.outcount = 0 - self.start = True - self.space = False - self.a = [] # type: List[AnchorElement] - self.astack = [] # type: List[Optional[Dict[str, Optional[str]]]] - self.maybe_automatic_link = None # type: Optional[str] - self.empty_link = False - self.absolute_url_matcher = re.compile(r"^[a-zA-Z+]+://") - self.acount = 0 - self.list = [] # type: List[ListElement] - self.blockquote = 0 - self.pre = False - self.startpre = False - self.code = False - self.quote = False - self.br_toggle = "" - self.lastWasNL = False - self.lastWasList = False - self.style = 0 - self.style_def = {} # type: Dict[str, Dict[str, str]] - self.tag_stack = ( - [] - ) # type: List[Tuple[str, Dict[str, Optional[str]], Dict[str, str]]] - self.emphasis = 0 - self.drop_white_space = 0 - self.inheader = False - # Current abbreviation definition - self.abbr_title = None # type: Optional[str] - # Last inner HTML (for abbr being defined) - self.abbr_data = None # type: Optional[str] - # Stack of abbreviations to write later - self.abbr_list = {} # type: Dict[str, str] - self.baseurl = baseurl - self.stressed = False - self.preceding_stressed = False 
- self.preceding_data = "" - self.current_tag = "" - self.current_class = "" - - config.UNIFIABLE["nbsp"] = " _place_holder;" - - def feed(self, data: str) -> None: - data = data.replace("", "") - super().feed(data) - - def handle(self, data: str) -> str: - self.feed(data) - self.feed("") - markdown = self.optwrap(self.finish()) - if self.pad_tables: - return pad_tables_in_text(markdown) - else: - return markdown - - def outtextf(self, s: str) -> None: - self.outtextlist.append(s) - if s: - self.lastWasNL = s[-1] == "\n" - - def finish(self) -> str: - self.close() - - self.pbr() - self.o("", force="end") - - outtext = "".join(self.outtextlist) - - if self.unicode_snob: - nbsp = html.entities.html5["nbsp;"] - else: - nbsp = " " - outtext = outtext.replace(" _place_holder;", nbsp) - - # Clear self.outtextlist to avoid memory leak of its content to - # the next handling. - self.outtextlist = [] - - return outtext - - def handle_charref(self, c: str) -> None: - self.handle_data(self.charref(c), True) - - def handle_entityref(self, c: str) -> None: - ref = self.entityref(c) - - # ref may be an empty string (e.g. for ‎/‏ markers that should - # not contribute to the final output). - # self.handle_data cannot handle a zero-length string right after a - # stressed tag or mid-text within a stressed tag (text get split and - # self.stressed/self.preceding_stressed gets switched after the first - # part of that text). - if ref: - self.handle_data(ref, True) - - def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None: - self.handle_tag(tag, dict(attrs), start=True) - - def handle_endtag(self, tag: str) -> None: - self.handle_tag(tag, {}, start=False) - - def previousIndex(self, attrs: Dict[str, Optional[str]]) -> Optional[int]: - """ - :type attrs: dict - - :returns: The index of certain set of attributes (of a link) in the - self.a list. 
If the set of attributes is not found, returns None - :rtype: int - """ - if "href" not in attrs: - return None - - match = False - for i, a in enumerate(self.a): - if "href" in a.attrs and a.attrs["href"] == attrs["href"]: - if "title" in a.attrs or "title" in attrs: - if ( - "title" in a.attrs - and "title" in attrs - and a.attrs["title"] == attrs["title"] - ): - match = True - else: - match = True - - if match: - return i - return None - - def handle_emphasis( - self, start: bool, tag_style: Dict[str, str], parent_style: Dict[str, str] - ) -> None: - """ - Handles various text emphases - """ - tag_emphasis = google_text_emphasis(tag_style) - parent_emphasis = google_text_emphasis(parent_style) - - # handle Google's text emphasis - strikethrough = "line-through" in tag_emphasis and self.hide_strikethrough - - # google and others may mark a font's weight as `bold` or `700` - bold = False - for bold_marker in config.BOLD_TEXT_STYLE_VALUES: - bold = bold_marker in tag_emphasis and bold_marker not in parent_emphasis - if bold: - break - - italic = "italic" in tag_emphasis and "italic" not in parent_emphasis - fixed = ( - google_fixed_width_font(tag_style) - and not google_fixed_width_font(parent_style) - and not self.pre - ) - - if start: - # crossed-out text must be handled before other attributes - # in order not to output qualifiers unnecessarily - if bold or italic or fixed: - self.emphasis += 1 - if strikethrough: - self.quiet += 1 - if italic: - self.o(self.emphasis_mark) - self.drop_white_space += 1 - if bold: - self.o(self.strong_mark) - self.drop_white_space += 1 - if fixed: - self.o("`") - self.drop_white_space += 1 - self.code = True - else: - if bold or italic or fixed: - # there must not be whitespace before closing emphasis mark - self.emphasis -= 1 - self.space = False - if fixed: - if self.drop_white_space: - # empty emphasis, drop it - self.drop_white_space -= 1 - else: - self.o("`") - self.code = False - if bold: - if self.drop_white_space: - # empty emphasis, drop it - self.drop_white_space -= 1 - else: - self.o(self.strong_mark) - if italic: - if self.drop_white_space: - # empty emphasis, drop it - self.drop_white_space -= 1 - else: - self.o(self.emphasis_mark) - # space is only allowed after *all* emphasis marks - if (bold or italic) and not self.emphasis: - self.o(" ") - if strikethrough: - self.quiet -= 1 - - def handle_tag( - self, tag: str, attrs: Dict[str, Optional[str]], start: bool - ) -> None: - self.current_tag = tag - - if self.tag_callback is not None: - if self.tag_callback(self, tag, attrs, start) is True: - return - - # first thing inside the anchor tag is another tag - # that produces some output - if ( - start - and self.maybe_automatic_link is not None - and tag not in ["p", "div", "style", "dl", "dt"] - and (tag != "img" or self.ignore_images) - ): - self.o("[") - self.maybe_automatic_link = None - self.empty_link = False - - if self.google_doc: - # the attrs parameter is empty for a closing tag. in addition, we - # need the attributes of the parent nodes in order to get a - # complete style description for the current element. we assume - # that google docs export well formed html. 
- parent_style = {} # type: Dict[str, str] - if start: - if self.tag_stack: - parent_style = self.tag_stack[-1][2] - tag_style = element_style(attrs, self.style_def, parent_style) - self.tag_stack.append((tag, attrs, tag_style)) - else: - dummy, attrs, tag_style = ( - self.tag_stack.pop() if self.tag_stack else (None, {}, {}) - ) - if self.tag_stack: - parent_style = self.tag_stack[-1][2] - - if hn(tag): - # check if nh is inside of an 'a' tag - # (incorrect but found in the wild) - if self.astack: - if start: - self.inheader = True - # are inside link name, so only add '#' if it can appear before '[' - if self.outtextlist and self.outtextlist[-1] == "[": - self.outtextlist.pop() - self.space = False - self.o(hn(tag) * "#" + " ") - self.o("[") - self.header_id = attrs.get('id') - else: - self.p() - if start: - self.inheader = True - self.o(hn(tag) * "#" + " ") - if self.header_id: - self.o(' {#' + self.header_id + '}') - self.header_id = None - else: - self.inheader = False - return # prevent redundant emphasis marks on headers - - if 'class' in attrs: - self.current_class = attrs.get('class', '') - # self.p() - if not start: - self.current_class = '' - - if tag == 'span': - if 'style' in attrs: - if attrs.get('style') == 'text-align: center': - self.current_class = 'center' - if not start: - self.current_class = '' - if start: - if self.current_class == 'highlight' and \ - self.inheader == False and \ - self.span_lead == False and \ - self.astack == False: - self.o('`') # NOTE: same as - self.span_highlight = True - elif self.current_class == 'lead' and \ - self.inheader == False and \ - self.span_highlight == False: - #self.o("==") # NOTE: CriticMarkup {== - self.span_lead = True - else: - if self.span_highlight: - self.o('`') - self.span_highlight = False - elif self.span_lead: - #self.o('==') - self.span_lead = False - - if tag in ["p", "div"]: - if self.google_doc: - if start and google_has_height(tag_style): - self.p() - else: - self.soft_br() - elif self.astack or self.inheader: - pass - else: - self.p() - - if tag == "br" and start: - if self.blockquote > 0: - self.o(" \n> ") - else: - self.o(" \n") - - if tag == "hr" and start: - self.p() - self.o("* * *") - self.p() - - if tag in ["head", "style", "script"]: - if start: - self.quiet += 1 - else: - self.quiet -= 1 - - if tag == "style": - if start: - self.style += 1 - else: - self.style -= 1 - - if tag in ["body"]: - self.quiet = 0 # sites like 9rules.com never close - - if tag == "blockquote": - if start: - self.p() - self.o("> ", force=True) - self.start = True - self.blockquote += 1 - else: - self.blockquote -= 1 - self.p() - - if tag in ["em", "i", "u"] and not self.ignore_emphasis: - # Separate with a space if we immediately follow an alphanumeric - # character, since otherwise Markdown won't render the emphasis - # marks, and we'll be left with eg 'foo_bar_' visible. - # (Don't add a space otherwise, though, since there isn't one in the - # original HTML.) - if ( - start - and self.preceding_data - and self.preceding_data[-1] not in string.whitespace - and self.preceding_data[-1] not in string.punctuation - ): - emphasis = " " + self.emphasis_mark - self.preceding_data += " " - else: - emphasis = self.emphasis_mark - - self.o(emphasis) - if start: - self.stressed = True - - if tag in ["strong", "b"] and not self.ignore_emphasis: - # Separate with space if we immediately follow an * character, since - # without it, Markdown won't render the resulting *** correctly. 
- # (Don't add a space otherwise, though, since there isn't one in the - # original HTML.) - if not self.inheader and not self.astack \ - and not self.span_lead and not self.span_highlight: - if ( - start - and self.preceding_data - and self.preceding_data[-1] == self.strong_mark[0] - ): - strong = " " + self.strong_mark - self.preceding_data += " " - else: - strong = self.strong_mark - - self.o(strong) - if start: - self.stressed = True - - if tag in ["del", "strike", "s"]: - if start and self.preceding_data and self.preceding_data[-1] == "~": - strike = " ~~" - self.preceding_data += " " - else: - strike = "~~" - - self.o(strike) - if start: - self.stressed = True - - if self.google_doc: - if not self.inheader: - # handle some font attributes, but leave headers clean - self.handle_emphasis(start, tag_style, parent_style) - - if tag in ["kbd", "code", "tt"] and not self.pre: - self.o("`") # TODO: `` `this` `` - self.code = not self.code - - if tag == "abbr": - if start: - self.abbr_title = None - self.abbr_data = "" - if "title" in attrs: - self.abbr_title = attrs["title"] - else: - if self.abbr_title is not None: - assert self.abbr_data is not None - self.abbr_list[self.abbr_data] = self.abbr_title - self.abbr_title = None - self.abbr_data = None - - if tag == "q": - if not self.quote: - self.o(self.open_quote) - else: - self.o(self.close_quote) - self.quote = not self.quote - - def link_url(self: HTML2Text, link: str, title: str = "") -> None: - url = urlparse.urljoin(self.baseurl, link) - title = ' "{}"'.format(title) if title.strip() else "" - self.o("]({url}{title})".format(url=escape_md(url), title=title)) - - if tag == "a" and not self.ignore_links: - if start: - if 'data-original-title' in attrs: - # WARNING: old discours specific code - self.o('&&&%s&&&' % attrs['data-original-title']) - else: - if ( - "href" in attrs - and not attrs["href"].startswith('#_ftn') - and attrs["href"] is not None - and not (self.skip_internal_links and attrs["href"].startswith("#")) - and not (self.ignore_mailto_links and attrs["href"].startswith("mailto:")) - ): - self.astack.append(attrs) - self.maybe_automatic_link = attrs["href"] - self.empty_link = True - if self.protect_links: - attrs["href"] = "<" + attrs["href"] + ">" - else: - self.astack.append(None) - else: - if self.astack: - a = self.astack.pop() - if self.maybe_automatic_link and not self.empty_link: - self.maybe_automatic_link = None - elif a: - assert a["href"] is not None - if self.empty_link: - self.o("[") - self.empty_link = False - self.maybe_automatic_link = None - if self.inline_links: - self.p_p = 0 - title = a.get("title") or "" - title = escape_md(title) - link_url(self, a["href"], title) - else: - i = self.previousIndex(a) - if i is not None: - a_props = self.a[i] - else: - self.acount += 1 - a_props = AnchorElement(a, self.acount, self.outcount) - self.a.append(a_props) - self.o("][" + str(a_props.count) + "]") - - if tag == "img" and start and not self.ignore_images: - # skip cloudinary images - if "src" in attrs and 'cloudinary' not in attrs['src']: - assert attrs["src"] is not None - if not self.images_to_alt: - attrs["href"] = attrs["src"] - alt = attrs.get("alt") or self.default_image_alt - - # If we have images_with_size, write raw html including width, - # height, and alt attributes - if self.images_as_html or ( - self.images_with_size and ("width" in attrs or "height" in attrs) - ): - self.o("") - return - - # If we have a link to create, output the start - if self.maybe_automatic_link is not None: - href = 
self.maybe_automatic_link - if ( - self.images_to_alt - and escape_md(alt) == href - and self.absolute_url_matcher.match(href) - ): - self.o("<" + escape_md(alt) + ">") - self.empty_link = False - return - else: - self.o("[") - self.maybe_automatic_link = None - self.empty_link = False - - # If we have images_to_alt, we discard the image itself, - # considering only the alt text. - if self.images_to_alt: - self.o(escape_md(alt)) - else: - self.o("![" + escape_md(alt) + "]") - if self.inline_links: - href = attrs.get("href") or "" - self.o( - "(" + escape_md(urlparse.urljoin(self.baseurl, href)) + ")" - ) - else: - i = self.previousIndex(attrs) - if i is not None: - a_props = self.a[i] - else: - self.acount += 1 - a_props = AnchorElement(attrs, self.acount, self.outcount) - self.a.append(a_props) - self.o("[" + str(a_props.count) + "]") - - if tag == "dl" and start: - self.p() - if tag == "dt" and not start: - self.pbr() - if tag == "dd" and start: - self.o(" ") - if tag == "dd" and not start: - self.pbr() - - if tag in ["ol", "ul"]: - # Google Docs create sub lists as top level lists - if not self.list and not self.lastWasList: - self.p() - if start: - if self.google_doc: - list_style = google_list_style(tag_style) - else: - list_style = tag - numbering_start = list_numbering_start(attrs) - self.list.append(ListElement(list_style, numbering_start)) - else: - if self.list: - self.list.pop() - if not self.google_doc and not self.list: - self.o("\n") - self.lastWasList = True - else: - self.lastWasList = False - - if tag == "li": - self.pbr() - if start: - if self.list: - li = self.list[-1] - else: - li = ListElement("ul", 0) - if self.google_doc: - self.o(" " * self.google_nest_count(tag_style)) - else: - # Indent two spaces per list, except use three spaces for an - # unordered list inside an ordered list. - # https://spec.commonmark.org/0.28/#motivation - # TODO: line up
  1. s > 9 correctly. - parent_list = None - for list in self.list: - self.o( - " " if parent_list == "ol" and list.name == "ul" else " " - ) - parent_list = list.name - - if li.name == "ul": - self.o(self.ul_item_mark + " ") - elif li.name == "ol": - li.num += 1 - self.o(str(li.num) + ". ") - self.start = True - - if tag in ["table", "tr", "td", "th"]: - if self.ignore_tables: - if tag == "tr": - if start: - pass - else: - self.soft_br() - else: - pass - - elif self.bypass_tables: - if start: - self.soft_br() - if tag in ["td", "th"]: - if start: - self.o("<{}>\n\n".format(tag)) - else: - self.o("\n".format(tag)) - else: - if start: - self.o("<{}>".format(tag)) - else: - self.o("".format(tag)) - - else: - if tag == "table": - if start: - self.table_start = True - if self.pad_tables: - self.o("<" + config.TABLE_MARKER_FOR_PAD + ">") - self.o(" \n") - else: - if self.pad_tables: - # add break in case the table is empty or its 1 row table - self.soft_br() - self.o("") - self.o(" \n") - if tag in ["td", "th"] and start: - if self.split_next_td: - self.o("| ") - self.split_next_td = True - - if tag == "tr" and start: - self.td_count = 0 - if tag == "tr" and not start: - self.split_next_td = False - self.soft_br() - if tag == "tr" and not start and self.table_start: - # Underline table header - self.o("|".join(["---"] * self.td_count)) - self.soft_br() - self.table_start = False - if tag in ["td", "th"] and start: - self.td_count += 1 - - if tag == "pre": - if start: - self.startpre = True - self.pre = True - else: - self.pre = False - if self.mark_code: - self.out("\n[/code]") - self.p() - - # TODO: Add docstring for these one letter functions - def pbr(self) -> None: - "Pretty print has a line break" - if self.p_p == 0: - self.p_p = 1 - - def p(self) -> None: - "Set pretty print to 1 or 2 lines" - self.p_p = 1 if self.single_line_break else 2 - - def soft_br(self) -> None: - "Soft breaks" - self.pbr() - self.br_toggle = " " - - def o( - self, data: str, puredata: bool = False, force: Union[bool, str] = False - ) -> None: - """ - Deal with indentation and whitespace - """ - if self.abbr_data is not None: - self.abbr_data += data - - if not self.quiet: - if self.google_doc: - # prevent white space immediately after 'begin emphasis' - # marks ('**' and '_') - lstripped_data = data.lstrip() - if self.drop_white_space and not (self.pre or self.code): - data = lstripped_data - if lstripped_data != "": - self.drop_white_space = 0 - - if puredata and not self.pre: - # This is a very dangerous call ... it could mess up - # all handling of   when not handled properly - # (see entityref) - data = re.sub(r"\s+", r" ", data) - if data and data[0] == " ": - self.space = True - data = data[1:] - if not data and not force: - return - - if self.startpre: - # self.out(" :") #TODO: not output when already one there - if not data.startswith("\n") and not data.startswith("\r\n"): - #
<pre> stuff...
    -					data = "\n" + data
    -				if self.mark_code:
    -					self.out("\n[code]")
    -					self.p_p = 0
    -
    -			bq = ">" * self.blockquote
    -			if not (force and data and data[0] == ">") and self.blockquote:
    -				bq += " "
    -
    -			if self.pre:
    -				if not self.list:
    -					bq += "    "
    -				# else: list content is already partially indented
    -				bq += "    " * len(self.list)
    -				data = data.replace("\n", "\n" + bq)
    -
    -			if self.startpre:
    -				self.startpre = False
    -				if self.list:
    -					# use existing initial indentation
    -					data = data.lstrip("\n")
    -
    -			if self.start:
    -				self.space = False
    -				self.p_p = 0
    -				self.start = False
    -
    -			if force == "end":
    -				# It's the end.
    -				self.p_p = 0
    -				self.out("\n")
    -				self.space = False
    -
    -			if self.p_p:
    -				self.out((self.br_toggle + "\n" + bq) * self.p_p)
    -				self.space = False
    -				self.br_toggle = ""
    -
    -			if self.space:
    -				if not self.lastWasNL:
    -					self.out(" ")
    -				self.space = False
    -
    -			if self.a and (
    -				(self.p_p == 2 and self.links_each_paragraph) or force == "end"
    -			):
    -				if force == "end":
    -					self.out("\n")
    -
    -				newa = []
    -				for link in self.a:
    -					if self.outcount > link.outcount:
    -						self.out(
    -							"   ["
    -							+ str(link.count)
    -							+ "]: "
    -							+ urlparse.urljoin(self.baseurl, link.attrs["href"])
    -						)
    -						if "title" in link.attrs:
    -							assert link.attrs["title"] is not None
    -							self.out(" (" + link.attrs["title"] + ")")
    -						self.out("\n")
    -					else:
    -						newa.append(link)
    -
    -				# Don't need an extra line when nothing was done.
    -				if self.a != newa:
    -					self.out("\n")
    -
    -				self.a = newa
    -
    -			if self.abbr_list and force == "end":
    -				for abbr, definition in self.abbr_list.items():
    -					self.out("  *[" + abbr + "]: " + definition + "\n")
    -
    -			self.p_p = 0
    -			self.out(data)
    -			self.outcount += 1
    -
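A quick sketch of how the queued state above plays out end to end; this uses only `handle()` from earlier in the class, and the input HTML is illustrative:

```python
h = HTML2Text(bodywidth=0)  # 0 disables optwrap's line wrapping
md = h.handle("<p>one</p><p>two</p>")
# each <p> calls p(), which queues p_p = 2; o() then flushes the queued
# newlines before the next chunk, giving roughly "one\n\ntwo\n\n"
```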
    -	def handle_data(self, data: str, entity_char: bool = False) -> None:
    -		if not data:
    -			# Data may be empty for some HTML entities. For example,
    -			# LEFT-TO-RIGHT MARK.
    -			return
    -
    -		if self.stressed:
    -			data = data.strip()
    -			self.stressed = False
    -			self.preceding_stressed = True
    -		elif self.preceding_stressed:
    -			if (
    -				re.match(r"[^][(){}\s.!?]", data[0])
    -				and not hn(self.current_tag)
    -				and self.current_tag not in ["a", "code", "pre"]
    -			):
    -				# should match a letter or common punctuation
    -				data = " " + data
    -			self.preceding_stressed = False
    -
    -		if self.style:
    -			self.style_def.update(dumb_css_parser(data))
    -
    -		if self.maybe_automatic_link is not None:
    -			href = self.maybe_automatic_link
    -			if (
    -				href == data
    -				and self.absolute_url_matcher.match(href)
    -				and self.use_automatic_links
    -			):
    -				self.o("<" + data + ">")
    -				self.empty_link = False
    -				return
    -			else:
    -				self.o("[")
    -				self.maybe_automatic_link = None
    -				self.empty_link = False
    -
    -		if not self.code and not self.pre and not entity_char:
    -			data = escape_md_section(data, snob=self.escape_snob)
    -		self.preceding_data = data
    -		self.o(data, puredata=True)
    -
    -	def charref(self, name: str) -> str:
    -		if name[0] in ["x", "X"]:
    -			c = int(name[1:], 16)
    -		else:
    -			c = int(name)
    -
    -		if not self.unicode_snob and c in unifiable_n:
    -			return unifiable_n[c]
    -		else:
    -			try:
    -				return chr(c)
    -			except ValueError:  # invalid unicode
    -				return ""
    -
    -	def entityref(self, c: str) -> str:
    -		if not self.unicode_snob and c in config.UNIFIABLE:
    -			return config.UNIFIABLE[c]
    -		try:
    -			ch = html.entities.html5[c + ";"]
    -		except KeyError:
    -			return "&" + c + ";"
    -		return config.UNIFIABLE[c] if c == "nbsp" else ch
    -
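For reference, a small sketch of what the two reference decoders above return under the default config (derived from the code itself, not an external API):

```python
h = HTML2Text()
h.charref("65")      # decimal numeric reference -> "A"
h.charref("x2019")   # hex reference; 0x2019 is in unifiable_n, so with
                     # unicode_snob off this yields the ASCII "'" instead
h.entityref("amp")   # resolved via html.entities.html5["amp;"] -> "&"
h.entityref("bogus") # unknown entity is passed through as "&bogus;"
```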
    -	def google_nest_count(self, style: Dict[str, str]) -> int:
    -		"""
    -		Calculate the nesting count of google doc lists
    -
    -		:type style: dict
    -
    -		:rtype: int
    -		"""
    -		nest_count = 0
    -		if "margin-left" in style:
    -			nest_count = int(style["margin-left"][:-2]) // self.google_list_indent
    -
    -		return nest_count
    -
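A worked example of the `margin-left` arithmetic above; the style dicts are hypothetical, and 36 matches the usual `config.GOOGLE_LIST_INDENT` default:

```python
h = HTML2Text()
h.google_list_indent = 36                     # pixels per nesting level
h.google_nest_count({"margin-left": "72px"})  # int("72") // 36 -> 2
h.google_nest_count({"font-weight": "bold"})  # no margin-left -> 0
```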
    -	def optwrap(self, text: str) -> str:
    -		"""
    -		Wrap all paragraphs in the provided text.
    -
    -		:type text: str
    -
    -		:rtype: str
    -		"""
    -		if not self.body_width:
    -			return text
    -
    -		result = ""
    -		newlines = 0
    -		# I cannot think of a better solution for now.
    -		# To avoid the non-wrap behaviour for entire paras
    -		# because of the presence of a link in it
    -		if not self.wrap_links:
    -			self.inline_links = False
    -		for para in text.split("\n"):
    -			if len(para) > 0:
    -				if not skipwrap(
    -					para, self.wrap_links, self.wrap_list_items, self.wrap_tables
    -				):
    -					indent = ""
    -					if para.startswith("  " + self.ul_item_mark):
    -						# list item continuation: add a double indent to the
    -						# new lines
    -						indent = "    "
    -					elif para.startswith("> "):
    -						# blockquote continuation: add the greater than symbol
    -						# to the new lines
    -						indent = "> "
    -					wrapped = wrap(
    -						para,
    -						self.body_width,
    -						break_long_words=False,
    -						subsequent_indent=indent,
    -					)
    -					result += "\n".join(wrapped)
    -					if para.endswith("  "):
    -						result += "  \n"
    -						newlines = 1
    -					elif indent:
    -						result += "\n"
    -						newlines = 1
    -					else:
    -						result += "\n\n"
    -						newlines = 2
    -				else:
    -					# Warning for the tempted!!!
    -					# Be aware that obvious replacement of this with
    -					# line.isspace()
    -					# DOES NOT work! Explanations are welcome.
    -					if not config.RE_SPACE.match(para):
    -						result += para + "\n"
    -						newlines = 1
    -			else:
    -				if newlines < 2:
    -					result += "\n"
    -					newlines += 1
    -		return result
    -
    -
    -def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = config.BODY_WIDTH) -> str:
-	text = html.strip()
-	if text:
-		parser = HTML2Text(baseurl=baseurl, bodywidth=bodywidth)
-		text = parser.handle(text)
-		# print('[html2text] %d bytes' % len(html))
-	return text
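Taken together, the module reduces to the `html2text()` helper just above; a minimal usage sketch (the input HTML is illustrative):

```python
from migration.html2text import html2text

md = html2text("<h1>Title</h1><p>Some <b>bold</b> text.</p>", bodywidth=0)
# -> roughly "# Title\n\nSome **bold** text.\n\n"
```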
    diff --git a/migration/html2text/__main__.py b/migration/html2text/__main__.py
    deleted file mode 100644
    index 4e28416e..00000000
    --- a/migration/html2text/__main__.py
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -from .cli import main
    -
    -main()
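Because `__main__.py` simply delegates to `cli.main()`, the converter can also be driven as a module. A hedged invocation sketch, assuming the fork keeps upstream html2text's stdin handling (the flags shown are registered in cli.py below):

```sh
python -m migration.html2text --body-width 0 --ignore-images < page.html
```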
    diff --git a/migration/html2text/cli.py b/migration/html2text/cli.py
    deleted file mode 100644
    index d0c62c97..00000000
    --- a/migration/html2text/cli.py
    +++ /dev/null
    @@ -1,322 +0,0 @@
    -import argparse
    -import sys
    -
    -from . import HTML2Text, __version__, config
    -
    -
    -def main() -> None:
    -    baseurl = ""
    -
    -    class bcolors:
    -        HEADER = "\033[95m"
    -        OKBLUE = "\033[94m"
    -        OKGREEN = "\033[92m"
    -        WARNING = "\033[93m"
    -        FAIL = "\033[91m"
    -        ENDC = "\033[0m"
    -        BOLD = "\033[1m"
    -        UNDERLINE = "\033[4m"
    -
    -    p = argparse.ArgumentParser()
    -    p.add_argument(
    -        "--default-image-alt",
    -        dest="default_image_alt",
    -        default=config.DEFAULT_IMAGE_ALT,
    -        help="The default alt string for images with missing ones",
    -    )
    -    p.add_argument(
    -        "--pad-tables",
    -        dest="pad_tables",
    -        action="store_true",
    -        default=config.PAD_TABLES,
    -        help="pad the cells to equal column width in tables",
    -    )
    -    p.add_argument(
    -        "--no-wrap-links",
    -        dest="wrap_links",
    -        action="store_false",
    -        default=config.WRAP_LINKS,
    -        help="don't wrap links during conversion",
    -    )
    -    p.add_argument(
    -        "--wrap-list-items",
    -        dest="wrap_list_items",
    -        action="store_true",
    -        default=config.WRAP_LIST_ITEMS,
    -        help="wrap list items during conversion",
    -    )
    -    p.add_argument(
    -        "--wrap-tables",
    -        dest="wrap_tables",
    -        action="store_true",
    -        default=config.WRAP_TABLES,
    -        help="wrap tables",
    -    )
    -    p.add_argument(
    -        "--ignore-emphasis",
    -        dest="ignore_emphasis",
    -        action="store_true",
    -        default=config.IGNORE_EMPHASIS,
    -        help="don't include any formatting for emphasis",
    -    )
    -    p.add_argument(
    -        "--reference-links",
    -        dest="inline_links",
    -        action="store_false",
    -        default=config.INLINE_LINKS,
    -        help="use reference style links instead of inline links",
    -    )
    -    p.add_argument(
    -        "--ignore-links",
    -        dest="ignore_links",
    -        action="store_true",
    -        default=config.IGNORE_ANCHORS,
    -        help="don't include any formatting for links",
    -    )
    -    p.add_argument(
    -        "--ignore-mailto-links",
    -        action="store_true",
    -        dest="ignore_mailto_links",
    -        default=config.IGNORE_MAILTO_LINKS,
    -        help="don't include mailto: links",
    -    )
    -    p.add_argument(
    -        "--protect-links",
    -        dest="protect_links",
    -        action="store_true",
    -        default=config.PROTECT_LINKS,
-        help="protect links from line breaks by surrounding them with angle brackets",
    -    )
    -    p.add_argument(
    -        "--ignore-images",
    -        dest="ignore_images",
    -        action="store_true",
    -        default=config.IGNORE_IMAGES,
    -        help="don't include any formatting for images",
    -    )
    -    p.add_argument(
    -        "--images-as-html",
    -        dest="images_as_html",
    -        action="store_true",
    -        default=config.IMAGES_AS_HTML,
    -        help=(
    -            "Always write image tags as raw html; preserves `height`, `width` and "
    -            "`alt` if possible."
    -        ),
    -    )
    -    p.add_argument(
    -        "--images-to-alt",
    -        dest="images_to_alt",
    -        action="store_true",
    -        default=config.IMAGES_TO_ALT,
    -        help="Discard image data, only keep alt text",
    -    )
    -    p.add_argument(
    -        "--images-with-size",
    -        dest="images_with_size",
    -        action="store_true",
    -        default=config.IMAGES_WITH_SIZE,
    -        help=(
    -            "Write image tags with height and width attrs as raw html to retain "
    -            "dimensions"
    -        ),
    -    )
    -    p.add_argument(
    -        "-g",
    -        "--google-doc",
    -        action="store_true",
    -        dest="google_doc",
    -        default=False,
    -        help="convert an html-exported Google Document",
    -    )
    -    p.add_argument(
    -        "-d",
    -        "--dash-unordered-list",
    -        action="store_true",
    -        dest="ul_style_dash",
    -        default=False,
    -        help="use a dash rather than a star for unordered list items",
    -    )
    -    p.add_argument(
    -        "-e",
    -        "--asterisk-emphasis",
    -        action="store_true",
    -        dest="em_style_asterisk",
    -        default=False,
    -        help="use an asterisk rather than an underscore for emphasized text",
    -    )
    -    p.add_argument(
    -        "-b",
    -        "--body-width",
    -        dest="body_width",
    -        type=int,
    -        default=config.BODY_WIDTH,
    -        help="number of characters per output line, 0 for no wrap",
    -    )
    -    p.add_argument(
    -        "-i",
    -        "--google-list-indent",
    -        dest="list_indent",
    -        type=int,
    -        default=config.GOOGLE_LIST_INDENT,
    -        help="number of pixels Google indents nested lists",
    -    )
    -    p.add_argument(
    -        "-s",
    -        "--hide-strikethrough",
    -        action="store_true",
    -        dest="hide_strikethrough",
    -        default=False,
-        help="hide strike-through text. Only relevant when -g is specified as well",
    -    )
    -    p.add_argument(
    -        "--escape-all",
    -        action="store_true",
    -        dest="escape_snob",
    -        default=False,
    -        help=(
    -            "Escape all special characters.  Output is less readable, but avoids "
    -            "corner case formatting issues."
    -        ),
    -    )
    -    p.add_argument(
    -        "--bypass-tables",
    -        action="store_true",
    -        dest="bypass_tables",
    -        default=config.BYPASS_TABLES,
    -        help="Format tables in HTML rather than Markdown syntax.",
    -    )
    -    p.add_argument(
    -        "--ignore-tables",
    -        action="store_true",
    -        dest="ignore_tables",
    -        default=config.IGNORE_TABLES,
    -        help="Ignore table-related tags (table, th, td, tr) " "while keeping rows.",
    -    )
    -    p.add_argument(
    -        "--single-line-break",
    -        action="store_true",
    -        dest="single_line_break",
    -        default=config.SINGLE_LINE_BREAK,
    -        help=(
    -            "Use a single line break after a block element rather than two line "
    -            "breaks. NOTE: Requires --body-width=0"
    -        ),
    -    )
    -    p.add_argument(
    -        "--unicode-snob",
    -        action="store_true",
    -        dest="unicode_snob",
    -        default=config.UNICODE_SNOB,
    -        help="Use unicode throughout document",
    -    )
    -    p.add_argument(
    -        "--no-automatic-links",
    -        action="store_false",
    -        dest="use_automatic_links",
    -        default=config.USE_AUTOMATIC_LINKS,
    -        help="Do not use automatic links wherever applicable",
    -    )
    -    p.add_argument(
    -        "--no-skip-internal-links",
    -        action="store_false",
    -        dest="skip_internal_links",
    -        default=config.SKIP_INTERNAL_LINKS,
    -        help="Do not skip internal links",
    -    )
    -    p.add_argument(
    -        "--links-after-para",
    -        action="store_true",
    -        dest="links_each_paragraph",
    -        default=config.LINKS_EACH_PARAGRAPH,
-        help="Put links after each paragraph instead of at the end of the document",
    -    )
    -    p.add_argument(
    -        "--mark-code",
    -        action="store_true",
    -        dest="mark_code",
    -        default=config.MARK_CODE,
    -        help="Mark program code blocks with [code]...[/code]",
    -    )
    -    p.add_argument(
    -        "--decode-errors",
    -        dest="decode_errors",
    -        default=config.DECODE_ERRORS,
-        help=(
-            "What to do in case of decode errors. 'ignore', 'strict' and "
-            "'replace' are acceptable values"
-        ),
    -    )
    -    p.add_argument(
    -        "--open-quote",
    -        dest="open_quote",
    -        default=config.OPEN_QUOTE,
    -        help="The character used to open quotes",
    -    )
    -    p.add_argument(
    -        "--close-quote",
    -        dest="close_quote",
    -        default=config.CLOSE_QUOTE,
    -        help="The character used to close quotes",
    -    )
    -    p.add_argument(
    -        "--version", action="version", version=".".join(map(str, __version__))
    -    )
    -    p.add_argument("filename", nargs="?")
    -    p.add_argument("encoding", nargs="?", default="utf-8")
    -    args = p.parse_args()
    -
    -    if args.filename and args.filename != "-":
    -        with open(args.filename, "rb") as fp:
    -            data = fp.read()
    -    else:
    -        data = sys.stdin.buffer.read()
    -
    -    try:
    -        html = data.decode(args.encoding, args.decode_errors)
    -    except UnicodeDecodeError as err:
    -        warning = bcolors.WARNING + "Warning:" + bcolors.ENDC
    -        warning += " Use the " + bcolors.OKGREEN
    -        warning += "--decode-errors=ignore" + bcolors.ENDC + " flag."
    -        print(warning)
    -        raise err
    -
    -    h = HTML2Text(baseurl=baseurl)
    -    # handle options
    -    if args.ul_style_dash:
    -        h.ul_item_mark = "-"
    -    if args.em_style_asterisk:
    -        h.emphasis_mark = "*"
    -        h.strong_mark = "__"
    -
    -    h.body_width = args.body_width
    -    h.google_list_indent = args.list_indent
    -    h.ignore_emphasis = args.ignore_emphasis
    -    h.ignore_links = args.ignore_links
    -    h.ignore_mailto_links = args.ignore_mailto_links
    -    h.protect_links = args.protect_links
    -    h.ignore_images = args.ignore_images
    -    h.images_as_html = args.images_as_html
    -    h.images_to_alt = args.images_to_alt
    -    h.images_with_size = args.images_with_size
    -    h.google_doc = args.google_doc
    -    h.hide_strikethrough = args.hide_strikethrough
    -    h.escape_snob = args.escape_snob
    -    h.bypass_tables = args.bypass_tables
    -    h.ignore_tables = args.ignore_tables
    -    h.single_line_break = args.single_line_break
    -    h.inline_links = args.inline_links
    -    h.unicode_snob = args.unicode_snob
    -    h.use_automatic_links = args.use_automatic_links
    -    h.skip_internal_links = args.skip_internal_links
    -    h.links_each_paragraph = args.links_each_paragraph
    -    h.mark_code = args.mark_code
    -    h.wrap_links = args.wrap_links
    -    h.wrap_list_items = args.wrap_list_items
    -    h.wrap_tables = args.wrap_tables
    -    h.pad_tables = args.pad_tables
    -    h.default_image_alt = args.default_image_alt
    -    h.open_quote = args.open_quote
    -    h.close_quote = args.close_quote
    -
    -    sys.stdout.write(h.handle(html))
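Every flag above ends up as a plain attribute on a single HTML2Text instance, so the CLI is a thin shell over the library. A sketch of what `--ignore-links --body-width=0 page.html` amounts to (page.html is hypothetical):

import sys
from migration.html2text import HTML2Text

h = HTML2Text(baseurl="")
h.ignore_links = True   # --ignore-links
h.body_width = 0        # --body-width=0
with open("page.html", encoding="utf-8") as fp:
    sys.stdout.write(h.handle(fp.read()))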
    diff --git a/migration/html2text/config.py b/migration/html2text/config.py
    deleted file mode 100644
    index 0f4d29bc..00000000
    --- a/migration/html2text/config.py
    +++ /dev/null
    @@ -1,164 +0,0 @@
    -import re
    -
    -# Use Unicode characters instead of their ascii pseudo-replacements
    -UNICODE_SNOB = True
    -
    -# Marker to use for marking tables for padding post processing
    -TABLE_MARKER_FOR_PAD = "special_marker_for_table_padding"
    -# Escape all special characters.  Output is less readable, but avoids
    -# corner case formatting issues.
    -ESCAPE_SNOB = True
    -
    -# Put the links after each paragraph instead of at the end.
    -LINKS_EACH_PARAGRAPH = False
    -
    -# Wrap long lines at position. 0 for no wrapping.
    -BODY_WIDTH = 0
    -
    -# Don't show internal links (href="#local-anchor") -- corresponding link
    -# targets won't be visible in the plain text file anyway.
    -SKIP_INTERNAL_LINKS = False
    -
    -# Use inline, rather than reference, formatting for images and links
    -INLINE_LINKS = True
    -
    -# Protect links from line breaks surrounding them with angle brackets (in
    -# addition to their square brackets)
    -PROTECT_LINKS = True
    -WRAP_LINKS = True
    -
    -# Wrap list items.
    -WRAP_LIST_ITEMS = False
    -
    -# Wrap tables
    -WRAP_TABLES = False
    -
    -# Number of pixels Google indents nested lists
    -GOOGLE_LIST_INDENT = 36
    -
    -# Values Google and others may use to indicate bold text
    -BOLD_TEXT_STYLE_VALUES = ("bold", "700", "800", "900")
    -
    -IGNORE_ANCHORS = False
    -IGNORE_MAILTO_LINKS = False
    -IGNORE_IMAGES = False
    -IMAGES_AS_HTML = False
    -IMAGES_TO_ALT = False
    -IMAGES_WITH_SIZE = False
    -IGNORE_EMPHASIS = False
    -MARK_CODE = True
    -DECODE_ERRORS = "strict"
    -DEFAULT_IMAGE_ALT = ""
    -PAD_TABLES = True
    -
-# Convert links with same href and text to <href> format
-# if they are absolute links
    -USE_AUTOMATIC_LINKS = True
    -
    -# For checking space-only lines on line 771
    -RE_SPACE = re.compile(r"\s\+")
    -
    -RE_ORDERED_LIST_MATCHER = re.compile(r"\d+\.\s")
    -RE_UNORDERED_LIST_MATCHER = re.compile(r"[-\*\+]\s")
    -RE_MD_CHARS_MATCHER = re.compile(r"([\\\[\]\(\)])")
    -RE_MD_CHARS_MATCHER_ALL = re.compile(r"([`\*_{}\[\]\(\)#!])")
    -
    -# to find links in the text
    -RE_LINK = re.compile(r"(\[.*?\] ?\(.*?\))|(\[.*?\]:.*?)")
    -
    -# to find table separators
    -RE_TABLE = re.compile(r" \| ")
    -
    -RE_MD_DOT_MATCHER = re.compile(
    -    r"""
    -    ^             # start of line
    -    (\s*\d+)      # optional whitespace and a number
    -    (\.)          # dot
    -    (?=\s)        # lookahead assert whitespace
    -    """,
    -    re.MULTILINE | re.VERBOSE,
    -)
    -RE_MD_PLUS_MATCHER = re.compile(
    -    r"""
    -    ^
    -    (\s*)
    -    (\+)
    -    (?=\s)
    -    """,
    -    flags=re.MULTILINE | re.VERBOSE,
    -)
    -RE_MD_DASH_MATCHER = re.compile(
    -    r"""
    -    ^
    -    (\s*)
    -    (-)
    -    (?=\s|\-)     # followed by whitespace (bullet list, or spaced out hr)
    -                  # or another dash (header or hr)
    -    """,
    -    flags=re.MULTILINE | re.VERBOSE,
    -)
    -RE_SLASH_CHARS = r"\`*_{}[]()#+-.!"
    -RE_MD_BACKSLASH_MATCHER = re.compile(
    -    r"""
    -    (\\)          # match one slash
    -    (?=[%s])      # followed by a char that requires escaping
    -    """
    -    % re.escape(RE_SLASH_CHARS),
    -    flags=re.VERBOSE,
    -)
    -
    -UNIFIABLE = {
    -    "rsquo": "'",
    -    "lsquo": "'",
    -    "rdquo": '"',
    -    "ldquo": '"',
    -    "copy": "(C)",
    -    "mdash": "--",
    -    "nbsp": " ",
    -    "rarr": "->",
    -    "larr": "<-",
    -    "middot": "*",
    -    "ndash": "-",
    -    "oelig": "oe",
    -    "aelig": "ae",
    -    "agrave": "a",
    -    "aacute": "a",
    -    "acirc": "a",
    -    "atilde": "a",
    -    "auml": "a",
    -    "aring": "a",
    -    "egrave": "e",
    -    "eacute": "e",
    -    "ecirc": "e",
    -    "euml": "e",
    -    "igrave": "i",
    -    "iacute": "i",
    -    "icirc": "i",
    -    "iuml": "i",
    -    "ograve": "o",
    -    "oacute": "o",
    -    "ocirc": "o",
    -    "otilde": "o",
    -    "ouml": "o",
    -    "ugrave": "u",
    -    "uacute": "u",
    -    "ucirc": "u",
    -    "uuml": "u",
    -    "lrm": "",
    -    "rlm": "",
    -}
    -
    -# Format tables in HTML rather than Markdown syntax
    -BYPASS_TABLES = False
    -# Ignore table-related tags (table, th, td, tr) while keeping rows
    -IGNORE_TABLES = False
    -
    -
    -# Use a single line break after a block element rather than two line breaks.
    -# NOTE: Requires body width setting to be 0.
    -SINGLE_LINE_BREAK = False
    -
    -
-# Use double quotation marks when converting the <q> tag.
    -OPEN_QUOTE = '"'
    -CLOSE_QUOTE = '"'
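These module constants are only defaults; instances copy them at construction time, so a single conversion can override any of them without touching the module. Note that several values here (BODY_WIDTH = 0, plus truthy UNICODE_SNOB, ESCAPE_SNOB, MARK_CODE and PAD_TABLES) differ from stock html2text, which is presumably why the library was vendored. A sketch, assuming the constructor mirrors these defaults:

from migration.html2text import HTML2Text, config

h = HTML2Text()
assert h.body_width == config.BODY_WIDTH  # instance default comes from config
h.unicode_snob = False  # per-instance override; config itself is untouched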
    diff --git a/migration/html2text/elements.py b/migration/html2text/elements.py
    deleted file mode 100644
    index 2533ec08..00000000
    --- a/migration/html2text/elements.py
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -from typing import Dict, Optional
    -
    -
    -class AnchorElement:
    -    __slots__ = ["attrs", "count", "outcount"]
    -
    -    def __init__(self, attrs: Dict[str, Optional[str]], count: int, outcount: int):
    -        self.attrs = attrs
    -        self.count = count
    -        self.outcount = outcount
    -
    -
    -class ListElement:
    -    __slots__ = ["name", "num"]
    -
    -    def __init__(self, name: str, num: int):
    -        self.name = name
    -        self.num = num
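Both classes are plain bookkeeping records for the converter's anchor and list stacks; __slots__ keeps them small and typo-proof. A short sketch:

from migration.html2text.elements import AnchorElement, ListElement

a = AnchorElement({"href": "https://example.com"}, count=1, outcount=1)
li = ListElement("ol", num=3)
# a.title = "x" would raise AttributeError: __slots__ declares no such field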
    diff --git a/migration/html2text/py.typed b/migration/html2text/py.typed
    deleted file mode 100644
    index e69de29b..00000000
    diff --git a/migration/html2text/typing.py b/migration/html2text/typing.py
    deleted file mode 100644
    index 6e17fed2..00000000
    --- a/migration/html2text/typing.py
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -class OutCallback:
    -    def __call__(self, s: str) -> None:
    -        ...
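OutCallback only pins down, in annotations, the shape of the converter's output sink; any (str) -> None callable fits. A sketch, assuming the sink is passed as the out constructor parameter as in stock html2text:

from typing import List

chunks: List[str] = []

def collect(s: str) -> None:  # matches OutCallback's __call__ signature
    chunks.append(s)

# presumably usable as HTML2Text(out=collect) to stream converted chunks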
    diff --git a/migration/html2text/utils.py b/migration/html2text/utils.py
    deleted file mode 100644
    index 366748b6..00000000
    --- a/migration/html2text/utils.py
    +++ /dev/null
    @@ -1,290 +0,0 @@
    -import html.entities
    -from typing import Dict, List, Optional
    -
    -from . import config
    -
    -unifiable_n = {
    -    html.entities.name2codepoint[k]: v
    -    for k, v in config.UNIFIABLE.items()
    -    if k != "nbsp"
    -}
    -
    -
    -def hn(tag: str) -> int:
    -    if tag[0] == "h" and len(tag) == 2:
    -        n = tag[1]
    -        if "0" < n <= "9":
    -            return int(n)
    -    return 0
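A few concrete cases for hn, runnable as-is were the deleted module still importable:

from migration.html2text.utils import hn

assert hn("h2") == 2   # "h" plus a single digit 1-9 -> heading level
assert hn("h0") == 0   # "0" is excluded by the range check
assert hn("div") == 0  # non-heading tags map to 0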
    -
    -
    -def dumb_property_dict(style: str) -> Dict[str, str]:
    -    """
    -    :returns: A hash of css attributes
    -    """
    -    return {
    -        x.strip().lower(): y.strip().lower()
    -        for x, y in [z.split(":", 1) for z in style.split(";") if ":" in z]
    -    }
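dumb_property_dict normalises a style attribute string into a lowercased property map; for example:

from migration.html2text.utils import dumb_property_dict

style = dumb_property_dict("COLOR: red; Font-Weight: BOLD;")
assert style == {"color": "red", "font-weight": "bold"}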
    -
    -
    -def dumb_css_parser(data: str) -> Dict[str, Dict[str, str]]:
    -    """
    -    :type data: str
    -
    -    :returns: A hash of css selectors, each of which contains a hash of
    -    css attributes.
    -    :rtype: dict
    -    """
    -    # remove @import sentences
    -    data += ";"
    -    importIndex = data.find("@import")
    -    while importIndex != -1:
    -        data = data[0:importIndex] + data[data.find(";", importIndex) + 1 :]
    -        importIndex = data.find("@import")
    -
-    # parse the css into a {selector: {property: value}} mapping
    -    pairs = [x.split("{") for x in data.split("}") if "{" in x.strip()]
    -    try:
    -        elements = {a.strip(): dumb_property_dict(b) for a, b in pairs}
    -    except ValueError:
    -        elements = {}  # not that important
    -
    -    return elements
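So a stylesheet collapses to a selector-keyed mapping, with @import statements stripped first:

from migration.html2text.utils import dumb_css_parser

sheet = "@import url(base.css); .bold { font-weight: bold }"
assert dumb_css_parser(sheet) == {".bold": {"font-weight": "bold"}}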
    -
    -
    -def element_style(
    -    attrs: Dict[str, Optional[str]],
    -    style_def: Dict[str, Dict[str, str]],
    -    parent_style: Dict[str, str],
    -) -> Dict[str, str]:
    -    """
    -    :type attrs: dict
    -    :type style_def: dict
-    :type parent_style: dict
    -
    -    :returns: A hash of the 'final' style attributes of the element
    -    :rtype: dict
    -    """
    -    style = parent_style.copy()
    -    if "class" in attrs:
    -        assert attrs["class"] is not None
    -        for css_class in attrs["class"].split():
    -            css_style = style_def.get("." + css_class, {})
    -            style.update(css_style)
    -    if "style" in attrs:
    -        assert attrs["style"] is not None
    -        immediate_style = dumb_property_dict(attrs["style"])
    -        style.update(immediate_style)
    -
    -    return style
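The layering order is: parent style first, then class rules from the stylesheet, then the inline style attribute, which wins on conflicts:

from migration.html2text.utils import element_style

style_def = {".em": {"font-style": "italic"}}
attrs = {"class": "em", "style": "color: red"}
final = element_style(attrs, style_def, {"font-size": "12px"})
assert final == {"font-size": "12px", "font-style": "italic", "color": "red"}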
    -
    -
    -def google_list_style(style: Dict[str, str]) -> str:
    -    """
    -    Finds out whether this is an ordered or unordered list
    -
    -    :type style: dict
    -
    -    :rtype: str
    -    """
    -    if "list-style-type" in style:
    -        list_style = style["list-style-type"]
    -        if list_style in ["disc", "circle", "square", "none"]:
    -            return "ul"
    -
    -    return "ol"
    -
    -
    -def google_has_height(style: Dict[str, str]) -> bool:
    -    """
    -    Check if the style of the element has the 'height' attribute
    -    explicitly defined
    -
    -    :type style: dict
    -
    -    :rtype: bool
    -    """
    -    return "height" in style
    -
    -
    -def google_text_emphasis(style: Dict[str, str]) -> List[str]:
    -    """
    -    :type style: dict
    -
    -    :returns: A list of all emphasis modifiers of the element
    -    :rtype: list
    -    """
    -    emphasis = []
    -    if "text-decoration" in style:
    -        emphasis.append(style["text-decoration"])
    -    if "font-style" in style:
    -        emphasis.append(style["font-style"])
    -    if "font-weight" in style:
    -        emphasis.append(style["font-weight"])
    -
    -    return emphasis
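The modifiers come back in a fixed order (text-decoration, then font-style, then font-weight), e.g.:

from migration.html2text.utils import google_text_emphasis

style = {"font-style": "italic", "font-weight": "700"}
assert google_text_emphasis(style) == ["italic", "700"]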
    -
    -
    -def google_fixed_width_font(style: Dict[str, str]) -> bool:
    -    """
    -    Check if the css of the current element defines a fixed width font
    -
    -    :type style: dict
    -
    -    :rtype: bool
    -    """
    -    font_family = ""
    -    if "font-family" in style:
    -        font_family = style["font-family"]
    -    return "courier new" == font_family or "consolas" == font_family
    -
    -
    -def list_numbering_start(attrs: Dict[str, Optional[str]]) -> int:
    -    """
    -    Extract numbering from list element attributes
    -
    -    :type attrs: dict
    -
-    :rtype: int
    -    """
    -    if "start" in attrs:
    -        assert attrs["start"] is not None
    -        try:
    -            return int(attrs["start"]) - 1
    -        except ValueError:
    -            pass
    -
    -    return 0
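The -1 offset exists, presumably, because the caller increments the counter before rendering the first item; non-numeric or absent start values fall back to 0:

from migration.html2text.utils import list_numbering_start

assert list_numbering_start({"start": "3"}) == 2
assert list_numbering_start({"start": "x"}) == 0  # unparseable -> default
assert list_numbering_start({}) == 0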
    -
    -
    -def skipwrap(
    -    para: str, wrap_links: bool, wrap_list_items: bool, wrap_tables: bool
    -) -> bool:
    -    # If it appears to contain a link
    -    # don't wrap
    -    if not wrap_links and config.RE_LINK.search(para):
    -        return True
    -    # If the text begins with four spaces or one tab, it's a code block;
    -    # don't wrap
    -    if para[0:4] == "    " or para[0] == "\t":
    -        return True
    -
    -    # If the text begins with only two "--", possibly preceded by
    -    # whitespace, that's an emdash; so wrap.
    -    stripped = para.lstrip()
    -    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
    -        return False
    -
    -    # I'm not sure what this is for; I thought it was to detect lists,
-    # but there's a <br>-inside-<span> case in one of the tests that
-    # also depends upon it.
-    if stripped[0:1] in ("-", "*") and not stripped[0:2] == "**":
-        return not wrap_list_items
-
-    # If text contains a pipe character it is likely a table
-    if not wrap_tables and config.RE_TABLE.search(para):
-        return True
-
-    # If the text begins with a single -, *, or +, followed by a space,
-    # or an integer, followed by a ., followed by a space (in either
-    # case optionally preceded by whitespace), it's a list; don't wrap.
-    return bool(
-        config.RE_ORDERED_LIST_MATCHER.match(stripped)
-        or config.RE_UNORDERED_LIST_MATCHER.match(stripped)
-    )
-
-
-def escape_md(text: str) -> str:
-    """
-    Escapes markdown-sensitive characters within other markdown
-    constructs.
-    """
-    return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)
-
-
-def escape_md_section(text: str, snob: bool = False) -> str:
-    """
-    Escapes markdown-sensitive characters across whole document sections.
-    """
-    text = config.RE_MD_BACKSLASH_MATCHER.sub(r"\\\1", text)
-
-    if snob:
-        text = config.RE_MD_CHARS_MATCHER_ALL.sub(r"\\\1", text)
-
-    text = config.RE_MD_DOT_MATCHER.sub(r"\1\\\2", text)
-    text = config.RE_MD_PLUS_MATCHER.sub(r"\1\\\2", text)
-    text = config.RE_MD_DASH_MATCHER.sub(r"\1\\\2", text)
-
-    return text
-
-
-def reformat_table(lines: List[str], right_margin: int) -> List[str]:
-    """
-    Given the lines of a table, pads the cells and returns the new lines.
-    """
-    # find the maximum width of the columns
-    max_width = [len(x.rstrip()) + right_margin for x in lines[0].split("|")]
-    max_cols = len(max_width)
-    for line in lines:
-        cols = [x.rstrip() for x in line.split("|")]
-        num_cols = len(cols)
-
-        # don't drop any data if colspan attributes result in unequal lengths
-        if num_cols < max_cols:
-            cols += [""] * (max_cols - num_cols)
-        elif max_cols < num_cols:
-            max_width += [len(x) + right_margin for x in cols[-(num_cols - max_cols) :]]
-            max_cols = num_cols
-
-        max_width = [
-            max(len(x) + right_margin, old_len) for x, old_len in zip(cols, max_width)
-        ]
-
-    # reformat
-    new_lines = []
-    for line in lines:
-        cols = [x.rstrip() for x in line.split("|")]
-        if set(line.strip()) == set("-|"):
-            filler = "-"
-            new_cols = [
-                x.rstrip() + (filler * (M - len(x.rstrip())))
-                for x, M in zip(cols, max_width)
-            ]
-            new_lines.append("|-" + "|".join(new_cols) + "|")
-        else:
-            filler = " "
-            new_cols = [
-                x.rstrip() + (filler * (M - len(x.rstrip())))
-                for x, M in zip(cols, max_width)
-            ]
-            new_lines.append("| " + "|".join(new_cols) + "|")
-    return new_lines
-
-
-def pad_tables_in_text(text: str, right_margin: int = 1) -> str:
-    """
-    Provide padding for tables in the text
-    """
-    lines = text.split("\n")
-    table_buffer = []  # type: List[str]
-    table_started = False
-    new_lines = []
-    for line in lines:
-        # Toggle table started
-        if config.TABLE_MARKER_FOR_PAD in line:
-            table_started = not table_started
-            if not table_started:
-                table = reformat_table(table_buffer, right_margin)
-                new_lines.extend(table)
-                table_buffer = []
-                new_lines.append("")
-            continue
-        # Process lines
-        if table_started:
-            table_buffer.append(line)
-        else:
-            new_lines.append(line)
-    return "\n".join(new_lines)
diff --git a/migration/tables/__init__.py b/migration/tables/__init__.py
deleted file mode 100644
index 6cc37870..00000000
--- a/migration/tables/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ["users", "tags", "content_items", "comments"]
\ No newline at end of file
diff --git a/migration/tables/comments.py b/migration/tables/comments.py
deleted file mode 100644
index
d1147d7a..00000000 --- a/migration/tables/comments.py +++ /dev/null @@ -1,108 +0,0 @@ -from datetime import datetime -from dateutil.parser import parse as date_parse -from orm import Reaction, User -from orm import reaction -from orm.base import local_session -from migration.html2text import html2text -from orm.reaction import ReactionKind -from orm.shout import Shout - -ts = datetime.now() - -def migrate(entry, storage): - ''' - { - "_id": "hdtwS8fSyFLxXCgSC", - "body": "

    ", - "contentItem": "mnK8KsJHPRi8DrybQ", - "createdBy": "bMFPuyNg6qAD2mhXe", - "thread": "01/", - "createdAt": "2016-04-19 04:33:53+00:00", - "ratings": [ - { "createdBy": "AqmRukvRiExNpAe8C", "value": 1 }, - { "createdBy": "YdE76Wth3yqymKEu5", "value": 1 } - ], - "rating": 2, - "updatedAt": "2020-05-27 19:22:57.091000+00:00", - "updatedBy": "0" - } - - -> - - type Reaction { - id: Int! - shout: Shout! - createdAt: DateTime! - createdBy: User! - updatedAt: DateTime - deletedAt: DateTime - deletedBy: User - range: String # full / 0:2340 - kind: ReactionKind! - body: String - replyTo: Reaction - stat: Stat - old_id: String - old_thread: String - } - ''' - reaction_dict = {} - # FIXME: comment_dict['createdAt'] = ts if not entry.get('createdAt') else date_parse(entry.get('createdAt')) - # print('[migration] comment original date %r' % entry.get('createdAt')) - # print('[migration] comment date %r ' % comment_dict['createdAt']) - reaction_dict['body'] = html2text(entry.get('body', '')) - reaction_dict['oid'] = entry['_id'] - if entry.get('createdAt'): reaction_dict['createdAt'] = date_parse(entry.get('createdAt')) - shout_oid = entry.get('contentItem') - if not shout_oid in storage['shouts']['by_oid']: - if len(storage['shouts']['by_oid']) > 0: - return shout_oid - else: - print('[migration] no shouts migrated yet') - raise Exception - return - else: - with local_session() as session: - author = session.query(User).filter(User.oid == entry['createdBy']).first() - shout_dict = storage['shouts']['by_oid'][shout_oid] - if shout_dict: - reaction_dict['shout'] = shout_dict['slug'] - reaction_dict['createdBy'] = author.slug if author else 'discours' - reaction_dict['kind'] = ReactionKind.COMMENT - - # creating reaction from old comment - reaction = Reaction.create(**reaction_dict) - - reaction_dict['id'] = reaction.id - for comment_rating_old in entry.get('ratings',[]): - rater = session.query(User).filter(User.oid == comment_rating_old['createdBy']).first() - reactedBy = rater if rater else session.query(User).filter(User.slug == 'noname').first() - re_reaction_dict = { - 'shout': reaction_dict['shout'], - 'replyTo': reaction.id, - 'kind': ReactionKind.LIKE if comment_rating_old['value'] > 0 else ReactionKind.DISLIKE, - 'createdBy': reactedBy.slug if reactedBy else 'discours' - } - cts = comment_rating_old.get('createdAt') - if cts: re_reaction_dict['createdAt'] = date_parse(cts) - try: - # creating reaction from old rating - Reaction.create(**re_reaction_dict) - except Exception as e: - print('[migration] comment rating error: %r' % re_reaction_dict) - raise e - else: - print('[migration] error: cannot find shout for comment %r' % reaction_dict) - return reaction - -def migrate_2stage(rr, old_new_id): - reply_oid = rr.get('replyTo') - if not reply_oid: return - new_id = old_new_id.get(rr.get('oid')) - if not new_id: return - with local_session() as session: - comment = session.query(Reaction).filter(Reaction.id == new_id).first() - comment.replyTo = old_new_id.get(reply_oid) - comment.save() - session.commit() - if not rr['body']: raise Exception(rr) diff --git a/migration/tables/content_items.py b/migration/tables/content_items.py deleted file mode 100644 index 2ff7aa22..00000000 --- a/migration/tables/content_items.py +++ /dev/null @@ -1,226 +0,0 @@ -from dateutil.parser import parse as date_parse -import sqlalchemy -from orm.shout import Shout, ShoutTopic, User -from storages.viewed import ViewedByDay -from transliterate import translit -from datetime import datetime -from orm.base import 
local_session -from migration.extract import prepare_body -from orm.community import Community -from orm.reaction import Reaction, ReactionKind - -OLD_DATE = '2016-03-05 22:22:00.350000' -ts = datetime.now() -type2layout = { - 'Article': 'article', - 'Literature': 'prose', - 'Music': 'music', - 'Video': 'video', - 'Image': 'image' -} - -def get_shout_slug(entry): - slug = entry.get('slug', '') - if not slug: - for friend in entry.get('friendlySlugs', []): - slug = friend.get('slug', '') - if slug: break - return slug - -def migrate(entry, storage): - # init, set title and layout - r = { - 'layout': type2layout[entry['type']], - 'title': entry['title'], - 'community': Community.default_community.id, - 'authors': [], - 'topics': set([]), - # 'rating': 0, - # 'ratings': [], - 'createdAt': [] - } - topics_by_oid = storage['topics']['by_oid'] - users_by_oid = storage['users']['by_oid'] - - # author - - oid = entry.get('createdBy', entry.get('_id', entry.get('oid'))) - userdata = users_by_oid.get(oid) - if not userdata: - app = entry.get('application') - if app: - userslug = translit(app['name'], 'ru', reversed=True)\ - .replace(' ', '-')\ - .replace('\'', '')\ - .replace('.', '-').lower() - userdata = { - 'username': app['email'], - 'email': app['email'], - 'name': app['name'], - 'bio': app.get('bio', ''), - 'emailConfirmed': False, - 'slug': userslug, - 'createdAt': ts, - 'wasOnlineAt': ts - } - else: - userdata = User.default_user.dict() - assert userdata, 'no user found for %s from ' % [oid, len(users_by_oid.keys())] - r['authors'] = [userdata, ] - - # slug - - slug = get_shout_slug(entry) - if slug: r['slug'] = slug - else: raise Exception - - # cover - c = '' - if entry.get('thumborId'): - c = 'https://assets.discours.io/unsafe/1600x/' + entry['thumborId'] - else: - c = entry.get('image', {}).get('url') - if not c or 'cloudinary' in c: c = '' - r['cover'] = c - - # timestamps - - r['createdAt'] = date_parse(entry.get('createdAt', OLD_DATE)) - r['updatedAt'] = date_parse(entry['updatedAt']) if 'updatedAt' in entry else ts - if entry.get('published'): - r['publishedAt'] = date_parse(entry.get('publishedAt', OLD_DATE)) - if r['publishedAt'] == OLD_DATE: r['publishedAt'] = ts - if 'deletedAt' in entry: r['deletedAt'] = date_parse(entry['deletedAt']) - - # topics - category = entry['category'] - mainTopic = topics_by_oid.get(category) - if mainTopic: - r['mainTopic'] = storage['replacements'].get(mainTopic["slug"], mainTopic["slug"]) - topic_oids = [category, ] - topic_oids.extend(entry.get('tags', [])) - for oid in topic_oids: - if oid in storage['topics']['by_oid']: - r['topics'].add(storage['topics']['by_oid'][oid]['slug']) - else: - print('[migration] unknown old topic id: ' + oid) - r['topics'] = list(r['topics']) - - entry['topics'] = r['topics'] - entry['cover'] = r['cover'] - entry['authors'] = r['authors'] - - # body - r['body'] = prepare_body(entry) - - # save shout to db - - s = object() - shout_dict = r.copy() - user = None - del shout_dict['topics'] # FIXME: AttributeError: 'str' object has no attribute '_sa_instance_state' - #del shout_dict['rating'] # FIXME: TypeError: 'rating' is an invalid keyword argument for Shout - #del shout_dict['ratings'] - email = userdata.get('email') - slug = userdata.get('slug') - with local_session() as session: - # c = session.query(Community).all().pop() - if email: user = session.query(User).filter(User.email == email).first() - if not user and slug: user = session.query(User).filter(User.slug == slug).first() - if not user and userdata: - try: 
user = User.create(**userdata) - except sqlalchemy.exc.IntegrityError: - print('[migration] user error: ' + userdata) - userdata['id'] = user.id - userdata['createdAt'] = user.createdAt - storage['users']['by_slug'][userdata['slug']] = userdata - storage['users']['by_oid'][entry['_id']] = userdata - assert user, 'could not get a user' - shout_dict['authors'] = [ user, ] - - try: - s = Shout.create(**shout_dict) - except sqlalchemy.exc.IntegrityError as e: - with local_session() as session: - s = session.query(Shout).filter(Shout.slug == shout_dict['slug']).first() - bump = False - if s: - for key in shout_dict: - if key in s.__dict__: - if s.__dict__[key] != shout_dict[key]: - print('[migration] shout already exists, but differs in %s' % key) - bump = True - else: - print('[migration] shout already exists, but lacks %s' % key) - bump = True - if bump: - s.update(shout_dict) - else: - print('[migration] something went wrong with shout: \n%r' % shout_dict) - raise e - session.commit() - except: - print(s) - raise Exception - - - # shout topics aftermath - shout_dict['topics'] = [] - for tpc in r['topics']: - oldslug = tpc - newslug = storage['replacements'].get(oldslug, oldslug) - if newslug: - with local_session() as session: - shout_topic_old = session.query(ShoutTopic)\ - .filter(ShoutTopic.shout == shout_dict['slug'])\ - .filter(ShoutTopic.topic == oldslug).first() - if shout_topic_old: - shout_topic_old.update({ 'slug': newslug }) - else: - shout_topic_new = session.query(ShoutTopic)\ - .filter(ShoutTopic.shout == shout_dict['slug'])\ - .filter(ShoutTopic.topic == newslug).first() - if not shout_topic_new: - try: ShoutTopic.create(**{ 'shout': shout_dict['slug'], 'topic': newslug }) - except: print('[migration] shout topic error: ' + newslug) - session.commit() - if newslug not in shout_dict['topics']: - shout_dict['topics'].append(newslug) - else: - print('[migration] ignored topic slug: \n%r' % tpc['slug']) - # raise Exception - - # content_item ratings to reactions - try: - for content_rating in entry.get('ratings',[]): - with local_session() as session: - rater = session.query(User).filter(User.oid == content_rating['createdBy']).first() - reactedBy = rater if rater else session.query(User).filter(User.slug == 'noname').first() - if rater: - reaction_dict = { - 'kind': ReactionKind.LIKE if content_rating['value'] > 0 else ReactionKind.DISLIKE, - 'createdBy': reactedBy.slug, - 'shout': shout_dict['slug'] - } - cts = content_rating.get('createdAt') - if cts: reaction_dict['createdAt'] = date_parse(cts) - reaction = session.query(Reaction).\ - filter(Reaction.shout == reaction_dict['shout']).\ - filter(Reaction.createdBy == reaction_dict['createdBy']).\ - filter(Reaction.kind == reaction_dict['kind']).first() - if reaction: - reaction_dict['kind'] = ReactionKind.AGREE if content_rating['value'] > 0 else ReactionKind.DISAGREE, - reaction.update(reaction_dict) - else: Reaction.create(**reaction_dict) - # shout_dict['ratings'].append(reaction_dict) - except: - print('[migration] content_item.ratings error: \n%r' % content_rating) - raise Exception - - # shout views - ViewedByDay.create( shout = shout_dict['slug'], value = entry.get('views', 1) ) - # del shout_dict['ratings'] - shout_dict['oid'] = entry.get('_id') - storage['shouts']['by_oid'][entry['_id']] = shout_dict - storage['shouts']['by_slug'][slug] = shout_dict - return shout_dict diff --git a/migration/tables/email_subscriptions.py b/migration/tables/email_subscriptions.py deleted file mode 100644 index f148701f..00000000 --- 
a/migration/tables/email_subscriptions.py +++ /dev/null @@ -1,2 +0,0 @@ -def migrate(entry): - return entry \ No newline at end of file diff --git a/migration/tables/replacements.json b/migration/tables/replacements.json deleted file mode 100644 index 621a8e68..00000000 --- a/migration/tables/replacements.json +++ /dev/null @@ -1,755 +0,0 @@ -{ - "1990-e": "90s", - "2000-e": "2000s", - "90-e": "90s", - "207": "207", - "kartochki-rubinshteyna": "rubinstein-cards", - "Georgia": "georgia", - "Japan": "japan", - "Sweden": "sweden", - "abstraktsiya": "abstract", - "absurdism": "absurdism", - "acclimatization": "acclimatisation", - "activism": "activism", - "adolf-gitler": "adolf-hitler", - "afrika": "africa", - "agata-kristi": "agatha-christie", - "agressiya": "agression", - "agressivnoe-povedenie": "agression", - "aktsii": "actions", - "aktsionizm": "actionism", - "alber-kamyu": "albert-kamus", - "albomy": "albums", - "aleksandr-griboedov": "aleksander-griboedov", - "aleksandr-pushkin": "aleksander-pushkin", - "aleksandr-solzhenitsyn": "aleksander-solzhenitsyn", - "aleksandr-vvedenskiy": "aleksander-vvedensky", - "aleksey-navalnyy": "alexey-navalny", - "alfavit": "alphabet", - "alkogol": "alcohol", - "alternativa": "alternative", - "alternative": "alternative", - "alternativnaya-istoriya": "alternative-history", - "amerika": "america", - "anarhizm": "anarchism", - "anatoliy-mariengof": "anatoly-mariengof", - "ancient-russia": "ancient-russia", - "andegraund": "underground", - "andrey-platonov": "andrey-platonov", - "andrey-rodionov": "andrey-rodionov", - "andrey-tarkovskiy": "andrey-tarkovsky", - "angliyskie-istorii": "english-stories", - "angliyskiy-yazyk": "english-langugae", - "animation": "animation", - "animatsiya": "animation", - "anime": "anime", - "anri-volohonskiy": "anri-volohonsky", - "antifashizm": "anti-faschism", - "antiquity": "antiquity", - "antiutopiya": "dystopia", - "antropology": "antropology", - "antropotsen": "antropocenus", - "architecture": "architecture", - "arheologiya": "archeology", - "arhetipy": "archetypes", - "arhiv": "archive", - "aristokraty": "aristocracy", - "aristotel": "aristotle", - "arktika": "arctic", - "armiya": "army", - "art": "art", - "art-is": "art-is", - "artists": "artists", - "ateizm": "atheism", - "audiopoeziya": "audio-poetry", - "audio-poetry": "audio-poetry", - "audiospektakl": "audio-spectacles", - "auktsyon": "auktsyon", - "avangard": "avantgarde", - "avtofikshn": "autofiction", - "avtorskaya-pesnya": "bardsongs", - "azbuka-immigratsii": "immigration-basics", - "aziatskiy-kinematograf": "asian-cinema", - "b-movie": "b-movie", - "bannye-chteniya": "sauna-reading", - "bardsongs": "bardsongs", - "bdsm": "bdsm", - "belarus": "belarus", - "belgiya": "belgium", - "bertold-breht": "berttold-brecht", - "bezumie": "madness", - "biography": "biography", - "biologiya": "biology", - "bipolyarnoe-rasstroystvo": "bipolar-disorder", - "bitniki": "beatnics", - "biznes": "business", - "blizhniy-vostok": "middle-east", - "blizost": "closeness", - "blokada": "blockade", - "bob-dilan": "bob-dylan", - "bog": "god", - "bol": "pain", - "bolotnoe-delo": "bolotnaya-case", - "books": "books", - "boris-eltsin": "boris-eltsin", - "boris-godunov": "boris-godunov", - "boris-grebenschikov": "boris-grebenschikov", - "boris-nemtsov": "boris-nemtsov", - "boris-pasternak": "boris-pasternak", - "brak": "marriage", - "bret-iston-ellis": "bret-iston-ellis", - "buddizm": "buddhism", - "bullying": "bullying", - "bunt": "riot", - "burning-man": "burning-man", - "bytie": "being", - 
"byurokratiya": "bureaucracy", - "capitalism": "capitalism", - "censored-in-russia": "censored-in-russia", - "ch-rno-beloe": "black-and-white", - "ch-rnyy-yumor": "black-humour", - "chapters": "chapters", - "charity": "charity", - "chayldfri": "childfree", - "chechenskaya-voyna": "chechen-war", - "chechnya": "chechnya", - "chelovek": "male", - "chernobyl": "chernobyl", - "chernyy-yumor": "black-humour", - "children": "children", - "china": "china", - "chinovniki": "bureaucracy", - "chukotka": "chukotka", - "chuma": "plague", - "church": "church", - "cinema": "cinema", - "city": "city", - "civil-position": "civil-position", - "clips": "clips", - "collage": "collage", - "comics": "comics", - "conspiracy-theory": "conspiracy-theory", - "contemporary-art": "contemporary-art", - "contemporary-poetry": "poetry", - "contemporary-prose": "prose", - "coronavirus": "coronavirus", - "corruption": "corruption", - "creative-writing-school": "creative-writing-school", - "crime": "crime", - "criticism": "criticism", - "critiques": "reviews", - "culture": "culture", - "dadaizm": "dadaism", - "daniel-defo": "daniel-defoe", - "daniil-harms": "daniil-kharms", - "dante-aligeri": "dante-alighieri", - "darkveyv": "darkwave", - "death": "death", - "debaty": "debats", - "delo-seti": "seti-case", - "democracy": "democracy", - "demografiya": "demographics", - "demonstrations": "demonstrations", - "depression": "depression", - "derevnya": "village", - "design": "design", - "detskie-doma": "orphanages", - "detstvo": "childhood", - "digital": "digital", - "digital-art": "digital-art", - "directing": "directing", - "diskurs": "discours", - "diskurs-1": "discourse", - "dissidenty": "dissidents", - "diy": "diy", - "dmitriy-donskoy": "dmitriy-donskoy", - "dmitriy-prigov": "dmitriy-prigov", - "dnevniki": "dairies", - "documentary": "documentary", - "dokumenty": "doсuments", - "domashnee-nasilie": "home-terror", - "donald-tramp": "donald-trump", - "donbass": "donbass", - "donorstvo": "donation", - "drama": "drama", - "dramaturgy": "dramaturgy", - "drawing": "drawing", - "drevo-zhizni": "tree-of-life", - "drugs": "drugs", - "dzhaz": "jazz", - "dzhek-keruak": "jack-keruak", - "dzhim-morrison": "jim-morrison", - "dzhordzh-romero": "george-romero", - "dzhordzho-agamben": "giorgio-agamben", - "ecology": "ecology", - "economics": "economics", - "eda": "food", - "editing": "editing", - "editorial-statements": "editorial-statements", - "eduard-limonov": "eduard-limonov", - "education": "education", - "egor-letov": "egor-letov", - "eksperiment": "experiments", - "eksperimentalnaya-muzyka": "experimental-music", - "ekspressionizm": "expressionism", - "ekstremizm": "extremism", - "ekzistentsializm-1": "existentialism", - "elections": "elections", - "electronic": "electronics", - "electronics": "electronics", - "elena-glinskaya": "elena-glinskaya", - "elena-guro": "elena-guro", - "elizaveta-mnatsakanova": "elizaveta-mnatsakanova", - "embient": "ambient", - "emigration": "emigration", - "emil-dyurkgeym": "emile-durkheim", - "emotsii": "emotions", - "empiric": "empiric", - "epidemiya": "pandemic", - "erich-von-neff": "erich-von-neff", - "erotika": "erotics", - "essay": "essay", - "estetika": "aestetics", - "etika": "ethics", - "etnos": "ethnics", - "everyday-life": "everyday-life", - "evgeniy-onegin": "eugene-onegin", - "evolyutsiya": "evolution", - "exhibitions": "exhibitions", - "experience": "experiences", - "experimental": "experimental", - "experimental-music": "experimental-music", - "explanation": "explanation", - "faktcheking": 
"fact-checking", - "falsifikatsii": "falsifications", - "family": "family", - "fanfiki": "fan-fiction", - "fantastika": "sci-fi", - "fatalizm": "fatalism", - "fedor-dostoevskiy": "fedor-dostoevsky", - "fedor-ioannovich": "fedor-ioannovich", - "feleton": "feuilleton", - "feminism": "feminism", - "fenomenologiya": "phenomenology", - "fentezi": "fantasy", - "festival": "festival", - "festival-territoriya": "festival-territory", - "folk": "folk", - "folklor": "folklore", - "fotoreportazh": "photoreports", - "france": "france", - "frants-kafka": "franz-kafka", - "frederik-begbeder": "frederick-begbeder", - "freedom": "freedom", - "friendship": "friendship", - "fsb": "fsb", - "futbol": "footbool", - "future": "future", - "futuristy": "futurists", - "futurizm": "futurism", - "galereya": "gallery", - "gdr": "gdr", - "gender": "gender", - "gendernyy-diskurs": "gender", - "gennadiy-aygi": "gennadiy-aygi", - "gerhard-rihter": "gerhard-rihter", - "germaniya": "germany", - "germenevtika": "hermeneutics", - "geroi": "heroes", - "girls": "girls", - "gkchp": "gkchp", - "glitch": "glitch", - "globalizatsiya": "globalisation", - "gollivud": "hollywood", - "gonzo": "gonzo", - "gore-ot-uma": "woe-from-wit", - "graffiti": "graffiti", - "graphics": "graphics", - "gravyura": "engraving", - "grazhdanskaya-oborona": "grazhdanskaya-oborona", - "gretsiya": "greece", - "gulag": "gulag", - "han-batyy": "khan-batyy", - "health": "health", - "himiya": "chemistry", - "hip-hop": "hip-hop", - "history": "history", - "history-of-russia": "history-of-russia", - "holokost": "holocaust", - "horeografiya": "choreography", - "horror": "horror", - "hospis": "hospice", - "hristianstvo": "christianity", - "humans": "humans", - "humour": "humour", - "ideologiya": "ideology", - "idm": "idm", - "igil": "isis", - "igor-pomerantsev": "igor-pomerantsev", - "igra-prestolov": "game-of-throne", - "igry": "games", - "iisus-hristos": "jesus-christ", - "illness": "illness", - "illustration-history": "illustration-history", - "illustrations": "illustrations", - "imazhinizm": "imagism", - "immanuil-kant": "immanuel-kant", - "impressionizm": "impressionism", - "improvizatsiya": "improvisation", - "indi": "indie", - "individualizm": "individualism", - "infografika": "infographics", - "informatsiya": "information", - "ingmar-bergman": "ingmar-bergman", - "inklyuziya": "inclusion", - "installyatsiya": "installation", - "internet": "internet", - "interview": "interview", - "invalidnost": "disability", - "investigations": "investigations", - "iosif-brodskiy": "joseph-brodsky", - "iosif-stalin": "joseph-stalin", - "iskusstvennyy-intellekt": "artificial-intelligence", - "islam": "islam", - "istoriya-moskvy": "moscow-history", - "istoriya-teatra": "theatre-history", - "italiya": "italy", - "italyanskiy-yazyk": "italian-language", - "iudaika": "judaica", - "ivan-groznyy": "ivan-grozny", - "ivan-iii-gorbatyy": "ivan-iii-gorbaty", - "ivan-kalita": "ivan-kalita", - "ivan-krylov": "ivan-krylov", - "izobreteniya": "inventions", - "izrail-1": "israel", - "jazz": "jazz", - "john-lennon": "john-lennon", - "journalism": "journalism", - "justice": "justice", - "k-pop": "k-pop", - "kalligrafiya": "calligraphy", - "karikatura": "caricatures", - "katrin-nenasheva": "katrin-nenasheva", - "kavkaz": "caucasus", - "kazan": "kazan", - "kiberbezopasnost": "cybersecurity", - "kinoklub": "cinema-club", - "kirill-serebrennikov": "kirill-serebrennikov", - "klassika": "classic", - "kollektivnoe-bessoznatelnoe": "сollective-unconscious", - "komediya": "comedy", - "kommunikatsii": 
"communications", - "kommunizm": "communism", - "kommuny": "communes", - "kompyuternye-igry": "computer-games", - "konservatizm": "conservatism", - "kontrkultura": "counter-culture", - "kontseptualizm": "conceptualism", - "korotkometrazhka": "cinema-shorts", - "kosmos": "cosmos", - "kraudfanding": "crowdfunding", - "krizis": "crisis", - "krov": "blood", - "krym": "crimea", - "kulturologiya": "culturology", - "kulty": "cults", - "kurdistan": "kurdistan", - "kurt-kobeyn": "kurt-cobain", - "kurt-vonnegut": "kurt-vonnegut", - "kvir": "queer", - "laboratoriya": "lab", - "language": "languages", - "lars-fon-trier": "lars-fon-trier", - "laws": "laws", - "lectures": "lectures", - "leto": "summer", - "lev-tolstoy": "leo-tolstoy", - "lgbt": "lgbt", - "liberalizm": "liberalism", - "libertarianstvo": "libertarianism", - "life": "life", - "likbez": "likbez", - "lingvistika": "linguistics", - "lirika": "lirics", - "literary-studies": "literary-studies", - "literature": "literature", - "lo-fi": "lo-fi", - "love": "love", - "luzha-goluboy-krovi": "luzha-goluboy-krovi", - "lyudvig-vitgenshteyn": "ludwig-wittgenstein", - "lzhedmitriy": "false-dmitry", - "lzhenauka": "pseudoscience", - "maks-veber": "max-weber", - "manifests": "manifests", - "manipulyatsii-soznaniem": "mind-manipulation", - "marina-abramovich": "marina-abramovich", - "marketing": "marketing", - "marksizm": "marxism", - "marsel-dyushan": "marchel-duchamp", - "martin-haydegger": "martin-hidegger", - "matematika": "maths", - "vladimir-mayakovskiy": "vladimir-mayakovsky", - "mayakovskiy": "vladimir-mayakovsky", - "ekzistentsiya": "existence", - "media": "media", - "medicine": "medicine", - "memuary": "memoirs", - "menedzhment": "management", - "merab-mamardashvili": "merab-mamardashvili", - "mest": "revenge", - "metamodernizm": "metamodern", - "metavselennaya": "metaverse", - "metro": "metro", - "mifologiya": "mythology", - "mify": "myth", - "mihael-haneke": "michael-haneke", - "mihail-baryshnikov": "mihail-baryshnikov", - "mihail-bulgakov": "mihail-bulgakov", - "mikrotonalnaya-muzyka": "mikrotone-muzyka", - "minimalizm": "minimalism", - "minkult-privet": "minkult-privet", - "mir": "world", - "mirovozzrenie": "mindsets", - "mishel-fuko": "michel-foucault", - "mistika": "mystics", - "mitropolit-makariy": "mitropolit-makariy", - "mlm": "mlm", - "moda": "fashion", - "modernizm": "modernism", - "mokyumentari": "mockumentary", - "moloko-plus": "moloko-plus", - "money": "money", - "monologs": "monologues", - "monstratsiya": "monstration", - "moralnaya-otvetstvennost": "moral-responsibility", - "more": "sea", - "moscow": "moscow", - "moshennichestvo": "frauds", - "moskovskiy-romanticheskiy-kontseptualizm": "moscow-romantic-conceptualism", - "moskovskoe-delo": "moscow-case", - "movies": "movies", - "mozg": "brain", - "multiplikatsiya": "animation", - "music": "music", - "muzei": "museum", - "muzey": "museum", - "muzhchiny": "man", - "myshlenie": "thinking", - "nagornyy-karabah": "nagorno-karabakh", - "natsionalizm": "nationalism", - "natsionalnaya-ideya": "national-idea", - "natsizm": "nazism", - "natyurmort": "nature-morte", - "nauchpop": "pop-science", - "nbp": "nbp", - "nenavist": "hate", - "neofitsialnaya-literatura": "unofficial-literature", - "neoklassika": "neoclassic", - "neprozrachnye-smysly": "hidden-meanings", - "neravenstvo": "inequality", - "new-year": "new-year", - "neyronauka": "neuro-science", - "neyroseti": "neural-networks", - "niu-vshe": "hse", - "nizhniy-novgorod": "nizhny-novgorod", - "nko": "nonprofits", - "nlo": "ufo", - 
"nobelevskaya-premiya": "nobel-prize", - "noize-mc": "noize-mc", - "nonkonformizm": "nonconformism", - "novaya-drama": "new-drama", - "novosti": "news", - "noyz": "noise", - "oberiu": "oberiu", - "ocherk": "etudes", - "ochevidnyy-nuar": "ochevidnyy-nuar", - "odinochestvo": "loneliness", - "odna-kniga-odna-istoriya": "one-book-one-story", - "okrainy": "outskirts", - "opinions": "opinions", - "oppozitsiya": "opposition", - "orhan-pamuk": "orhan-pamuk", - "ornitologiya": "ornitology", - "osip-mandelshtam": "osip-mandelshtam", - "oskar-uayld": "oscar-wilde", - "osoznanie": "awareness", - "otnosheniya": "relationship", - "pablo-pikasso": "pablo-picasso", - "painting": "painting", - "paintings": "painting", - "pamyat": "memory", - "pandemiya": "pandemic", - "parizh": "paris", - "patriotizm": "patriotism", - "paul-tselan": "paul-tselan", - "per-burd": "pierre-bourdieu", - "performance": "performance", - "peyzazh": "landscape", - "philology": "philology", - "philosophy": "philosophy", - "photo": "photography", - "photography": "photography", - "photoprojects": "photoprojects", - "plakaty": "posters", - "plastilin": "plasticine", - "plays": "plays", - "podrostki": "teenagers", - "poema": "poem", - "poems": "poems", - "poeticheskaya-proza": "poetic-prose", - "poetry": "poetry", - "poetry-of-squares": "poetry-of-squares", - "poetry-slam": "poetry-slam", - "police": "police", - "politics": "politics", - "polsha": "poland", - "pop-art": "pop-art", - "pop-culture": "pop-culture", - "pornografiya": "pornography", - "portret": "portrait", - "poslovitsy": "proverbs", - "post-pank": "post-punk", - "post-rok": "post-rock", - "postmodernism": "postmodernism", - "povest": "novells", - "povsednevnost": "everyday-life", - "power": "power", - "pravo": "right", - "pravoslavie": "orthodox", - "pravozaschitniki": "human-rights-activism", - "prazdnik": "holidays", - "predatelstvo": "betrayal", - "predprinimatelstvo": "entrepreneurship", - "premera": "premier", - "premiya-oskar": "oscar-prize", - "pribaltika-1": "baltic", - "priroda": "nature", - "prison": "prison", - "pritcha": "parable", - "privatnost": "privacy", - "progress": "progress", - "projects": "projects", - "prokrastinatsiya": "procrastination", - "propaganda": "propaganda", - "proschenie": "forgiveness", - "prose": "prose", - "proshloe": "past", - "prostitutsiya": "prostitution", - "prosveschenie": "enlightenment", - "protests": "protests", - "psalmy": "psalms", - "psihoanaliz": "psychoanalysis", - "psihodeliki": "psychodelics", - "pskov": "pskov", - "psychiatry": "psychiatry", - "psychology": "psychology", - "punk": "punk", - "r-b": "rnb", - "realizm": "realism", - "redaktura": "editorial", - "refleksiya": "reflection", - "reggi": "reggae", - "religion": "religion", - "rene-zhirar": "rene-girard", - "renesanss": "renessance", - "renovatsiya": "renovation", - "rep": "rap", - "reportage": "reportage", - "repressions": "repressions", - "research": "research", - "retroveyv": "retrowave", - "review": "review", - "revolution": "revolution", - "rezo-gabriadze": "rezo-gabriadze", - "risunki": "painting", - "roboty": "robots", - "rock": "rock", - "roditeli": "parents", - "romantizm": "romantism", - "romany": "novell", - "ronald-reygan": "ronald-reygan", - "roskomnadzor": "roskomnadzor", - "rossiyskoe-kino": "russian-cinema", - "rozhava": "rojava", - "rpts": "rpts", - "rus-na-grani-sryva": "rus-na-grani-sryva", - "russia": "russia", - "russian-language": "russian-language", - "russian-literature": "russian-literature", - "russkiy-mir": "russkiy-mir", - 
"salvador-dali": "salvador-dali", - "samoidentifikatsiya": "self-identity", - "samoopredelenie": "self-definition", - "sankt-peterburg": "saint-petersburg", - "sasha-skochilenko": "sasha-skochilenko", - "satira": "satiric", - "saund-art": "sound-art", - "schaste": "hapiness", - "school": "school", - "science": "science", - "sculpture": "sculpture", - "second-world-war": "second-world-war", - "sekond-hend": "second-hand", - "seksprosvet": "sex-education", - "sekty": "sects", - "semiotics": "semiotics", - "serbiya": "serbia", - "serialy": "series", - "sever": "north", - "severnaya-koreya": "north-korea", - "sex": "sex", - "shotlandiya": "scotland", - "shugeyz": "shoegaze", - "siloviki": "siloviki", - "simeon-bekbulatovich": "simeon-bekbulatovich", - "simvolizm": "simbolism", - "siriya": "siria", - "skulptura": "sculpture", - "slavoy-zhizhek": "slavoj-zizek", - "smysl": "meaning", - "sny": "dreams", - "sobytiya": "events", - "social": "society", - "society": "society", - "sociology": "sociology", - "sofya-paleolog": "sofya-paleolog", - "sofya-vitovtovna": "sofya-vitovtovna", - "soobschestva": "communities", - "soprotivlenie": "resistence", - "sotsializm": "socialism", - "sotsialnaya-filosofiya": "social-philosophy", - "sotsseti": "social-networks", - "sotvorenie-tretego-rima": "third-rome", - "sovremennost": "modernity", - "spaces": "spaces", - "spektakl": "spectacles", - "spetseffekty": "special-fx", - "spetsoperatsiya": "special-operation", - "spetssluzhby": "special-services", - "sport": "sport", - "srednevekove": "middle-age", - "state": "state", - "statistika": "statistics", - "stendap": "stand-up", - "stoitsizm": "stoicism", - "stories": "stories", - "stoyanie-na-ugre": "stoyanie-na-ugre", - "strah": "fear", - "street-art": "street-art", - "stsenarii": "scenarios", - "summary": "summary", - "supergeroi": "superheroes", - "svetlana-aleksievich": "svetlana-aleksievich", - "svobodu-ivanu-golunovu": "free-ivan-golunov", - "syurrealizm": "surrealism", - "tales": "tales", - "tanets": "dance", - "tataro-mongolskoe-igo": "mongol-tatar-yoke", - "tatuirovki": "tattoo", - "technology": "technology", - "televidenie": "tv", - "telo": "body", - "telo-kak-iskusstvo": "body-as-art", - "terrorizm": "terrorism", - "tests": "tests", - "text": "texts", - "the-beatles": "the-beatles", - "theater": "theater", - "theory": "theory", - "tokio": "tokio", - "torture": "torture", - "totalitarizm": "totalitarism", - "traditions": "traditions", - "tragicomedy": "tragicomedy", - "transgendernost": "transgender", - "translation": "translation", - "transport": "transport", - "travel": "travel", - "travma": "trauma", - "trendy": "trends", - "tretiy-reyh": "third-reich", - "triller": "thriller", - "tsar": "central-african-republic", - "tsar-edip": "oedipus", - "tsarevich-dmitriy": "tsarevich-dmitry", - "tsennosti": "values", - "tsenzura": "censorship", - "tseremonii": "ceremonies", - "turizm": "tourism", - "tvorchestvo": "creativity", - "ugnetennyy-zhilischnyy-klass": "oppressed-housing-class", - "uilyam-shekspir": "william-shakespeare", - "ukraine": "ukraine", - "university": "university", - "urban-studies": "urban-studies", - "uroki-literatury": "literature-lessons", - "usa": "usa", - "ussr": "ussr", - "utopiya": "utopia", - "valter-benyamin": "valter-benyamin", - "varlam-shalamov": "varlam-shalamov", - "vasiliy-ii-temnyy": "basil-ii-temnyy", - "vasiliy-iii": "basil-iii", - "vdnh": "vdnh", - "vechnost": "ethernety", - "velikobritaniya": "great-britain", - "velimir-hlebnikov": "velimir-hlebnikov", - 
"velkom-tu-greyt-britn": "welcome-to-great-britain", - "venedikt-erofeev": "venedikt-erofeev", - "venetsiya": "veneece", - "vengriya": "hungary", - "verlibry": "free-verse", - "veschi": "things", - "vessels": "vessels", - "veterany": "veterans", - "video": "video", - "videoart": "videoart", - "videoklip": "clips", - "videopoeziya": "video-poetry", - "viktor-astafev": "viktor-astafev", - "viktor-pelevin": "viktor-pelevin", - "vilgelm-rayh": "wilhelm-reich", - "vinzavod": "vinzavod", - "violence": "violence", - "visual-culture": "visual-culture", - "vizualnaya-poeziya": "visual-poetry", - "vladimir-lenin": "vladimir-lenin", - "vladimir-nabokov": "vladimir-nabokov", - "vladimir-putin": "vladimir-putin", - "vladimir-sorokin": "vladimir-sorokin", - "vladimir-voynovich": "vladimir-voynovich", - "volga": "volga", - "volontery": "volonteurs", - "vong-karvay": "wong-karwai", - "vospominaniya": "memories", - "vostok": "east", - "vremya": "time", - "vudi-allen": "woody-allen", - "vynuzhdennye-otnosheniya": "forced-relationship", - "war": "war", - "war-in-ukraine-images": "war-in-ukrahine-images", - "women": "women", - "work": "work", - "writers": "writers", - "xx-century": "xx-century", - "yakob-yordans": "yakob-yordans", - "yan-vermeer": "yan-vermeer", - "yanka-dyagileva": "yanka-dyagileva", - "yaponskaya-literatura": "japan-literature", - "youth": "youth", - "yozef-rot": "yozef-rot", - "yurgen-habermas": "jorgen-habermas", - "za-liniey-mannergeyma": "behind-mannerheim-line", - "zahar-prilepin": "zahar-prilepin", - "zakonodatelstvo": "laws", - "zakony-mira": "world-laws", - "zametki": "notes", - "zhelanie": "wish", - "konets-vesny": "end-of-spring", - "zhivotnye": "animals", - "zhoze-saramago": "jose-saramago", - "zigmund-freyd": "sigmund-freud", - "zolotaya-orda": "golden-horde", - "zombi": "zombie", - "zombi-simpsony": "zombie-simpsons" -} \ No newline at end of file diff --git a/migration/tables/topics.py b/migration/tables/topics.py deleted file mode 100644 index 33c7a8d6..00000000 --- a/migration/tables/topics.py +++ /dev/null @@ -1,28 +0,0 @@ -from migration.extract import extract, html2text -from orm.base import local_session -from orm import Topic, Community - -def migrate(entry): - body_orig = entry.get('description', '').replace(' ', ' ') - topic_dict = { - 'slug': entry['slug'], - 'oid': entry['_id'], - 'title': entry['title'].replace(' ', ' '), #.lower(), - 'children': [], - 'community' : Community.default_community.slug - } - topic_dict['body'] = extract(html2text(body_orig), entry['_id']) - with local_session() as session: - slug = topic_dict['slug'] - topic = session.query(Topic).filter(Topic.slug == slug).first() - if not topic: - topic = Topic.create(**topic_dict) - if len(topic.title) > len(topic_dict['title']): - topic.update({ 'title': topic_dict['title'] }) - if len(topic.body) < len(topic_dict['body']): - topic.update({ 'body': topic_dict['body'] }) - session.commit() - # print(topic.__dict__) - rt = topic.__dict__.copy() - del rt['_sa_instance_state'] - return rt diff --git a/migration/tables/users.py b/migration/tables/users.py deleted file mode 100644 index bbae0cec..00000000 --- a/migration/tables/users.py +++ /dev/null @@ -1,105 +0,0 @@ -import sqlalchemy -from orm import User, UserRating -from dateutil.parser import parse -from orm.base import local_session - -def migrate(entry): - if 'subscribedTo' in entry: del entry['subscribedTo'] - email = entry['emails'][0]['address'] - user_dict = { - 'oid': entry['_id'], - 'roles': [], - 'ratings': [], - 'username': email, - 
diff --git a/migration/tables/users.py b/migration/tables/users.py
deleted file mode 100644
index bbae0cec..00000000
--- a/migration/tables/users.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import sqlalchemy
-from orm import User, UserRating
-from dateutil.parser import parse
-from orm.base import local_session
-
-def migrate(entry):
-    if 'subscribedTo' in entry: del entry['subscribedTo']
-    email = entry['emails'][0]['address']
-    user_dict = {
-        'oid': entry['_id'],
-        'username': email,
-        'email': email,
-        'password': entry['services']['password'].get('bcrypt', ''),
-        'createdAt': parse(entry['createdAt']),
-        'emailConfirmed': bool(entry['emails'][0]['verified']),
-        'muted': False,  # amnesty
-        'bio': entry['profile'].get('bio', ''),
-        'notifications': [],
-        'roles': [],  # entry['roles'] # roles by community
-        'ratings': [],  # entry['ratings']
-        'links': [],
-        'name': 'anonymous'
-    }
-    if 'updatedAt' in entry: user_dict['updatedAt'] = parse(entry['updatedAt'])
-    if 'wasOnlineAt' in entry: user_dict['wasOnlineAt'] = parse(entry['wasOnlineAt'])
-    if entry.get('profile'):
-        # slug
-        user_dict['slug'] = entry['profile'].get('path')
-        user_dict['bio'] = entry['profile'].get('bio', '')
-
-        # userpic
-        try: user_dict['userpic'] = 'https://assets.discours.io/unsafe/100x/' + entry['profile']['thumborId']
-        except KeyError:
-            try: user_dict['userpic'] = entry['profile']['image']['url']
-            except KeyError: user_dict['userpic'] = ''
-
-        # name
-        fn = entry['profile'].get('firstName', '')
-        ln = entry['profile'].get('lastName', '')
-        name = user_dict['slug'] if user_dict['slug'] else 'noname'
-        name = fn if fn else name
-        name = (name + ' ' + ln) if ln else name
-        name = entry['profile']['path'].lower().replace(' ', '-') if len(name) < 2 else name
-        user_dict['name'] = name
-
-        # links
-        fb = entry['profile'].get('facebook', False)
-        if fb: user_dict['links'].append(fb)
-        vk = entry['profile'].get('vkontakte', False)
-        if vk: user_dict['links'].append(vk)
-        tr = entry['profile'].get('twitter', False)
-        if tr: user_dict['links'].append(tr)
-        ws = entry['profile'].get('website', False)
-        if ws: user_dict['links'].append(ws)
-
-    # some checks
-    if not user_dict['slug'] and len(user_dict['links']) > 0:
-        user_dict['slug'] = user_dict['links'][0].split('/')[-1]
-
-    user_dict['slug'] = user_dict.get('slug') or user_dict['email'].split('@')[0]  # 'slug' may be present but None
-    oid = user_dict['oid']
-    try: user = User.create(**user_dict.copy())
-    except sqlalchemy.exc.IntegrityError:
-        print('[migration] cannot create user ' + user_dict['slug'])
-        with local_session() as session:
-            old_user = session.query(User).filter(User.slug == user_dict['slug']).first()
-        if not old_user:
-            print('[migration] ERROR: cannot find user ' + user_dict['slug'])
-            raise Exception
-        old_user.oid = oid  # adopt the existing row instead of creating a duplicate
-        user = old_user
-    user_dict['id'] = user.id
-    return user_dict
-
-def migrate_2stage(entry, id_map):
-    ce = 0
-    for rating_entry in entry.get('ratings', []):
-        rater_oid = rating_entry['createdBy']
-        rater_slug = id_map.get(rater_oid)
-        if not rater_slug:
-            ce += 1
-            # print(rating_entry)
-            continue
-        oid = entry['_id']
-        author_slug = id_map.get(oid)
-        user_rating_dict = {
-            'value': rating_entry['value'],
-            'rater': rater_slug,
-            'user': author_slug
-        }
-        with local_session() as session:
-            try:
-                user_rating = UserRating.create(**user_rating_dict)
-            except sqlalchemy.exc.IntegrityError:
-                # a rating for this rater/user pair already exists: merge the values
-                old_rating = session.query(UserRating).filter(
-                    UserRating.rater == rater_slug, UserRating.user == author_slug).first()
-                print('[migration] cannot create ' + author_slug + '`s rate from ' + rater_slug)
-                print('[migration] concat rating value %d+%d=%d' % (
-                    old_rating.value, rating_entry['value'], old_rating.value + rating_entry['value']))
-                old_rating.update({'value': old_rating.value + rating_entry['value']})
-                session.commit()
-            except Exception as e:
-                print(e)
-    return ce
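The users migration runs in two stages because ratings reference users by their legacy Mongo oid: migrate() registers every user and the caller fills id_map, then migrate_2stage() resolves rater references through that map. A stripped-down sketch of the same pattern using plain dicts; the field name 'createdBy' follows the code above, everything else here is illustrative:

def stage_one(entries):
    # first pass: record a slug for every legacy oid
    return {e['_id']: e['slug'] for e in entries}

def stage_two(entries, id_map):
    # second pass: resolve who-rated-whom, counting unresolvable raters
    resolved, missed = [], 0
    for e in entries:
        for rating in e.get('ratings', []):
            rater_slug = id_map.get(rating['createdBy'])
            if rater_slug is None:
                missed += 1  # rater never made it through stage one
                continue
            resolved.append({'rater': rater_slug,
                             'user': id_map[e['_id']],
                             'value': rating['value']})
    return resolved, missed

entries = [
    {'_id': 'oid-a', 'slug': 'alice', 'ratings': [{'createdBy': 'oid-b', 'value': 1}]},
    {'_id': 'oid-b', 'slug': 'bob', 'ratings': [{'createdBy': 'oid-x', 'value': -1}]},
]
ratings, missed = stage_two(entries, stage_one(entries))
assert ratings == [{'rater': 'bob', 'user': 'alice', 'value': 1}] and missed == 1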
diff --git a/migration/utils.py b/migration/utils.py
deleted file mode 100644
index 9a19c556..00000000
--- a/migration/utils.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from datetime import datetime
-from json import JSONEncoder
-
-class DateTimeEncoder(JSONEncoder):  # usage: json.dumps(payload, cls=DateTimeEncoder)
-    def default(self, z):
-        if isinstance(z, datetime):
-            return str(z)
-        else:
-            return super().default(z)
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
deleted file mode 100644
index 2bb33db0..00000000
--- a/poetry.lock
+++ /dev/null
@@ -1,902 +0,0 @@
-[[package]]
-name = "aioredis"
-version = "2.0.1"
-description = "asyncio (PEP 3156) Redis support"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-async-timeout = "*"
-typing-extensions = "*"
-
-[package.extras]
-hiredis = ["hiredis (>=1.0)"]
-
-[[package]]
-name = "anyio"
-version = "3.6.1"
-description = "High level compatibility layer for multiple asynchronous event loop implementations"
-category = "main"
-optional = false
-python-versions = ">=3.6.2"
-
-[package.dependencies]
-idna = ">=2.8"
-sniffio = ">=1.1"
-
-[package.extras]
-doc = ["packaging", "sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"]
-test = ["coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "contextlib2", "uvloop (<0.15)", "mock (>=4)", "uvloop (>=0.15)"]
-trio = ["trio (>=0.16)"]
-
-[[package]]
-name = "ariadne"
-version = "0.15.1"
-description = "Ariadne is a Python library for implementing GraphQL servers."
-category = "main"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-graphql-core = ">=3.2.0,<3.3"
-starlette = ">0.17"
-typing-extensions = ">=3.6.0"
-
-[package.extras]
-asgi-file-uploads = ["python-multipart (>=0.0.5)"]
-
-[[package]]
-name = "async-timeout"
-version = "4.0.2"
-description = "Timeout context manager for asyncio programs"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[[package]]
-name = "authlib"
-version = "1.0.1"
-description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
-category = "main"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-cryptography = ">=3.2"
-
-[[package]]
-name = "bson"
-version = "0.5.10"
-description = "BSON codec for Python"
-category = "main"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-python-dateutil = ">=2.4.0"
-six = ">=1.9.0"
-
-[[package]]
-name = "certifi"
-version = "2022.6.15"
-description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[[package]]
-name = "cffi"
-version = "1.15.1"
-description = "Foreign Function Interface for Python calling C code."
-category = "main"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-pycparser = "*"
-
-[[package]]
-name = "charset-normalizer"
-version = "2.1.0"
-description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
-optional = false
-python-versions = ">=3.6.0"
-
-[package.extras]
-unicode_backport = ["unicodedata2"]
-
-[[package]]
-name = "click"
-version = "8.1.3"
-description = "Composable command line interface toolkit"
-category = "main"
-optional = false
-python-versions = ">=3.7"
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[[package]]
-name = "colorama"
-version = "0.4.5"
-description = "Cross-platform colored terminal text."
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" - -[[package]] -name = "cryptography" -version = "37.0.3" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." -category = "main" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -cffi = ">=1.12" - -[package.extras] -docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] -docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] -pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] -sdist = ["setuptools_rust (>=0.11.4)"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"] - -[[package]] -name = "graphql-core" -version = "3.2.1" -description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." -category = "main" -optional = false -python-versions = ">=3.6,<4" - -[[package]] -name = "greenlet" -version = "1.1.2" -description = "Lightweight in-process concurrent programming" -category = "main" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" - -[package.extras] -docs = ["sphinx"] - -[[package]] -name = "h11" -version = "0.12.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -category = "main" -optional = false -python-versions = ">=3.6" - -[[package]] -name = "httpcore" -version = "0.15.0" -description = "A minimal low-level HTTP client." -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -anyio = ">=3.0.0,<4.0.0" -certifi = "*" -h11 = ">=0.11,<0.13" -sniffio = ">=1.0.0,<2.0.0" - -[package.extras] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] - -[[package]] -name = "httpx" -version = "0.23.0" -description = "The next generation HTTP client." -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -certifi = "*" -httpcore = ">=0.15.0,<0.16.0" -rfc3986 = {version = ">=1.3,<2", extras = ["idna2008"]} -sniffio = "*" - -[package.extras] -brotli = ["brotlicffi", "brotli"] -cli = ["click (>=8.0.0,<9.0.0)", "rich (>=10,<13)", "pygments (>=2.0.0,<3.0.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] - -[[package]] -name = "idna" -version = "3.3" -description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" -optional = false -python-versions = ">=3.5" - -[[package]] -name = "itsdangerous" -version = "2.1.2" -description = "Safely pass data to untrusted environments and back." 
-category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "passlib" -version = "1.7.4" -description = "comprehensive password hashing framework supporting over 30 schemes" -category = "main" -optional = false -python-versions = "*" - -[package.extras] -argon2 = ["argon2-cffi (>=18.2.0)"] -bcrypt = ["bcrypt (>=3.1.0)"] -build_docs = ["sphinx (>=1.6)", "sphinxcontrib-fulltoc (>=1.2.0)", "cloud-sptheme (>=1.10.1)"] -totp = ["cryptography"] - -[[package]] -name = "psycopg2-binary" -version = "2.9.3" -description = "psycopg2 - Python-PostgreSQL Database Adapter" -category = "main" -optional = false -python-versions = ">=3.6" - -[[package]] -name = "pycparser" -version = "2.21" -description = "C parser in Python" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[[package]] -name = "pydantic" -version = "1.9.1" -description = "Data validation and settings management using python type hints" -category = "main" -optional = false -python-versions = ">=3.6.1" - -[package.dependencies] -typing-extensions = ">=3.7.4.3" - -[package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] - -[[package]] -name = "pyjwt" -version = "2.4.0" -description = "JSON Web Token implementation in Python" -category = "main" -optional = false -python-versions = ">=3.6" - -[package.extras] -crypto = ["cryptography (>=3.3.1)"] -dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"] -docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] -tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -category = "main" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-frontmatter" -version = "1.0.0" -description = "Parse and manage posts with YAML (or other) frontmatter" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -PyYAML = "*" - -[package.extras] -docs = ["sphinx"] -test = ["pytest", "toml", "pyaml"] - -[[package]] -name = "pyyaml" -version = "6.0" -description = "YAML parser and emitter for Python" -category = "main" -optional = false -python-versions = ">=3.6" - -[[package]] -name = "requests" -version = "2.28.1" -description = "Python HTTP for Humans." 
-category = "main" -optional = false -python-versions = ">=3.7, <4" - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<3" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rfc3986" -version = "1.5.0" -description = "Validating URI References per RFC 3986" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -idna = {version = "*", optional = true, markers = "extra == \"idna2008\""} - -[package.extras] -idna2008 = ["idna"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "sniffio" -version = "1.2.0" -description = "Sniff out which async library your code is running under" -category = "main" -optional = false -python-versions = ">=3.5" - -[[package]] -name = "sqlalchemy" -version = "1.4.39" -description = "Database Abstraction Library" -category = "main" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} - -[package.extras] -aiomysql = ["greenlet (!=0.4.17)", "aiomysql"] -aiosqlite = ["typing_extensions (!=3.10.0.1)", "greenlet (!=0.4.17)", "aiosqlite"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["greenlet (!=0.4.17)", "asyncmy (>=0.2.3,!=0.2.4)"] -mariadb_connector = ["mariadb (>=1.0.1)"] -mssql = ["pyodbc"] -mssql_pymssql = ["pymssql"] -mssql_pyodbc = ["pyodbc"] -mypy = ["sqlalchemy2-stubs", "mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0,<2)", "mysqlclient (>=1.4.0)"] -mysql_connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=7,<8)", "cx_oracle (>=7)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql_asyncpg = ["greenlet (!=0.4.17)", "asyncpg"] -postgresql_pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] -postgresql_psycopg2binary = ["psycopg2-binary"] -postgresql_psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql (<1)", "pymysql"] -sqlcipher = ["sqlcipher3-binary"] - -[[package]] -name = "starlette" -version = "0.20.4" -description = "The little ASGI library that shines." -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -anyio = ">=3.4.0,<5" - -[package.extras] -full = ["itsdangerous", "jinja2", "python-multipart", "pyyaml", "requests"] - -[[package]] -name = "transliterate" -version = "1.10.2" -description = "Bi-directional transliterator for Python" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -six = ">=1.1.0" - -[[package]] -name = "typing-extensions" -version = "4.3.0" -description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "urllib3" -version = "1.26.9" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" - -[package.extras] -brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "uvicorn" -version = "0.18.2" -description = "The lightning-fast ASGI server." -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -click = ">=7.0" -h11 = ">=0.8" - -[package.extras] -standard = ["websockets (>=10.0)", "httptools (>=0.4.0)", "watchfiles (>=0.13)", "python-dotenv (>=0.13)", "PyYAML (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "colorama (>=0.4)"] - -[metadata] -lock-version = "1.1" -python-versions = "^3.10" -content-hash = "487ea114b5b67b7e1d755a18469b3b409b4484e199aad84846504e148a1c69f9" - -[metadata.files] -aioredis = [ - {file = "aioredis-2.0.1-py3-none-any.whl", hash = "sha256:9ac0d0b3b485d293b8ca1987e6de8658d7dafcca1cddfcd1d506cae8cdebfdd6"}, - {file = "aioredis-2.0.1.tar.gz", hash = "sha256:eaa51aaf993f2d71f54b70527c440437ba65340588afeb786cd87c55c89cd98e"}, -] -anyio = [ - {file = "anyio-3.6.1-py3-none-any.whl", hash = "sha256:cb29b9c70620506a9a8f87a309591713446953302d7d995344d0d7c6c0c9a7be"}, - {file = "anyio-3.6.1.tar.gz", hash = "sha256:413adf95f93886e442aea925f3ee43baa5a765a64a0f52c6081894f9992fdd0b"}, -] -ariadne = [ - {file = "ariadne-0.15.1-py3-none-any.whl", hash = "sha256:c87835e895ab0fcf9b3a0ab2b4a692ee56e9b043f449aece2d9b8da7e61a7ccc"}, - {file = "ariadne-0.15.1.tar.gz", hash = "sha256:678851826887ecf27a791135fb9ea9a67305243ebb03f9814fbc50fb7e8251f4"}, -] -async-timeout = [ - {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, - {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, -] -authlib = [ - {file = "Authlib-1.0.1-py2.py3-none-any.whl", hash = "sha256:1286e2d5ef5bfe5a11cc2d0a0d1031f0393f6ce4d61f5121cfe87fa0054e98bd"}, - {file = "Authlib-1.0.1.tar.gz", hash = "sha256:6e74a4846ac36dfc882b3cc2fbd3d9eb410a627f2f2dc11771276655345223b1"}, -] -bson = [ - {file = "bson-0.5.10.tar.gz", hash = "sha256:d6511b2ab051139a9123c184de1a04227262173ad593429d21e443d6462d6590"}, -] -certifi = [ - {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, - {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"}, -] -cffi = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = 
"cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = 
"cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, -] -charset-normalizer = [ - {file = "charset-normalizer-2.1.0.tar.gz", hash = "sha256:575e708016ff3a5e3681541cb9d79312c416835686d054a23accb873b254f413"}, - {file = "charset_normalizer-2.1.0-py3-none-any.whl", hash = "sha256:5189b6f22b01957427f35b6a08d9a0bc45b46d3788ef5a92e978433c7a35f8a5"}, -] -click = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] -colorama = [ - {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, - {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, -] -cryptography = [ - {file = "cryptography-37.0.3-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:d10413d493e98075060d3e62e5826de372912ea653ccc948f3c41b21ddca087f"}, - {file = "cryptography-37.0.3-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:cd64147ff16506632893ceb2569624b48c84daa3ba4d89695f7c7bc24188eee9"}, - {file = "cryptography-37.0.3-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:17c74f7d9e9e9bb7e84521243695c1b4bdc3a0e44ca764e6bcf8f05f3de3d0df"}, - {file = "cryptography-37.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:0713bee6c8077786c56bdec9c5d3f099d40d2c862ff3200416f6862e9dd63156"}, - {file = "cryptography-37.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b9c2008417741cdfbe945ef2d16b7b7ba0790886a0b49e1de533acf93eb66ed6"}, - {file = "cryptography-37.0.3-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:646905ff7a712e415bf0d0f214e0eb669dd2257c4d7a27db1e8baec5d2a1d55f"}, - {file = "cryptography-37.0.3-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:dcafadb5a06cb7a6bb49fb4c1de7414ee2f8c8e12b047606d97c3175d690f582"}, - {file = "cryptography-37.0.3-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0b4bfc5ccfe4e5c7de535670680398fed4a0bbc5dfd52b3a295baad42230abdf"}, - {file = "cryptography-37.0.3-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a03dbc0d8ce8c1146c177cd0e3a66ea106f36733fb1b997ea4d051f8a68539ff"}, - {file = "cryptography-37.0.3-cp36-abi3-win32.whl", hash = "sha256:190a24c14e91c1fa3101069aac7e77d11c5a73911c3904128367f52946bbb6fd"}, - {file = "cryptography-37.0.3-cp36-abi3-win_amd64.whl", hash = "sha256:b05c5478524deb7a019e240f2a970040c4b0f01f58f0425e6262c96b126c6a3e"}, - {file = "cryptography-37.0.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891ed8312840fd43e0696468a6520a582a033c0109f7b14b96067bfe1123226b"}, - {file = "cryptography-37.0.3-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:30d6aabf623a01affc7c0824936c3dde6590076b61f5dd299df3cc2c75fc5915"}, - {file = "cryptography-37.0.3-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:31a7c1f1c2551f013d4294d06e22848e2ccd77825f0987cba3239df6ebf7b020"}, - {file = "cryptography-37.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a94fd1ff80001cb97add71d07f596d8b865b716f25ef501183e0e199390e50d3"}, - {file = "cryptography-37.0.3-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:8a85dbcc770256918b40c2f40bd3ffd3b2ae45b0cf19068b561db8f8d61bf492"}, - {file = "cryptography-37.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:773d5b5f2e2bd2c7cbb1bd24902ad41283c88b9dd463a0f82adc9a2870d9d066"}, - {file = "cryptography-37.0.3-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:0f9193428a55a4347af2d4fd8141a2002dedbcc26487e67fd2ae19f977ee8afc"}, - {file = "cryptography-37.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf652c73e8f7c32a3f92f7184bf7f9106dacdf5ef59c3c3683d7dae2c4972fb"}, - {file = "cryptography-37.0.3-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c3c8b1ad2c266fdf7adc041cc4156d6a3d14db93de2f81b26a5af97ef3f209e5"}, - {file = "cryptography-37.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2383d6c3088e863304c37c65cd2ea404b7fbb4886823eab1d74137cc27f3d2ee"}, - {file = "cryptography-37.0.3.tar.gz", hash = "sha256:ae430d51c67ac638dfbb42edf56c669ca9c74744f4d225ad11c6f3d355858187"}, -] -graphql-core = [ - {file = "graphql-core-3.2.1.tar.gz", hash = "sha256:9d1bf141427b7d54be944587c8349df791ce60ade2e3cccaf9c56368c133c201"}, - {file = "graphql_core-3.2.1-py3-none-any.whl", hash = "sha256:f83c658e4968998eed1923a2e3e3eddd347e005ac0315fbb7ca4d70ea9156323"}, -] -greenlet = [ - {file = "greenlet-1.1.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:58df5c2a0e293bf665a51f8a100d3e9956febfbf1d9aaf8c0677cf70218910c6"}, - {file = "greenlet-1.1.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:aec52725173bd3a7b56fe91bc56eccb26fbdff1386ef123abb63c84c5b43b63a"}, - {file = "greenlet-1.1.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:833e1551925ed51e6b44c800e71e77dacd7e49181fdc9ac9a0bf3714d515785d"}, - {file = "greenlet-1.1.2-cp27-cp27m-win32.whl", hash = 
"sha256:aa5b467f15e78b82257319aebc78dd2915e4c1436c3c0d1ad6f53e47ba6e2713"}, - {file = "greenlet-1.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:40b951f601af999a8bf2ce8c71e8aaa4e8c6f78ff8afae7b808aae2dc50d4c40"}, - {file = "greenlet-1.1.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:95e69877983ea39b7303570fa6760f81a3eec23d0e3ab2021b7144b94d06202d"}, - {file = "greenlet-1.1.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:356b3576ad078c89a6107caa9c50cc14e98e3a6c4874a37c3e0273e4baf33de8"}, - {file = "greenlet-1.1.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:8639cadfda96737427330a094476d4c7a56ac03de7265622fcf4cfe57c8ae18d"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e5306482182170ade15c4b0d8386ded995a07d7cc2ca8f27958d34d6736497"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6a36bb9474218c7a5b27ae476035497a6990e21d04c279884eb10d9b290f1b1"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abb7a75ed8b968f3061327c433a0fbd17b729947b400747c334a9c29a9af6c58"}, - {file = "greenlet-1.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b336501a05e13b616ef81ce329c0e09ac5ed8c732d9ba7e3e983fcc1a9e86965"}, - {file = "greenlet-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:14d4f3cd4e8b524ae9b8aa567858beed70c392fdec26dbdb0a8a418392e71708"}, - {file = "greenlet-1.1.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:17ff94e7a83aa8671a25bf5b59326ec26da379ace2ebc4411d690d80a7fbcf23"}, - {file = "greenlet-1.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9f3cba480d3deb69f6ee2c1825060177a22c7826431458c697df88e6aeb3caee"}, - {file = "greenlet-1.1.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:fa877ca7f6b48054f847b61d6fa7bed5cebb663ebc55e018fda12db09dcc664c"}, - {file = "greenlet-1.1.2-cp35-cp35m-win32.whl", hash = "sha256:7cbd7574ce8e138bda9df4efc6bf2ab8572c9aff640d8ecfece1b006b68da963"}, - {file = "greenlet-1.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:903bbd302a2378f984aef528f76d4c9b1748f318fe1294961c072bdc7f2ffa3e"}, - {file = "greenlet-1.1.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:049fe7579230e44daef03a259faa24511d10ebfa44f69411d99e6a184fe68073"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:dd0b1e9e891f69e7675ba5c92e28b90eaa045f6ab134ffe70b52e948aa175b3c"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:7418b6bfc7fe3331541b84bb2141c9baf1ec7132a7ecd9f375912eca810e714e"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9d29ca8a77117315101425ec7ec2a47a22ccf59f5593378fc4077ac5b754fce"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21915eb821a6b3d9d8eefdaf57d6c345b970ad722f856cd71739493ce003ad08"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eff9d20417ff9dcb0d25e2defc2574d10b491bf2e693b4e491914738b7908168"}, - {file = "greenlet-1.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b8c008de9d0daba7b6666aa5bbfdc23dcd78cafc33997c9b7741ff6353bafb7f"}, - {file = "greenlet-1.1.2-cp36-cp36m-win32.whl", hash = "sha256:32ca72bbc673adbcfecb935bb3fb1b74e663d10a4b241aaa2f5a75fe1d1f90aa"}, - {file = "greenlet-1.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f0214eb2a23b85528310dad848ad2ac58e735612929c8072f6093f3585fd342d"}, - {file = 
"greenlet-1.1.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:b92e29e58bef6d9cfd340c72b04d74c4b4e9f70c9fa7c78b674d1fec18896dc4"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:fdcec0b8399108577ec290f55551d926d9a1fa6cad45882093a7a07ac5ec147b"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:93f81b134a165cc17123626ab8da2e30c0455441d4ab5576eed73a64c025b25c"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e12bdc622676ce47ae9abbf455c189e442afdde8818d9da983085df6312e7a1"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c790abda465726cfb8bb08bd4ca9a5d0a7bd77c7ac1ca1b839ad823b948ea28"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f276df9830dba7a333544bd41070e8175762a7ac20350786b322b714b0e654f5"}, - {file = "greenlet-1.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c5d5b35f789a030ebb95bff352f1d27a93d81069f2adb3182d99882e095cefe"}, - {file = "greenlet-1.1.2-cp37-cp37m-win32.whl", hash = "sha256:64e6175c2e53195278d7388c454e0b30997573f3f4bd63697f88d855f7a6a1fc"}, - {file = "greenlet-1.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b11548073a2213d950c3f671aa88e6f83cda6e2fb97a8b6317b1b5b33d850e06"}, - {file = "greenlet-1.1.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9633b3034d3d901f0a46b7939f8c4d64427dfba6bbc5a36b1a67364cf148a1b0"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:eb6ea6da4c787111adf40f697b4e58732ee0942b5d3bd8f435277643329ba627"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f3acda1924472472ddd60c29e5b9db0cec629fbe3c5c5accb74d6d6d14773478"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e859fcb4cbe93504ea18008d1df98dee4f7766db66c435e4882ab35cf70cac43"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e44c8afdbe5467e4f7b5851be223be68adb4272f44696ee71fe46b7036a711"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec8c433b3ab0419100bd45b47c9c8551248a5aee30ca5e9d399a0b57ac04651b"}, - {file = "greenlet-1.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2bde6792f313f4e918caabc46532aa64aa27a0db05d75b20edfc5c6f46479de2"}, - {file = "greenlet-1.1.2-cp38-cp38-win32.whl", hash = "sha256:288c6a76705dc54fba69fbcb59904ae4ad768b4c768839b8ca5fdadec6dd8cfd"}, - {file = "greenlet-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:8d2f1fb53a421b410751887eb4ff21386d119ef9cde3797bf5e7ed49fb51a3b3"}, - {file = "greenlet-1.1.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:166eac03e48784a6a6e0e5f041cfebb1ab400b394db188c48b3a84737f505b67"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:572e1787d1460da79590bf44304abbc0a2da944ea64ec549188fa84d89bba7ab"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:be5f425ff1f5f4b3c1e33ad64ab994eed12fc284a6ea71c5243fd564502ecbe5"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1692f7d6bc45e3200844be0dba153612103db241691088626a33ff1f24a0d88"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7227b47e73dedaa513cdebb98469705ef0d66eb5a1250144468e9c3097d6b59b"}, - {file = 
"greenlet-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ff61ff178250f9bb3cd89752df0f1dd0e27316a8bd1465351652b1b4a4cdfd3"}, - {file = "greenlet-1.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0051c6f1f27cb756ffc0ffbac7d2cd48cb0362ac1736871399a739b2885134d3"}, - {file = "greenlet-1.1.2-cp39-cp39-win32.whl", hash = "sha256:f70a9e237bb792c7cc7e44c531fd48f5897961701cdaa06cf22fc14965c496cf"}, - {file = "greenlet-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:013d61294b6cd8fe3242932c1c5e36e5d1db2c8afb58606c5a67efce62c1f5fd"}, - {file = "greenlet-1.1.2.tar.gz", hash = "sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a"}, -] -h11 = [ - {file = "h11-0.12.0-py3-none-any.whl", hash = "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6"}, - {file = "h11-0.12.0.tar.gz", hash = "sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042"}, -] -httpcore = [ - {file = "httpcore-0.15.0-py3-none-any.whl", hash = "sha256:1105b8b73c025f23ff7c36468e4432226cbb959176eab66864b8e31c4ee27fa6"}, - {file = "httpcore-0.15.0.tar.gz", hash = "sha256:18b68ab86a3ccf3e7dc0f43598eaddcf472b602aba29f9aa6ab85fe2ada3980b"}, -] -httpx = [ - {file = "httpx-0.23.0-py3-none-any.whl", hash = "sha256:42974f577483e1e932c3cdc3cd2303e883cbfba17fe228b0f63589764d7b9c4b"}, - {file = "httpx-0.23.0.tar.gz", hash = "sha256:f28eac771ec9eb4866d3fb4ab65abd42d38c424739e80c08d8d20570de60b0ef"}, -] -idna = [ - {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, - {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, -] -itsdangerous = [ - {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"}, - {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"}, -] -passlib = [ - {file = "passlib-1.7.4-py2.py3-none-any.whl", hash = "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1"}, - {file = "passlib-1.7.4.tar.gz", hash = "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04"}, -] -psycopg2-binary = [ - {file = "psycopg2-binary-2.9.3.tar.gz", hash = "sha256:761df5313dc15da1502b21453642d7599d26be88bff659382f8f9747c7ebea4e"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:539b28661b71da7c0e428692438efbcd048ca21ea81af618d845e06ebfd29478"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e82d38390a03da28c7985b394ec3f56873174e2c88130e6966cb1c946508e65"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57804fc02ca3ce0dbfbef35c4b3a4a774da66d66ea20f4bda601294ad2ea6092"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:083a55275f09a62b8ca4902dd11f4b33075b743cf0d360419e2051a8a5d5ff76"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:0a29729145aaaf1ad8bafe663131890e2111f13416b60e460dae0a96af5905c9"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a79d622f5206d695d7824cbf609a4f5b88ea6d6dab5f7c147fc6d333a8787e4"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:090f3348c0ab2cceb6dfbe6bf721ef61262ddf518cd6cc6ecc7d334996d64efa"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a9e1f75f96ea388fbcef36c70640c4efbe4650658f3d6a2967b4cc70e907352e"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c3ae8e75eb7160851e59adc77b3a19a976e50622e44fd4fd47b8b18208189d42"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-win32.whl", hash = "sha256:7b1e9b80afca7b7a386ef087db614faebbf8839b7f4db5eb107d0f1a53225029"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:8b344adbb9a862de0c635f4f0425b7958bf5a4b927c8594e6e8d261775796d53"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:e847774f8ffd5b398a75bc1c18fbb56564cda3d629fe68fd81971fece2d3c67e"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68641a34023d306be959101b345732360fc2ea4938982309b786f7be1b43a4a1"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3303f8807f342641851578ee7ed1f3efc9802d00a6f83c101d21c608cb864460"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:e3699852e22aa68c10de06524a3721ade969abf382da95884e6a10ff798f9281"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:526ea0378246d9b080148f2d6681229f4b5964543c170dd10bf4faaab6e0d27f"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:b1c8068513f5b158cf7e29c43a77eb34b407db29aca749d3eb9293ee0d3103ca"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:15803fa813ea05bef089fa78835118b5434204f3a17cb9f1e5dbfd0b9deea5af"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:152f09f57417b831418304c7f30d727dc83a12761627bb826951692cc6491e57"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:404224e5fef3b193f892abdbf8961ce20e0b6642886cfe1fe1923f41aaa75c9d"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-win32.whl", hash = "sha256:1f6b813106a3abdf7b03640d36e24669234120c72e91d5cbaeb87c5f7c36c65b"}, - {file = "psycopg2_binary-2.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:2d872e3c9d5d075a2e104540965a1cf898b52274a5923936e5bfddb58c59c7c2"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:10bb90fb4d523a2aa67773d4ff2b833ec00857f5912bafcfd5f5414e45280fb1"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a52ecab70af13e899f7847b3e074eeb16ebac5615665db33bce8a1009cf33"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a29b3ca4ec9defec6d42bf5feb36bb5817ba3c0230dd83b4edf4bf02684cd0ae"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:12b11322ea00ad8db8c46f18b7dfc47ae215e4df55b46c67a94b4effbaec7094"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:53293533fcbb94c202b7c800a12c873cfe24599656b341f56e71dd2b557be063"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c381bda330ddf2fccbafab789d83ebc6c53db126e4383e73794c74eedce855ef"}, - {file = 
"psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d29409b625a143649d03d0fd7b57e4b92e0ecad9726ba682244b73be91d2fdb"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:183a517a3a63503f70f808b58bfbf962f23d73b6dccddae5aa56152ef2bcb232"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:15c4e4cfa45f5a60599d9cec5f46cd7b1b29d86a6390ec23e8eebaae84e64554"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:adf20d9a67e0b6393eac162eb81fb10bc9130a80540f4df7e7355c2dd4af9fba"}, - {file = "psycopg2_binary-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f9ffd643bc7349eeb664eba8864d9e01f057880f510e4681ba40a6532f93c71"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:def68d7c21984b0f8218e8a15d514f714d96904265164f75f8d3a70f9c295667"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dffc08ca91c9ac09008870c9eb77b00a46b3378719584059c034b8945e26b272"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:280b0bb5cbfe8039205c7981cceb006156a675362a00fe29b16fbc264e242834"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:af9813db73395fb1fc211bac696faea4ca9ef53f32dc0cfa27e4e7cf766dcf24"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:63638d875be8c2784cfc952c9ac34e2b50e43f9f0a0660b65e2a87d656b3116c"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ffb7a888a047696e7f8240d649b43fb3644f14f0ee229077e7f6b9f9081635bd"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0c9d5450c566c80c396b7402895c4369a410cab5a82707b11aee1e624da7d004"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:d1c1b569ecafe3a69380a94e6ae09a4789bbb23666f3d3a08d06bbd2451f5ef1"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8fc53f9af09426a61db9ba357865c77f26076d48669f2e1bb24d85a22fb52307"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-win32.whl", hash = "sha256:6472a178e291b59e7f16ab49ec8b4f3bdada0a879c68d3817ff0963e722a82ce"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:35168209c9d51b145e459e05c31a9eaeffa9a6b0fd61689b48e07464ffd1a83e"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:47133f3f872faf28c1e87d4357220e809dfd3fa7c64295a4a148bcd1e6e34ec9"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91920527dea30175cc02a1099f331aa8c1ba39bf8b7762b7b56cbf54bc5cce42"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:887dd9aac71765ac0d0bac1d0d4b4f2c99d5f5c1382d8b770404f0f3d0ce8a39"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:1f14c8b0942714eb3c74e1e71700cbbcb415acbc311c730370e70c578a44a25c"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:7af0dd86ddb2f8af5da57a976d27cd2cd15510518d582b478fbb2292428710b4"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:93cd1967a18aa0edd4b95b1dfd554cf15af657cb606280996d393dadc88c3c35"}, - {file 
= "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bda845b664bb6c91446ca9609fc69f7db6c334ec5e4adc87571c34e4f47b7ddb"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:01310cf4cf26db9aea5158c217caa92d291f0500051a6469ac52166e1a16f5b7"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:99485cab9ba0fa9b84f1f9e1fef106f44a46ef6afdeec8885e0b88d0772b49e8"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-win32.whl", hash = "sha256:46f0e0a6b5fa5851bbd9ab1bc805eef362d3a230fbdfbc209f4a236d0a7a990d"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:accfe7e982411da3178ec690baaceaad3c278652998b2c45828aaac66cd8285f"}, -] -pycparser = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] -pydantic = [ - {file = "pydantic-1.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8098a724c2784bf03e8070993f6d46aa2eeca031f8d8a048dff277703e6e193"}, - {file = "pydantic-1.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c320c64dd876e45254bdd350f0179da737463eea41c43bacbee9d8c9d1021f11"}, - {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18f3e912f9ad1bdec27fb06b8198a2ccc32f201e24174cec1b3424dda605a310"}, - {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11951b404e08b01b151222a1cb1a9f0a860a8153ce8334149ab9199cd198131"}, - {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8bc541a405423ce0e51c19f637050acdbdf8feca34150e0d17f675e72d119580"}, - {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e565a785233c2d03724c4dc55464559639b1ba9ecf091288dd47ad9c629433bd"}, - {file = "pydantic-1.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a4a88dcd6ff8fd47c18b3a3709a89adb39a6373f4482e04c1b765045c7e282fd"}, - {file = "pydantic-1.9.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:447d5521575f18e18240906beadc58551e97ec98142266e521c34968c76c8761"}, - {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985ceb5d0a86fcaa61e45781e567a59baa0da292d5ed2e490d612d0de5796918"}, - {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059b6c1795170809103a1538255883e1983e5b831faea6558ef873d4955b4a74"}, - {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d12f96b5b64bec3f43c8e82b4aab7599d0157f11c798c9f9c528a72b9e0b339a"}, - {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ae72f8098acb368d877b210ebe02ba12585e77bd0db78ac04a1ee9b9f5dd2166"}, - {file = "pydantic-1.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:79b485767c13788ee314669008d01f9ef3bc05db9ea3298f6a50d3ef596a154b"}, - {file = "pydantic-1.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:494f7c8537f0c02b740c229af4cb47c0d39840b829ecdcfc93d91dcbb0779892"}, - {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0f047e11febe5c3198ed346b507e1d010330d56ad615a7e0a89fae604065a0e"}, - {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:969dd06110cb780da01336b281f53e2e7eb3a482831df441fb65dd30403f4608"}, - {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:177071dfc0df6248fd22b43036f936cfe2508077a72af0933d0c1fa269b18537"}, - {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9bcf8b6e011be08fb729d110f3e22e654a50f8a826b0575c7196616780683380"}, - {file = "pydantic-1.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a955260d47f03df08acf45689bd163ed9df82c0e0124beb4251b1290fa7ae728"}, - {file = "pydantic-1.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ce157d979f742a915b75f792dbd6aa63b8eccaf46a1005ba03aa8a986bde34a"}, - {file = "pydantic-1.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0bf07cab5b279859c253d26a9194a8906e6f4a210063b84b433cf90a569de0c1"}, - {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d93d4e95eacd313d2c765ebe40d49ca9dd2ed90e5b37d0d421c597af830c195"}, - {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1542636a39c4892c4f4fa6270696902acb186a9aaeac6f6cf92ce6ae2e88564b"}, - {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a9af62e9b5b9bc67b2a195ebc2c2662fdf498a822d62f902bf27cccb52dbbf49"}, - {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fe4670cb32ea98ffbf5a1262f14c3e102cccd92b1869df3bb09538158ba90fe6"}, - {file = "pydantic-1.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:9f659a5ee95c8baa2436d392267988fd0f43eb774e5eb8739252e5a7e9cf07e0"}, - {file = "pydantic-1.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b83ba3825bc91dfa989d4eed76865e71aea3a6ca1388b59fc801ee04c4d8d0d6"}, - {file = "pydantic-1.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1dd8fecbad028cd89d04a46688d2fcc14423e8a196d5b0a5c65105664901f810"}, - {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02eefd7087268b711a3ff4db528e9916ac9aa18616da7bca69c1871d0b7a091f"}, - {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7eb57ba90929bac0b6cc2af2373893d80ac559adda6933e562dcfb375029acee"}, - {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4ce9ae9e91f46c344bec3b03d6ee9612802682c1551aaf627ad24045ce090761"}, - {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:72ccb318bf0c9ab97fc04c10c37683d9eea952ed526707fabf9ac5ae59b701fd"}, - {file = "pydantic-1.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b6760b08b7c395975d893e0b814a11cf011ebb24f7d869e7118f5a339a82e1"}, - {file = "pydantic-1.9.1-py3-none-any.whl", hash = "sha256:4988c0f13c42bfa9ddd2fe2f569c9d54646ce84adc5de84228cfe83396f3bd58"}, - {file = "pydantic-1.9.1.tar.gz", hash = "sha256:1ed987c3ff29fff7fd8c3ea3a3ea877ad310aae2ef9889a119e22d3f2db0691a"}, -] -pyjwt = [ - {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, - {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, -] -python-dateutil = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] -python-frontmatter = [ - {file = "python-frontmatter-1.0.0.tar.gz", hash = 
"sha256:e98152e977225ddafea6f01f40b4b0f1de175766322004c826ca99842d19a7cd"}, - {file = "python_frontmatter-1.0.0-py3-none-any.whl", hash = "sha256:766ae75f1b301ffc5fe3494339147e0fd80bc3deff3d7590a93991978b579b08"}, -] -pyyaml = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = 
"PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, -] -requests = [ - {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"}, - {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"}, -] -rfc3986 = [ - {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"}, - {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"}, -] -six = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] -sniffio = [ - {file = "sniffio-1.2.0-py3-none-any.whl", hash = "sha256:471b71698eac1c2112a40ce2752bb2f4a4814c22a54a3eed3676bc0f5ca9f663"}, - {file = "sniffio-1.2.0.tar.gz", hash = "sha256:c4666eecec1d3f50960c6bdf61ab7bc350648da6c126e3cf6898d8cd4ddcd3de"}, -] -sqlalchemy = [ - {file = "SQLAlchemy-1.4.39-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:4770eb3ba69ec5fa41c681a75e53e0e342ac24c1f9220d883458b5596888e43a"}, - {file = "SQLAlchemy-1.4.39-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:752ef2e8dbaa3c5d419f322e3632f00ba6b1c3230f65bc97c2ff5c5c6c08f441"}, - {file = "SQLAlchemy-1.4.39-cp27-cp27m-win32.whl", hash = "sha256:b30e70f1594ee3c8902978fd71900d7312453922827c4ce0012fa6a8278d6df4"}, - {file = "SQLAlchemy-1.4.39-cp27-cp27m-win_amd64.whl", hash = "sha256:864d4f89f054819cb95e93100b7d251e4d114d1c60bc7576db07b046432af280"}, - {file = "SQLAlchemy-1.4.39-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:8f901be74f00a13bf375241a778455ee864c2c21c79154aad196b7a994e1144f"}, - {file = "SQLAlchemy-1.4.39-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:1745987ada1890b0e7978abdb22c133eca2e89ab98dc17939042240063e1ef21"}, - {file = "SQLAlchemy-1.4.39-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ede13a472caa85a13abe5095e71676af985d7690eaa8461aeac5c74f6600b6c0"}, - {file = "SQLAlchemy-1.4.39-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7f13644b15665f7322f9e0635129e0ef2098409484df67fcd225d954c5861559"}, - {file = "SQLAlchemy-1.4.39-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26146c59576dfe9c546c9f45397a7c7c4a90c25679492ff610a7500afc7d03a6"}, - {file = "SQLAlchemy-1.4.39-cp310-cp310-win32.whl", hash = "sha256:91d2b89bb0c302f89e753bea008936acfa4e18c156fb264fe41eb6bbb2bbcdeb"}, - {file = "SQLAlchemy-1.4.39-cp310-cp310-win_amd64.whl", hash = "sha256:50e7569637e2e02253295527ff34666706dbb2bc5f6c61a5a7f44b9610c9bb09"}, - {file = "SQLAlchemy-1.4.39-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:107df519eb33d7f8e0d0d052128af2f25066c1a0f6b648fd1a9612ab66800b86"}, - {file = "SQLAlchemy-1.4.39-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f24d4d6ec301688c59b0c4bb1c1c94c5d0bff4ecad33bb8f5d9efdfb8d8bc925"}, - {file = "SQLAlchemy-1.4.39-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7b2785dd2a0c044a36836857ac27310dc7a99166253551ee8f5408930958cc60"}, - {file = "SQLAlchemy-1.4.39-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6e2c8581c6620136b9530137954a8376efffd57fe19802182c7561b0ab48b48"}, - {file = "SQLAlchemy-1.4.39-cp36-cp36m-win32.whl", hash = "sha256:fbc076f79d830ae4c9d49926180a1140b49fa675d0f0d555b44c9a15b29f4c80"}, - {file = "SQLAlchemy-1.4.39-cp36-cp36m-win_amd64.whl", hash = "sha256:0ec54460475f0c42512895c99c63d90dd2d9cbd0c13491a184182e85074b04c5"}, - {file = "SQLAlchemy-1.4.39-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:6f95706da857e6e79b54c33c1214f5467aab10600aa508ddd1239d5df271986e"}, - {file = "SQLAlchemy-1.4.39-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:621f050e72cc7dfd9ad4594ff0abeaad954d6e4a2891545e8f1a53dcdfbef445"}, - {file = "SQLAlchemy-1.4.39-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05a05771617bfa723ba4cef58d5b25ac028b0d68f28f403edebed5b8243b3a87"}, - {file = "SQLAlchemy-1.4.39-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20bf65bcce65c538e68d5df27402b39341fabeecf01de7e0e72b9d9836c13c52"}, - {file = "SQLAlchemy-1.4.39-cp37-cp37m-win32.whl", hash = "sha256:f2a42acc01568b9701665e85562bbff78ec3e21981c7d51d56717c22e5d3d58b"}, - {file = "SQLAlchemy-1.4.39-cp37-cp37m-win_amd64.whl", hash = "sha256:6d81de54e45f1d756785405c9d06cd17918c2eecc2d4262dc2d276ca612c2f61"}, - {file = "SQLAlchemy-1.4.39-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:5c2d19bfb33262bf987ef0062345efd0f54c4189c2d95159c72995457bf4a359"}, - {file = "SQLAlchemy-1.4.39-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14ea8ff2d33c48f8e6c3c472111d893b9e356284d1482102da9678195e5a8eac"}, - {file = 
"SQLAlchemy-1.4.39-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec3985c883d6d217cf2013028afc6e3c82b8907192ba6195d6e49885bfc4b19d"}, - {file = "SQLAlchemy-1.4.39-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1962dfee37b7fb17d3d4889bf84c4ea08b1c36707194c578f61e6e06d12ab90f"}, - {file = "SQLAlchemy-1.4.39-cp38-cp38-win32.whl", hash = "sha256:047ef5ccd8860f6147b8ac6c45a4bc573d4e030267b45d9a1c47b55962ff0e6f"}, - {file = "SQLAlchemy-1.4.39-cp38-cp38-win_amd64.whl", hash = "sha256:b71be98ef6e180217d1797185c75507060a57ab9cd835653e0112db16a710f0d"}, - {file = "SQLAlchemy-1.4.39-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:365b75938049ae31cf2176efd3d598213ddb9eb883fbc82086efa019a5f649df"}, - {file = "SQLAlchemy-1.4.39-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7a7667d928ba6ee361a3176e1bef6847c1062b37726b33505cc84136f657e0d"}, - {file = "SQLAlchemy-1.4.39-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c6d00cb9da8d0cbfaba18cad046e94b06de6d4d0ffd9d4095a3ad1838af22528"}, - {file = "SQLAlchemy-1.4.39-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0538b66f959771c56ff996d828081908a6a52a47c5548faed4a3d0a027a5368"}, - {file = "SQLAlchemy-1.4.39-cp39-cp39-win32.whl", hash = "sha256:d1f665e50592caf4cad3caed3ed86f93227bffe0680218ccbb293bd5a6734ca8"}, - {file = "SQLAlchemy-1.4.39-cp39-cp39-win_amd64.whl", hash = "sha256:8b773c9974c272aae0fa7e95b576d98d17ee65f69d8644f9b6ffc90ee96b4d19"}, - {file = "SQLAlchemy-1.4.39.tar.gz", hash = "sha256:8194896038753b46b08a0b0ae89a5d80c897fb601dd51e243ed5720f1f155d27"}, -] -starlette = [ - {file = "starlette-0.20.4-py3-none-any.whl", hash = "sha256:c0414d5a56297d37f3db96a84034d61ce29889b9eaccf65eb98a0b39441fcaa3"}, - {file = "starlette-0.20.4.tar.gz", hash = "sha256:42fcf3122f998fefce3e2c5ad7e5edbf0f02cf685d646a83a08d404726af5084"}, -] -transliterate = [ - {file = "transliterate-1.10.2-py2.py3-none-any.whl", hash = "sha256:010a5021bf6021689c4fade0985f3f7b3db1f2f16a48a09a56797f171c08ed42"}, - {file = "transliterate-1.10.2.tar.gz", hash = "sha256:bc608e0d48e687db9c2b1d7ea7c381afe0d1849cad216087d8e03d8d06a57c85"}, -] -typing-extensions = [ - {file = "typing_extensions-4.3.0-py3-none-any.whl", hash = "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02"}, - {file = "typing_extensions-4.3.0.tar.gz", hash = "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"}, -] -urllib3 = [ - {file = "urllib3-1.26.9-py2.py3-none-any.whl", hash = "sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14"}, - {file = "urllib3-1.26.9.tar.gz", hash = "sha256:aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e"}, -] -uvicorn = [ - {file = "uvicorn-0.18.2-py3-none-any.whl", hash = "sha256:c19a057deb1c5bb060946e2e5c262fc01590c6529c0af2c3d9ce941e89bc30e0"}, - {file = "uvicorn-0.18.2.tar.gz", hash = "sha256:cade07c403c397f9fe275492a48c1b869efd175d5d8a692df649e6e7e2ed8f4e"}, -] diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 69e890bf..00000000 --- a/pyproject.toml +++ /dev/null @@ -1,31 +0,0 @@ -[tool.poetry] -name = "discoursio-api" -version = "0.2.0" -description = "" -authors = ["Discours DevTeam "] -license = "MIT" - -[tool.poetry.dependencies] -python = "^3.10" -python-frontmatter = "^1.0.0" -httpx = 
"^0.23.0" -ariadne = "^0.15.1" -passlib = "^1.7.4" -uvicorn = "^0.18.2" -SQLAlchemy = "^1.4.39" -starlette = "^0.20.4" -PyJWT = "^2.4.0" -transliterate = "^1.10.2" -requests = "^2.28.1" -pydantic = "^1.9.1" -bson = "^0.5.10" -itsdangerous = "^2.1.2" -psycopg2-binary = "^2.9.3" -Authlib = "^1.0.1" -aioredis = "^2.0.1" - -[tool.poetry.dev-dependencies] - -[build-system] -requires = ["poetry-core>=1.0.0"] -build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt index e369a526..6ff8641b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,10 +10,7 @@ itsdangerous authlib==0.15.5 httpx>=0.23.0 psycopg2-binary -bson -python-frontmatter transliterate requests bcrypt -bs4 websockets diff --git a/settings.py b/settings.py index cdb92226..d4803374 100644 --- a/settings.py +++ b/settings.py @@ -10,7 +10,7 @@ RESET_PWD_URL = environ.get("RESET_PWD_URL") or "https://localhost:8080/reset_pw CONFIRM_EMAIL_URL = environ.get("CONFIRM_EMAIL_URL") or "https://new.discours.io" ERROR_URL_ON_FRONTEND = environ.get("ERROR_URL_ON_FRONTEND") or "https://new.discours.io" -DB_URL = environ.get("DATABASE_URL") or environ.get("DB_URL") or "postgresql://localhost:5432/discoursio" or "sqlite:///db.sqlite3" +DB_URL = environ.get("DATABASE_URL") or environ.get("DB_URL") or "postgresql://postgres@localhost:5432/discoursio" or "sqlite:///db.sqlite3" JWT_ALGORITHM = "HS256" JWT_SECRET_KEY = "8f1bd7696ffb482d8486dfbc6e7d16dd-secret-key" JWT_LIFE_SPAN = 24 * 60 * 60 # seconds