@@ -1,3 +1,4 @@
+from ulid import ULID
from dataclasses import asdict, replace

from importlib.util import find_spec
@@ -9,9 +10,9 @@ import json
from flask import request, g, jsonify, render_template, Blueprint, url_for, session

from twitter_v2.api import ApiV2TweetSource

-from .view_model import FeedItem, cleandict
+from .view_model import FeedItem, CollectionPage, cleandict

-from .content_system import get_all_content, register_content_source
+from .content_system import get_content, get_all_content, register_content_source

twitter_enabled = False
if find_spec('twitter_v2_facade'):
@@ -19,8 +20,8 @@ if find_spec('twitter_v2_facade'):
    twitter_enabled = True

youtube_enabled = False
-if find_spec('youtube_facade'):
-    from youtube_facade import youtube_model, get_youtube_builder
+if find_spec('youtube_v3_facade'):
+    from youtube_v3_facade.content_source import youtube_model, get_youtube_builder
    youtube_enabled = True

DATA_DIR=".data"
@@ -32,7 +33,11 @@ item_collections_bp = Blueprint('item_collections', 'item_collections',


def get_tweet_collection (collection_id):
-    with open(f'{DATA_DIR}/collection/{collection_id}.json', 'rt', encoding='utf-8') as f:
+    json_path = f'{DATA_DIR}/collection/{collection_id}.json'
+    if not os.path.exists(json_path):
+        return
+
+    with open(json_path, 'rt', encoding='utf-8') as f:
        collection = json.loads(f.read())

    return collection
@@ -195,9 +200,8 @@ def get_collection_html (collection_id):
            query['next_page_url'] = url_for('.get_collection_html', collection_id=collection_id, pagination_token=pagination_token)
        return render_template('tweet-collection.html', tweets = feed_items, user = {}, query = query)

-# pagination token is the next tweet_ID
-@item_collections_bp.get('/collections.html')
-def get_collections_html ():
+
+def get_collection_list (me = None):
    me = request.args.get('me')
    acct = session.get(me)

@@ -213,17 +217,99 @@ def get_collections_html ():
            continue

        collection_id = collection_file.name[:-len('.json')]
+        version = coll.get('_version')
+
+        if not version: # legacy
+            version = str(ULID())

        coll_info = dict(
-            collection_id = collection_id,
+            id = collection_id,
+            _version = version,
            href = url_for('.get_collection_html', collection_id=collection_id)
        )

        collections.append(coll_info)
+
+    return collections
+
+# pagination token is the next tweet_ID
+@item_collections_bp.get('/collections.html')
+def get_collections_html ():
+    me = request.args.get('me')
+
+    collections = get_content('collections:list', me=me)
+
    return jsonify(collections)

+def update_collection (collection_id, new_collection, op='replace', version=None, me = None):
+    path = f'.data/collection/{collection_id}.json'
+
+    existing_collection = None
+    if os.path.exists(path):
+        with open(path, 'rt', encoding='utf-8') as f:
+            existing_collection = json.load(f)
+
+    existing_version = existing_collection and existing_collection.get('_version')
+    if existing_collection and existing_version != version:
+        raise Exception('updating with a wrong version. probably using a stale copy. fetch and retry op.')
+
+
+    if op == 'insert':
+        after_id = request.form.get('after_id')
+        raise Exception('not supported yet')
+
+    elif op == 'append':
+        existing_collection['items'] += new_collection['items']
+        new_collection = existing_collection
+
+    elif op == 'prepend':
+        existing_collection['items'] = new_collection['items'] + existing_collection['items']
+        new_collection = existing_collection
+
+
+    new_version = str(ULID()) # content addressable hash of json w/o this key or similar
+    new_collection['_version'] = new_version
+
+    with open(path, 'wt', encoding='utf-8') as f:
+        json.dump(new_collection, f)
+
+    return new_version
+
+@item_collections_bp.post('/collection/<collection_id>.html')
+def post_collection_html (collection_id):
+
+    me = request.args.get('me')
+
+    op = request.form.get('op', 'replace')
+    version = request.form.get('version')
+
+    new_collection = request.form.get('collection.json')
+    new_collection = json.loads(new_collection) # FIXME probably wrong
+
+    new_version = get_content(f'collections:update:{collection_id}', new_collection, op=op, version=version, me=me)
+
+    return jsonify({'_version': new_version})
+
+
+@item_collections_bp.get('/collection/test-update/<collection_id>.html')
+def get_collection_test_update_html (collection_id):
+
+    me = None
+    op = 'prepend'
+    version = request.args.get('version')
+
+    new_collection = {
+        'items': [{'id': 'zzz999'}]
+    }
+
+    new_version = get_content(f'collections:update:{collection_id}',
+                              new_collection,
+                              version=version,
+                              op=op,
+                              me=me)
+
+    return jsonify({'_version': new_version})
+
@item_collections_bp.post('/data/collection/create/from-cards')
def post_data_collection_create_from_cards ():
    """
@@ -270,54 +356,88 @@ def post_data_collection_create_from_cards ():

    return jsonify(collection)

-
-
-def expand_item2 (item, me, tweet_contents = None, includes = None, youtube_contents = None):
-    if 'id' in item:
-        tweets_response = tweet_contents[ 'twitter:tweet:' + item['id'] ]
-        tweets = tweets_response.items
+def get_item_id (obj_or_dict, id_key='id'):
+    if type(obj_or_dict) == dict:
+        return obj_or_dict[id_key]
+    else:
+        return getattr(obj_or_dict, id_key)
+
+def expand_item2 (item, me, content_responses):
+
+    content_id = item['id']
+
+    content_response = content_responses[ content_id ]
+
+    if type(content_response) == CollectionPage:
+        tweets = content_response.items
+    elif type(content_response) == list:
+        tweets = content_response
+    else:
+        tweets = [content_response]
+
+    # endswith is a hack. Really FeedItems should return a full ID with prefix.
+    t = list(filter(lambda t: content_id.endswith(f':{get_item_id(t)}'), tweets))
+
+    if not len(t):
+        print("no tweet for item: " + item['id'])
+        feed_item = FeedItem(
+            id = item['id'],
+            text = "(Deleted, suspended or blocked)",
+            created_at = "",
+            handle = "error",
+            display_name = "Error"
+        )
+        # FIXME 1) put this in relative order to the collection
+        # FIXME 2) we can use the tweet link to get the user ID...
-        t = list(filter(lambda t: item['id'] == t.id, tweets))
+    else:
+        feed_item = t[0]
+
+    note = item.get('note')
-        if not len(t):
-            print("no tweet for item: " + item['id'])
-            feed_item = FeedItem(
-                id = item['id'],
-                text = "(Deleted, suspended or blocked)",
-                created_at = "",
-                handle = "error",
-                display_name = "Error"
-            )
-            # FIXME 1) put this in relative order to the collection
-            # FIXME 2) we can use the tweet link to get the user ID...
-
+    if type(feed_item) == dict:
+        feed_item.update(note = note)
    else:
-            feed_item = t[0]
-
-        note = item.get('note')
        feed_item = replace(feed_item, note = note)
-
-    elif 'yt_id' in item:
-        yt_id = item['yt_id']
-
-        yt_videos = youtube_contents[ 'youtube:video:' + yt_id ]
-
-        feed_item = list(filter(lambda v: v['id'] == yt_id, yt_videos))[0]
-
-        note = item.get('note')
-        feed_item.update({'note': note})
-
+

    return feed_item

-def get_collection (collection_id, pagination_token=None, max_results=10):
+def get_collection (collection_id, me=None, pagination_token:str = None, max_results:int = 10):
    collection = get_tweet_collection(collection_id)

+    if not collection:
+        return
+
+    first_idx = int(pagination_token or 0)
+    last_idx = first_idx + max_results
+
+
+    items = collection['items'][first_idx:last_idx]
+    content_ids = list(map(lambda item: item['id'], items))
+
+    content_responses = get_all_content( content_ids )
+
+    feed_items = list(map(lambda item: expand_item2(item, me, content_responses), items))
+
+    collection['items'] = feed_items
+
+    if len(collection['items']) == max_results:
+        collection['next_token'] = str(last_idx)
+
    return collection

-register_content_source("collection:", get_collection, id_pattern="([^:]+)")
+
+
+def register_content_sources ():
+    register_content_source("collection:", get_collection, id_pattern="([^:]+)")
+    register_content_source("collections:list", get_collection_list, id_pattern="")
+    register_content_source("collections:update:", update_collection, id_pattern="([A-Za-z0-9\-\_\.]+)")
+
+

# pagination token is the next tweet_ID
@item_collections_bp.get('/collection2/<collection_id>.html')
@@ -325,38 +445,26 @@ def get_collection2_html (collection_id):
    me = request.args.get('me')
    acct = session.get(me)

-    max_results = int(request.args.get('max_results', 10))
+    max_results = int(request.args.get('limit', 1))
-    pagination_token = int(request.args.get('pagination_token', 0))
+    pagination_token = request.args.get('pagination_token', 0)

    #collection = get_tweet_collection(collection_id)
    collection = get_content(f'collection:{collection_id}',
+                             me=me,
                             pagination_token=pagination_token,
                             max_results=max_results)

    if 'authorized_users' in collection and (not acct or not me in collection['authorized_users']):
        return 'access denied.', 403

-    items = collection['items'][pagination_token:(pagination_token + max_results)]
+    feed_items = collection['items']
+    pagination_token = collection.get('next_token')

-    if not len(items):
+    if not len(feed_items):
        return 'no tweets', 404

-    tweet_ids = filter(lambda i: 'id' in i, items)
-    tweet_ids = list(map(lambda item: 'twitter:tweet:' + item['id'], tweet_ids))
-
-    tweet_contents = get_all_content( tweet_ids )
-
-
-    yt_ids = filter(lambda i: 'yt_id' in i, items)
-    yt_ids = list(map(lambda item: 'youtube:video:' + item['yt_id'], yt_ids))
-
-    youtube_contents = get_all_content( yt_ids )
-
-
-    includes = None
-
-    feed_items = list(map(lambda item: expand_item2(item, me, tweet_contents, includes, youtube_contents), items))

    if request.args.get('format') == 'json':
        return jsonify({'ids': tweet_ids,
@@ -368,11 +476,12 @@ def get_collection2_html (collection_id):
    query = {}

    if pagination_token:
-        query['next_data_url'] = url_for('.get_collection_html', collection_id=collection_id, pagination_token=pagination_token)
+        query['next_data_url'] = url_for('.get_collection2_html', collection_id=collection_id, pagination_token=pagination_token, limit=max_results, me=me)
+        query['next_page_url'] = url_for('.get_collection2_html', collection_id=collection_id, pagination_token=pagination_token, limit=max_results, me=me)

    if 'HX-Request' in request.headers:
        return render_template('partial/tweets-timeline.html', tweets = feed_items, user = {}, query = query)
    else:
        if pagination_token:
-            query['next_page_url'] = url_for('.get_collection2_html', me=me, collection_id=collection_id, pagination_token=pagination_token)
        return render_template('tweet-collection.html', tweets = feed_items, user = {}, query = query)
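
# Usage sketch: a minimal illustration of how the content sources registered
# above might be invoked, assuming the app calls register_content_sources()
# at startup and that content_system.get_content dispatches on the registered
# prefixes (as the route handlers in the diff do). The collection id
# 'my-collection' and handle '@someone' are hypothetical placeholders.

register_content_sources()

# List collections (dispatches to get_collection_list).
collections = get_content('collections:list', me='@someone')

# Fetch one page of a collection (dispatches to get_collection). The
# pagination token is the next item index; a full page also sets 'next_token'.
page = get_content('collection:my-collection', me='@someone',
                   pagination_token=None, max_results=10)

# Versioned update (dispatches to update_collection): pass the _version last
# read, if the stored collection has one; a stale version raises rather than
# overwriting the file.
new_version = get_content('collections:update:my-collection',
                          {'items': [{'id': 'twitter:tweet:123'}]},
                          op='prepend',
                          version=page and page.get('_version'),
                          me='@someone')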