@@ -448,67 +448,243 @@ def get_tweet_html (tweet_id):
-@twitter_app.route('/followers/<user_id>.html', methods=['GET'])
-def get_followers_html (user_id):
-
-    if not g.twitter_user:
-        return 'need to log in.', 403
+@twitter_app.route('/tweet2/<tweet_id>.html', methods=['GET'])
+def get_tweet2_html (tweet_id):
-
-    use_cache = request.args.get('use_cache')
-    token = g.twitter_user['access_token']
+
+    pagination_token = request.args.get('pagination_token')
+    view = request.args.get('view', 'replies')
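+    # 'view' selects how much of the conversation is rendered: 'replies'
+    # (default), 'thread' (author tweets only), 'conversation' (everything),
+    # or 'tweet' (the tweet alone).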
-
-    social_source = TwitterApiV2SocialGraph(token)
-
-    if use_cache:
-        print(f'using cache for user {user_id}: {use_cache}')
-        with open(f'.data/cache/followers_{user_id}_{use_cache}.json', 'rt') as f:
-            response_json = json.load(f)
+
+    if g.twitter_user:
+        token = g.twitter_user['access_token']
     else:
-        response_json = social_source.get_followers(user_id, max_results=1000, return_dataclass=True)
+        token = os.environ.get('BEARER_TOKEN')
+
+    tweet_source = ApiV2TweetSource(token)
-
-    ts = int(time.time() * 1000)
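+
+    # only_replies mirrors the default view and is echoed into the pagination
+    # URL built below.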
+    only_replies = view == 'replies'
+
+    tweets = []
+    if not pagination_token:
+        tweet_page = get_content(f'twitter:tweet:{tweet_id}', me=g.me)
-
-    print(f'followers cache for {user_id}: {ts}')
+        tweets.append(tweet_page.items[0])
+
+    replies_page = get_content(f'twitter:tweets:replies:{tweet_id}', me=g.me)
+
+    skip_embed_replies = False
+
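+    # Dispatch on the requested view; 'thread' narrows to the author's own
+    # tweets and turns off embedded replies in the template.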
+    if view == 'replies':
+        replies_response = tweet_source.get_thread(tweet_id,
+                                                   only_replies=True,
+                                                   pagination_token = pagination_token,
+                                                   return_dataclass=True)
+    elif view == 'thread':
+        skip_embed_replies = True
+        # FIXME tweets is empty when a pagination_token is supplied, so
+        # tweets[0] raises IndexError on page 2+ of a thread.
+        replies_response = tweet_source.get_thread(tweet_id,
+                                                   only_replies=False,
+                                                   author_id=tweets[0].author_id,
+                                                   pagination_token = pagination_token,
+                                                   return_dataclass=True)
+    elif view == 'conversation':
+        replies_response = tweet_source.get_thread(tweet_id,
+                                                   only_replies=False,
+                                                   pagination_token = pagination_token,
+                                                   return_dataclass=True)
+    elif view == 'tweet':
+        replies_response = None
+
|
|
|
|
+ next_token = None
|
|
|
|
+
|
|
|
|
+ #print("conversation meta:")
|
|
|
|
+ #print(json.dumps(tweets_response.get('meta'), indent=2))
|
|
|
|
+
+    if replies_response and replies_response.meta and replies_response.meta.result_count:
+
+        includes = replies_response.includes
+        tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), replies_response.data)) + tweets
-
-    with open(f'{DATA_DIR}/cache/followers_{user_id}_{ts}.json', 'wt') as f:
-        json.dump(response_json, f, indent=2)
+        next_token = replies_response.meta.next_token
+
+    # this method is OK except it doesn't work if there are no replies.
+    #tweets.append(tweet_model(includes, list(filter(lambda t: t['id'] == tweet_id, includes.get('tweets')))[0], me))
+
+    #related_tweets = [] # derived from includes
+
+    tweets.reverse()
+
+    query = {}
+
+    if next_token:
+        query = {
+            **query,
+            # FIXME only_replies
+            'next_data_url': url_for('.get_tweet_html', tweet_id=tweet_id, pagination_token=next_token, only_replies = '1' if only_replies else '0', author_id = tweets[0].author_id),
+            'next_page_url': url_for('.get_tweet_html', tweet_id=tweet_id, view=view, pagination_token=next_token)
+        }
-
-    #print(response_json)
-    #run_script('on_user_seen', {'twitter_user': g.twitter_user, 'users': response_json})
+
+    user = {}
+
+    if 'HX-Request' in request.headers:
+
+        # console.log(res.tweets.map(t => t.text).join("\n\n-\n\n"))
+        return render_template('partial/tweets-timeline.html', user = user, tweets = tweets, query = query)
+    else:
+        page_nav = [
+            dict(
+                href = url_for('.get_tweet_html', tweet_id=tweets[0].conversation_id, view='thread'),
+                label = 'author thread',
+                order = 10
+            ),
+            dict(
+                href = url_for('.get_tweet_html', tweet_id=tweets[0].conversation_id, view='conversation'),
+                label = 'full convo',
+                order = 20
+            )
+        ]
+
+        # NOTE: assumes at least one reply came back; replies_response is None
+        # for view='tweet' and empty for reply-less tweets.
+        tweet = replies_response.data[0]
+        user = list(filter(lambda u: u.id == tweet.author_id, replies_response.includes.users))[0]
+
+        source_url = f'https://twitter.com/{user.username}/status/{tweet_id}'
+        title = f'Tweet by {user.name} at {tweet.created_at}'
+
+        opengraph_info = dict(
+            type = 'webpage', # threads might be article
+            url = source_url,
+            title = title,
+            description = tweet.text,
+            image = user.profile_image_url
+        )
+
+        if view == 'replies':
+            tweet = tweets[0]
+
+            if tweet.id == '1608510741941989378':
+                unreplied = [
+                    UnrepliedSection(
+                        description = "Not clear what GS is still.",
+                        span = (40, 80)
+                    )
+                ]
+                tweet = replace(tweet,
+                    unreplied = unreplied
+                )
+
+            expand_parts = request.args.get('expand')
+            if expand_parts:
+                expand_parts = expand_parts.split(',')
+
+            def reply_to_thread_item (fi):
+                nonlocal expand_parts
+
+                if fi.id == '1609714342211244038':
+                    print(f'reply_to_thread_item id={fi.id}')
+                    unreplied = [
+                        UnrepliedSection(
+                            description = "Is there proof of this claim?",
+                            span = (40, 80)
+                        )
+                    ]
+                    fi = replace(fi,
+                        unreplied = unreplied
+                    )
+
+                children = None
+
+                if expand_parts and len(expand_parts) and fi.id == expand_parts[0]:
+                    expand_parts = expand_parts[1:]
+
+                    print(f'getting expanded replies for tweet={fi.id}')
+
+                    expanded_replies_response = tweet_source.get_thread(fi.id,
+                                                                        only_replies=True,
+                                                                        return_dataclass=True)
+                    if expanded_replies_response.data:
+                        print('we got expanded responses data')
+
+                        children = list(map(lambda t: tweet_model_dc_vm(expanded_replies_response.includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), expanded_replies_response.data))
+                        children = list(map(reply_to_thread_item, children))
+
+                return ThreadItem(feed_item=fi, children=children)
+
+            children = list(map(reply_to_thread_item, tweets[1:]))
+
+            root = ThreadItem(
+                feed_item = tweet,
+                children = children
+            )
+            return render_template('tweet-thread.html', user = user, root = root, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
+        else:
+            # theme_variant is assumed to be defined at module scope.
+            return render_template(f'tweet-collection{theme_variant}.html', user = user, tweets = tweets, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
+
+
+@twitter_app.route('/followers/<user_id>.html', methods=['GET'])
+def get_followers_html (user_id):
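+    # Followers are resolved through the content system; pagination flows
+    # through content_params.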
-    #followers = list(map(lambda f: f['id'], response_json.get('data')))
-    followers = response_json.data
+
+    me = g.me
-
-    followers = list(map(user_model_dc, followers))
+
+    content_params = cleandict({
+        'max_results': int(request.args.get('max_results', 1000)),
+        'pagination_token': request.args.get('pagination_token')
+    })
+
+    followers_page = get_content(f'twitter:followers:user:{user_id}', **content_params)
+
+    followers = followers_page.items
+
+    content_params['pagination_token'] = followers_page.next_token
+
+    query = {
+        'next_data_url': url_for('.get_followers_html', me=me, user_id=user_id, **content_params)
+    }
+
+    if 'HX-Request' in request.headers:
+        return render_template('partial/users-list.html', users=followers, query=query)
+    else:
+        return render_template('followers.html', users=followers, query=query)
-
-    return render_template('following.html', users=followers)

 @twitter_app.route('/following/<user_id>.html', methods=['GET'])
 def get_following_html (user_id):
-
-    if not g.twitter_user:
-        return 'need to log in.', 403
+
+    me = g.me
-
-    token = g.twitter_user['access_token']
+
+    content_params = cleandict({
+        'max_results': int(request.args.get('max_results', 1000)),
+        'pagination_token': request.args.get('pagination_token')
+    })
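+    # Mirrors get_followers_html, keyed on 'following' instead of 'followers'.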
-
-    social_source = TwitterApiV2SocialGraph(token)
+
+    following_page = get_content(f'twitter:following:users:{user_id}', **content_params)
-
-    response_json = social_source.get_following(user_id, max_results=1000, return_dataclass=True)
+
+    following = following_page.items
-
-    ts = int(time.time() * 1000)
-    with open(f'{DATA_DIR}/cache/following_{user_id}_{ts}.json', 'wt') as f:
-        f.write(json.dumps(response_json))
-
-    #print(response_json)
-    #run_script('on_user_seen', {'twitter_user': g.twitter_user, 'users': response_json})
+
+    content_params['pagination_token'] = following_page.next_token
-
-    #following = list(map(lambda f: f['id'], response_json.get('data')))
-
-    following = list(map(user_model_dc, response_json.data))
+
+    query = {
+        'next_data_url': url_for('.get_following_html', me=me, user_id=user_id, **content_params)
+    }
-
-    return render_template('following.html', users=following)
+
+    if 'HX-Request' in request.headers:
+        return render_template('partial/users-list.html', users=following, query=query)
+    else:
+        return render_template('following.html', users=following, query=query)

 # ---------------------------------------------------------------------------------------------------------

@@ -601,14 +777,10 @@ def get_timeline_home_html (variant = "reverse_chronological", pagination_token=
         'end_time': request.args.get('end_time')
     })

-    tweet_source = ApiV2TweetSource(token)
-    response = tweet_source.get_home_timeline(user_id, **tq)
-
-    #print(json.dumps(response_json, indent=2))
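+    # The home timeline is likewise resolved through the content system; the
+    # key encodes the feed variant and the requesting user.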
+    timeline_page = get_content(f'twitter:feed:reverse_chronological:user:{user_id}', me=g.me, **tq)
-
-    includes = response.includes
-    tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, g.me), response.data))
-    next_token = response.meta.next_token
+
+    next_token = timeline_page.next_token
+    tweets = timeline_page.items

     tq['pagination_token'] = next_token