snapshot 2023-05-05

Harlan Iverson, 1 year ago
parent commit 09af49f48c

+ 52 - 0
docs/dev/architecture.md

@@ -0,0 +1,52 @@
+# Architecture
+
+## Edge Case Debates
+
+* Is an email Inbox/conversation more like a collection or a feed?
+
+
+## Content System
+
+Content sources can be registered to handle content IDs. The first source registered for a matching pattern is used.
+
+Source selection should take connectivity, availability, and similar constraints into account.
+
+Live API, Offline cache, Archive sources.
+
+If the live API only has 7 days of data, we may fall back to a cached or exported archive.
+
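+A minimal sketch of this registration and fallback flow, using register_content_source and get_content from hogumathi_app.content_system (the live and archive sources here are hypothetical):
+
+```python
+from hogumathi_app.content_system import register_content_source, get_content
+
+def get_tweet_live (tweet_id):
+    # Hypothetical live source: returns None when the live API can't serve
+    # the ID (e.g. older than its 7-day window), so the next source is tried.
+    return None
+
+def get_tweet_archive (tweet_id):
+    # Hypothetical fallback source backed by an exported archive.
+    return {'id': tweet_id, 'text': 'from the archive'}
+
+register_content_source('twitter:tweet:', get_tweet_live, id_pattern=r'(?P<tweet_id>\d+)')
+register_content_source('twitter:tweet:', get_tweet_archive, id_pattern=r'(?P<tweet_id>\d+)')
+
+# Sources are tried in registration order; the first truthy result wins.
+tweet = get_content('twitter:tweet:1234567890')
+```
+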
+## Testing
+
+Unit and E2E tests run offline, except integration portions.
+
+We can use cached responses together with the responses module to accomplish this.
+
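+For example, a unit test can replay a canned payload through the responses library so that no network access happens (the endpoint and payload here are illustrative):
+
+```python
+import requests
+import responses
+
+@responses.activate
+def test_user_timeline_offline ():
+    # Serve a cached payload instead of hitting the live API.
+    responses.add(
+        responses.GET,
+        'https://api.twitter.com/2/users/123/tweets',
+        json={'data': [{'id': '1', 'text': 'hello'}]},
+        status=200,
+    )
+    
+    resp = requests.get('https://api.twitter.com/2/users/123/tweets')
+    assert resp.json()['data'][0]['id'] == '1'
+```
+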
+## Provider template
+
+Twitter is the furthest along, but each provider has some distinct features.
+
+I was learning Python as I built this project.
+
+## Extensibility
+
+We rely as much as possible on the constituent parts that we build upon.
+
+Trac is an inspiration, but it invents everything from scratch, which is over-engineering for us at this point.
+
+We'll vet the pieces that we use and rely on them: Flask for the HTTP routing and themes.
+
+Dataclasses for the serialization.
+
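+As a sketch of the round trip (dacite is already used by the facades; the Page class here is illustrative, not the real view model):
+
+```python
+from dataclasses import dataclass, asdict
+from typing import List, Optional
+
+from dacite import from_dict
+
+@dataclass
+class Page:
+    items: List[str]
+    next_token: Optional[str] = None
+
+page = Page(items=['a', 'b'], next_token='abc123')
+
+wire = asdict(page)               # dataclass -> dict, ready for json.dumps
+restored = from_dict(Page, wire)  # dict -> dataclass on the receiving side
+assert restored == page
+```
+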
+https://stackoverflow.com/questions/7505988/importing-from-a-relative-path-in-python
+
+https://www.geeksforgeeks.org/absolute-and-relative-imports-in-python/
+
+https://stackoverflow.com/questions/40892104/how-to-organize-one-blueprint-with-multiple-routes-files
+
+### Content
+
+### Themes
+
+### Routes
+

+ 48 - 0
docs/dev/operations.md

@@ -0,0 +1,48 @@
+# hogumathi Operations
+
+
+## Release packaging steps
+
+1) Clear Python cache files (see the sketch after this list):
+
+https://stackoverflow.com/questions/28991015/python3-project-remove-pycache-folders-and-pyc-files#30659970
+
+2) Create hogumathi-app_v__ and hogumathi-extensions_v___ directories.
+3) Copy everything into hogumathi-app_v___
+4) Create a lib2 and extensions2 directory next to lib and extensions
+5) Cut premium libs and extensions into the lib2 and extensions2 directories, respectively.
+6) Cut premium files and paste them into hogumathi-extensions_v___
+7) Rename the _2 directories to remove the 2, leaving simply lib and extensions.
+8) After staging, create a zip for each and distribute.
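+
+One way to do step 1, as a Python sketch (run from the project root; removes every __pycache__ directory and stray .pyc/.pyo file):
+
+```python
+import pathlib
+import shutil
+
+root = pathlib.Path('.')
+for cache_dir in root.rglob('__pycache__'):
+    shutil.rmtree(cache_dir)
+for pyc in root.rglob('*.py[co]'):
+    pyc.unlink()
+```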
+
+## Staging steps
+
+1) Copy hogumathi-app_v___
+2) Cut .env and .data into the copied directory
+3) Run hogumathi from within the staging directory
+4) QA the public release
+
+5) Copy content of hogumathi-extensions_v___ into staging directory
+6) Run hogumathi from within the staging directory
+7) QA the premium release
+
+## Deploy steps
+
+1) Upload the hogumathi-app_v__ and hogumathi-extensions_v___ zip files to the Glitch assets tab
+2) In Glitch terminal, change to .data/releases
+3) Delete prior releases
+4) Wget the URLs from the assets
+5) Unzip the two .zip files
+6) Copy the content of hogumathi-app_v__ directory to ~
+7) Copy the content of hogumathi-extensions_v___ directory to ~/.data
+8) Type 'refresh' into the console
+9) Ensure the app operates correctly
+10) Delete the zip files from assets
+
+
+## Distribution steps
+
+1) Upload to Hogumathi Premium subscription on the Content tab; ensure it's available to all tiers.
+2) Post an announcement to Patreon, Twitter, and other channels.
+
+

+ 61 - 0
docs/dev/syndication_taxonomy.md

@@ -0,0 +1,61 @@
+# Syndication Taxonomy
+
+These items are implemented as Dataclasses and are network serializable.
+
+They are designed as a ViewModel.
+
+The Hogumathi feed syndication will be the real test of whether this is a viable approach...
+
+Strictly speaking, the network layer should have a distinct ValueObject.
+
+"Actions" will be tricky. I think they're a map currently, and require regular expressions.
+
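+As a sketch, the actions map currently pairs an action name with a FeedItemAction (a route name plus its parameters); the exact entries vary per provider, and these two are illustrative:
+
+```python
+from hogumathi_app.view_model import FeedItemAction
+
+actions = {
+    'bookmark': FeedItemAction('twitter_v2_facade.post_tweet_bookmark', {'tweet_id': '123'}),
+    'view_replies': FeedItemAction('twitter_v2_facade.get_tweet_html', {'tweet_id': '123'}),
+}
+```
+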
+## FeedItem
+
+Presently this is implemented as a ViewModel.
+
+## FeedItemMedia
+
+Media item attached to a FeedItem
+
+## User
+
+## Feed
+
+FeedItems - chronologically ordered
+
+## Collection
+
+CollectionItem - ordered by 'user'
+
+
+## CollectionItem
+
+(presently implemented as FeedItem. Could end up nesting FeedItem)
+
+Think of Search and Playlist as a user, maybe with read-only access.
+
+## List
+
+ListMembers - ordered
+
+## ListMember
+
+User
+
+## Conversation
+
+Ordered collection of Messages
+
+## Message
+
+From User. (Participant?)
+
+To Users.
+
+FeedItems can be attached.
+
+
+## CollectionPage
+
+A page of ViewModel collection items with a next_token.
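+
+A consumer pages through a collection by passing next_token back in as pagination_token; a sketch against the content system (the feed ID and kwargs are illustrative):
+
+```python
+from hogumathi_app.content_system import get_content
+
+page = get_content('twitter:feed:user:12345', max_results=10)
+items = list(page.items)
+
+while page.next_token:
+    page = get_content('twitter:feed:user:12345', pagination_token=page.next_token)
+    items += page.items
+```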

+ 12 - 6
extensions/twitter_archive_facade/facade.py

@@ -4,7 +4,7 @@ from dacite import from_dict
 
 from configparser import ConfigParser
 import base64
-from flask import Flask, json, Response, render_template, request, send_from_directory, Blueprint, url_for, g
+from flask import Flask, json, Response, render_template, request, send_from_directory, Blueprint, url_for, g, jsonify
 from flask_cors import CORS
 import sqlite3
 import os
@@ -440,7 +440,7 @@ def get_tweet_html (tweet_id = None):
     if not tweet_id:
         ids = request.args.get('ids').split(',')
         
-        collection_page = content_system.get_content('twitter:tweets', ids=ids, content_source_id='twitter_archive_facade.facade:get_tweets')
+        collection_page = content_system.get_content('twitter:tweets', ids=tuple(ids), content_source_id='twitter_archive_facade.facade:get_tweets')
         
         tweets = collection_page.items
     else:
@@ -571,8 +571,8 @@ from tweet
 
 
 
-@twitter_app.route('/tweets/on-this-day.html', methods=['GET'])
-def get_tweets_on_this_day ():
+@twitter_app.route('/tweets/on-this-day.<response_format>', methods=['GET'])
+def get_tweets_on_this_day (response_format):
     
     otd_method = request.args.get("otd_method", "traditional")
     
@@ -617,14 +617,20 @@ where
     
     sql_params = []
     
-    collection_page = content_system.get_content('twitter:tweets:search:sql', sql=sql, sql_params=sql_params)
+    collection_page = content_system.get_content('twitter:tweets:search:sql', sql=sql, sql_params=tuple(sql_params))
     
     tweets = collection_page.items
     
     query = {}
     profile_user = {}
     
-    return render_template('search.html', user = profile_user, tweets = tweets, query = query)
+    if response_format == 'html':
+        return render_template('search.html', user = profile_user, tweets = tweets, query = query)
+    elif response_format == 'json':
+        response = dict(
+            data = tweets
+        )
+        return jsonify(response)
 
 
 

+ 392 - 16
extensions/twitter_v2_facade/content_source.py

@@ -8,19 +8,301 @@ And the rest of the Taxonomy.
 """
 
 from dataclasses import asdict
+from typing import List, Optional
 import os
 from flask import session, g, request
 import time
+from datetime import datetime, timezone
 import json
+import sqlite3
 
 from twitter_v2.api import ApiV2TweetSource, TwitterApiV2SocialGraph, ApiV2ConversationSource
 
+import hogumathi_app.view_model as h_vm
+
+
 from hogumathi_app.view_model import CollectionPage, cleandict
+from hogumathi_app.content_system import register_content_source, get_content, register_hook
+
 from .view_model import tweet_model_dc_vm, user_model_dc
 
-from hogumathi_app.content_system import register_content_source, get_content, register_hook
 
 DATA_DIR='.data'
+CACHE_PATH = f'{DATA_DIR}/twitter_v2_cache.db'
+
+def init_cache_db ():
+    db = sqlite3.connect(CACHE_PATH)
+    
+    cur = db.cursor()
+    
+    table_exists = cur.execute(f"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='tweet'").fetchone()[0]
+    
+    if not table_exists:
+        cur.execute("""
+        create table query (
+            created_at timestamp,
+            user_id text,
+            last_accessed_at timestamp,
+            next_token text,
+            query_type text,
+            auth_user_id text
+        )
+        """)
+        
+        cur.execute("""
+        create table tweet (
+            id text,
+            accessed_at timestamp,
+            query_id int,
+            data text,
+            unique(id, query_id)
+        )
+        """)
+        
+        cur.execute("""
+        create table user (
+            id text,
+            accessed_at timestamp,
+            query_id int,
+            data text,
+            unique(id, query_id)
+        )
+        """)
+        
+        cur.execute("""
+        create table medium (
+            id text,
+            accessed_at timestamp,
+            query_id int,
+            data text,
+            unique(id, query_id)
+        )
+        """)
+        cur.connection.commit()
+        print(f'--- created {CACHE_PATH}')
+        
+    cur.close()
+
+def cache_tweets_response (response_tweets, query_type, auth_user_id, user_id = None, pagination_token=None, ts=None):
+    """
+    In bookmarks I observed that the same next_token is returned even with distinct new queries started.
+    
+    So in the case of abandoned paginations, we can end up with duplicate next_token records,
+    meaning we could update the wrong query_id, having downstream timestamp effects.
+    """
+    includes = response_tweets.includes
+    tweets = response_tweets.data or []
+    users = includes and includes.users or []
+    media = includes and includes.media or []
+    next_token = response_tweets.meta.next_token
+    
+    db = sqlite3.connect(CACHE_PATH)
+    cur = db.cursor()
+    
+    # SQLite is naive by default, so make sure this is UTC.
+    now = datetime.now(timezone.utc)
+    if ts:
+        now = ts
+    
+    if not pagination_token:
+        cur.execute("""
+            insert into query (
+                created_at,
+                last_accessed_at,
+                user_id,
+                next_token,
+                query_type,
+                auth_user_id
+                )
+                values (
+                    ?,?,?,?,?,?
+                )
+        """,
+        [now, now, user_id, next_token, query_type, auth_user_id]
+        )
+        
+        query_id = cur.lastrowid
+    else:
+        query_id = cur.execute("""
+                select rowid from query
+                where next_token = :next_token
+            """,
+            {
+                'next_token': pagination_token
+            }).fetchone()[0]
+        
+        cur.execute("""
+                update query set 
+                    last_accessed_at = :last_accessed_at, 
+                    next_token = :next_token 
+                    where rowid = :query_id
+            """,
+            {
+                'last_accessed_at': now,
+                'next_token': next_token,
+                'query_id': query_id
+            })
+    
+    for tweet in tweets:
+        tweet_json = json.dumps(cleandict(asdict(tweet)))
+        
+        cur.execute("""
+            insert or ignore into tweet (
+                id,
+                accessed_at,
+                query_id,
+                data
+                )
+            values (
+                ?,?,?,?
+                )
+            """,
+            [ tweet.id, now, query_id, tweet_json ]
+            )
+    
+    for user in users:
+        user_json = json.dumps(cleandict(asdict(user)))
+        
+        cur.execute("""
+            insert or ignore into user (
+                id,
+                accessed_at,
+                query_id,
+                data
+                )
+            values (
+                ?,?,?,?
+                )
+            """,
+            [ user.id, now, query_id, user_json ]
+            )
+    
+    for medium in media:
+        medium_json = json.dumps(cleandict(asdict(medium)))
+        
+        cur.execute("""
+            insert or ignore into medium (
+                id,
+                accessed_at,
+                query_id,
+                data
+                )
+            values (
+                ?,?,?,?
+                )
+            """,
+            [ medium.media_key, now, query_id, medium_json ]
+            )
+    
+    cur.connection.commit()
+    cur.close()
+
+def cache_users_response (response_users, query_type, auth_user_id, user_id = None, pagination_token=None, ts=None):
+    users = response_users.data or []
+    next_token = response_users.meta and response_users.meta.get('next_token')
+    
+    db = sqlite3.connect(CACHE_PATH)
+    cur = db.cursor()
+    
+    # SQLite is naive by default, so make sure this is UTC.
+    now = datetime.now(timezone.utc)
+    if ts:
+        now = ts
+    
+    if not pagination_token:
+        cur.execute("""
+            insert into query (
+                created_at,
+                last_accessed_at,
+                user_id,
+                next_token,
+                query_type,
+                auth_user_id
+                )
+                values (
+                    ?,?,?,?,?,?
+                )
+        """,
+        [now, now, user_id, next_token, query_type, auth_user_id]
+        )
+        
+        query_id = cur.lastrowid
+    else:
+        query_id = cur.execute("""
+                select rowid from query
+                where next_token = :next_token
+            """,
+            {
+                'next_token': pagination_token
+            }).fetchone()[0]
+        
+        cur.execute("""
+                update query set 
+                    last_accessed_at = :last_accessed_at, 
+                    next_token = :next_token 
+                    where rowid = :query_id
+            """,
+            {
+                'last_accessed_at': now,
+                'next_token': next_token,
+                'query_id': query_id
+            })
+    
+    
+    for user in users:
+        user_json = json.dumps(cleandict(asdict(user)))
+        
+        cur.execute("""
+            insert or ignore into user (
+                id,
+                accessed_at,
+                query_id,
+                data
+                )
+            values (
+                ?,?,?,?
+                )
+            """,
+            [ user.id, now, query_id, user_json ]
+            )
+    
+    cur.connection.commit()
+    cur.close()
+
+
+def get_cached_query (query_type, auth_user_id, user_id=None):
+    """
+    Stub, not yet implemented: the SQL below is never executed, and the
+    auth_user_id and query_type values in it are hardcoded placeholders.
+    """
+    sql = """
+        select * from query 
+        where 
+            (auth_user_id in ('14520320') or auth_user_id is null)
+            and query_type = 'bookmarks'
+    """
+    results = []
+    next_token = None
+    
+    return results, next_token
+    
+def get_object_over_time (obj_type, obj_id, auth_user_id):
+    """
+    Work in progress: runs the query but still returns empty results.
+    obj_type is interpolated into the SQL, so it must be a trusted table name.
+    """
+    db = sqlite3.connect(CACHE_PATH)
+    cur = db.cursor()
+    
+    results = cur.execute(f"""
+        --select id, count(*) c from tweet group by id having c > 1
+        
+        select t.*
+        from {obj_type} t, query q
+        where 
+            t.id = :obj_id
+            and q.rowid = t.query_id
+            and (q.auth_user_id in (:auth_user_id) or q.auth_user_id is null)
+        """,
+        {
+            'obj_id': obj_id,
+            'auth_user_id': auth_user_id
+        })
+    results = []
+    next_token = None
+    
+    return results, next_token
 
 def get_tweet_item (tweet_id, me=None):
     
@@ -66,6 +348,50 @@ def get_tweet_item (tweet_id, me=None):
     return collection_page
 
 
+
+
+
+def tweet_embed_template (tweet_id):
+    features = '{"tfw_timeline_list":{"bucket":[],"version":null},"tfw_follower_count_sunset":{"bucket":true,"version":null},"tfw_tweet_edit_backend":{"bucket":"on","version":null},"tfw_refsrc_session":{"bucket":"on","version":null},"tfw_mixed_media_15897":{"bucket":"treatment","version":null},"tfw_experiments_cookie_expiration":{"bucket":1209600,"version":null},"tfw_duplicate_scribes_to_settings":{"bucket":"on","version":null},"tfw_video_hls_dynamic_manifests_15082":{"bucket":"true_bitrate","version":null},"tfw_legacy_timeline_sunset":{"bucket":true,"version":null},"tfw_tweet_edit_frontend":{"bucket":"on","version":null}}'
+    
+    # base64 + encode URI component
+    features_encoded = 'eyJ0ZndfdGltZWxpbmVfbGlzdCI6eyJidWNrZXQiOltdLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X2ZvbGxvd2VyX2NvdW50X3N1bnNldCI6eyJidWNrZXQiOnRydWUsInZlcnNpb24iOm51bGx9LCJ0ZndfdHdlZXRfZWRpdF9iYWNrZW5kIjp7ImJ1Y2tldCI6Im9uIiwidmVyc2lvbiI6bnVsbH0sInRmd19yZWZzcmNfc2Vzc2lvbiI6eyJidWNrZXQiOiJvbiIsInZlcnNpb24iOm51bGx9LCJ0ZndfbWl4ZWRfbWVkaWFfMTU4OTciOnsiYnVja2V0IjoidHJlYXRtZW50IiwidmVyc2lvbiI6bnVsbH0sInRmd19leHBlcmltZW50c19jb29raWVfZXhwaXJhdGlvbiI6eyJidWNrZXQiOjEyMDk2MDAsInZlcnNpb24iOm51bGx9LCJ0ZndfZHVwbGljYXRlX3NjcmliZXNfdG9fc2V0dGluZ3MiOnsiYnVja2V0Ijoib24iLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X3ZpZGVvX2hsc19keW5hbWljX21hbmlmZXN0c18xNTA4MiI6eyJidWNrZXQiOiJ0cnVlX2JpdHJhdGUiLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X2xlZ2FjeV90aW1lbGluZV9zdW5zZXQiOnsiYnVja2V0Ijp0cnVlLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X3R3ZWV0X2VkaXRfZnJvbnRlbmQiOnsiYnVja2V0Ijoib24iLCJ2ZXJzaW9uIjpudWxsfX0%3D'
+    
+    
+    origin = f"http%3A%2F%2Flocalhost%3A5004%2Ftwitter%2Ftweet2%2F{tweet_id}.html"
+    
+    width = 550
+    height = 755
+    theme = "dark" # or light
+    hide_card = "false"
+    hide_thread = "false"
+    
+    src = f"https://platform.twitter.com/embed/Tweet.html?dnt=true&features={features_encoded}&origin={origin}&frame=false&hideCard={hide_card}&hideThread={hide_thread}&id={tweet_id}&lang=en&theme=dark&width={width}px"
+    
+    html = f"""
+    <iframe src="{src}" data-tweet-id="{tweet_id}"
+        scrolling="no" frameborder="0" allowtransparency="true" allowfullscreen="true" class="" style="position: static; visibility: visible; width: {width}px; height: {height}px; display: block; flex-grow: 1;" title="Twitter Tweet"
+    ></iframe>
+    """
+    
+    return html
+
+# https://developer.twitter.com/en/docs/twitter-for-websites/embedded-tweets/overview
+def get_tweet_embed (tweet_id):
+    
+    html = tweet_embed_template(tweet_id)
+    post = h_vm.FeedItem(
+        id = tweet_id,
+        created_at = 'some time',
+        display_name = 'Twitter User',
+        handle = 'tweetuser',
+        
+        html = html
+    )
+
+    return post
+
+
 def get_bookmarks_feed (user_id, pagination_token=None, max_results=10, me=None):
     
     if not me:
@@ -90,6 +416,8 @@ def get_bookmarks_feed (user_id, pagination_token=None, max_results=10, me=None)
     
     #print(response_json)
     
+    cache_tweets_response(response_tweets, 'bookmarks', user_id, user_id=user_id, pagination_token=pagination_token)
+    
     includes = response_tweets.includes
     tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me), response_tweets.data))
     next_token = response_tweets.meta.next_token
@@ -118,7 +446,7 @@ def get_bookmarks_feed (user_id, pagination_token=None, max_results=10, me=None)
     
     return collection_page
 
-def get_user_feed (user_id, pagination_token=None, me=None, exclude_replies=False, exclude_retweets=True, format=None):
+def get_user_feed (user_id, me=None, **twitter_kwargs):
     
     if not me and 'me' in g:
         me = g.me
@@ -127,18 +455,17 @@
         token = g.twitter_user['access_token']
         # issue: retweets don't come back if we request non_public_metrics
         is_me = False and user_id == g.twitter_user['id']
+        auth_user_id = g.twitter_user['id']
     else:
         token = os.environ.get('BEARER_TOKEN')
         is_me = False
+        auth_user_id = None
     
     
     tweet_source = ApiV2TweetSource(token)
     tweets_response = tweet_source.get_user_timeline(user_id,
-                        exclude_replies = exclude_replies,
-                        exclude_retweets = exclude_retweets,
-                        pagination_token = pagination_token,
-                        non_public_metrics = False,
-                        return_dataclass=True)
+                        return_dataclass=True,
+                        **twitter_kwargs)
     
     tweets = None
     if not tweets_response:
@@ -156,6 +483,10 @@
         print('profile get_user_timeline errors:')
         print(tweets_response.errors)
     
+    pagination_token=twitter_kwargs.get('pagination_token')
+    
+    cache_tweets_response(tweets_response, 'user_feed', auth_user_id, user_id=user_id, pagination_token=pagination_token)
+    
     ts = int(time.time() * 1000)
     with open(f'{DATA_DIR}/cache/tl_{user_id}_{ts}_{pagination_token}.json', 'wt') as f:
         f.write(json.dumps(cleandict(asdict(tweets_response))))
@@ -180,37 +511,53 @@ def get_tweets_collection (content_ids, pagination_token=None, max_results=None)
     """
     return []
     
-def get_user (user_id, me=None):
+def get_user (user_id, me=None) -> Optional[h_vm.FeedServiceUser]:
     
+    users = get_users([user_id], me=me)
+    
+    if users:
+        return users[0]
+    
+
+def get_users (content_ids, me=None, pagination_token=None) -> Optional[List[h_vm.FeedServiceUser]]:
+    """
+    
+    """
     if me:
         twitter_user = session.get(me)
         token = twitter_user['access_token']
+        auth_user_id = twitter_user['id']
     else:
         token = os.environ.get('BEARER_TOKEN')
+        auth_user_id = None
     
     social_graph = TwitterApiV2SocialGraph(token)
-    users_response = social_graph.get_user(user_id, return_dataclass=True)
-    
-    print(users_response)
+    users_response = social_graph.get_users(content_ids, return_dataclass=True)
     
     if not len(users_response.data):
         return
     
-    user = user_model_dc(users_response.data[0])
+    cache_users_response(users_response, f'users', auth_user_id, pagination_token=pagination_token)
     
-    return user
+    users = list(map(user_model_dc, users_response.data))
     
+    return users
 
 def get_home_feed (user_id, me, **query_kwargs):
     
     twitter_user = session.get(me)
     token = twitter_user['access_token']
+    auth_user_id = twitter_user['id']
     
     tweet_source = ApiV2TweetSource(token)
     response = tweet_source.get_home_timeline(user_id, **query_kwargs)
     
     #print(json.dumps(response_json, indent=2))
     
+    pagination_token = query_kwargs.get('pagination_token')
+    
+    cache_tweets_response(response, 'home_feed', auth_user_id, user_id=user_id, pagination_token=pagination_token)
+    
     includes = response.includes
     tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me), response.data))
     next_token = response.meta.next_token
@@ -224,9 +571,23 @@ def get_home_feed (user_id, me, **query_kwargs):
     return collection_page
 
 
+def get_author_threads (user_id):
+    """
+    Placeholder implementation where we can manually add threads to a collection,
+    but ultimately we will query a local Tweet DB that gets populated through various means.
+    
+    Once we store Tweets we can easily query this.
+    
+    We can filter by author_id,conversation_id order by in_reply_to_tweet_id,id
+    """
+    return get_content(f'collection:twitter.threads_{user_id}')
 
 def get_tweet_replies (conversation_id, in_reply_to_id=None, pagination_token=None, max_results=None, author_id=None):
+    """
+    New function, not used yet
+    """
     tweet_source = ApiV2TweetSource(token)
+    auth_user_id = None
     
     only_replies = view == 'replies'
     
@@ -263,7 +624,8 @@
     #print(json.dumps(tweets_response.get('meta'), indent=2))
     
     if replies_response and replies_response.meta and replies_response.meta.result_count:
-    
+        cache_tweets_response(replies_response, 'tweet_replies', auth_user_id, user_id=user_id, pagination_token=pagination_token)
+        
         includes = replies_response.includes
         tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), replies_response.data)) + tweets
         
@@ -363,16 +725,20 @@ def get_following_users (user_id, me=None, max_results=1000, pagination_token=No
     if me:
         twitter_user = session.get(me)
         token = twitter_user['access_token']
+        auth_user_id = twitter_user['id']
     else:
         token = os.environ.get('BEARER_TOKEN')
+        auth_user_id = None
     
     social_source = TwitterApiV2SocialGraph(token)
     
     following_resp = social_source.get_following(user_id, 
         max_results=max_results, pagination_token=pagination_token, return_dataclass=True)
     
+    cache_users_response(following_resp, 'following', auth_user_id, user_id = user_id, pagination_token=pagination_token)
+    
     ts = int(time.time() * 1000)
-    with open(f'{DATA_DIR}/cache/following_{user_id}_{ts}.json', 'wt') as f:
+    with open(f'{DATA_DIR}/cache/following_{user_id}_{ts}_{pagination_token}.json', 'wt') as f:
         f.write(json.dumps(cleandict(asdict(following_resp))))
     
     #print(following_resp)
@@ -399,8 +765,10 @@ def get_followers_user (user_id, me=None, max_results=1000, pagination_token=Non
     if me:
         twitter_user = session.get(me)
         token = twitter_user['access_token']
+        auth_user_id = twitter_user['id']
     else:
         token = os.environ.get('BEARER_TOKEN')
+        auth_user_id = None
     
     use_cache = False # this concept is broken for now
     
@@ -418,6 +786,8 @@ def get_followers_user (user_id, me=None, max_results=1000, pagination_token=Non
         
         print(f'followers cache for {user_id}: {ts}')
         
+        cache_users_response(followers_resp, 'followers', auth_user_id, user_id = user_id, pagination_token=pagination_token)
+        
         with open(f'{DATA_DIR}/cache/followers_{user_id}_{ts}.json', 'wt') as f:
             json.dump(cleandict(asdict(followers_resp)), f, indent=2)
         
@@ -443,12 +813,15 @@ def get_followers_user (user_id, me=None, max_results=1000, pagination_token=Non
     return collection_page
 
 def register_content_sources ():
+    init_cache_db()
+    
     register_content_source('twitter:tweets', get_tweets_collection, id_pattern='')
     register_content_source('twitter:tweet:', get_tweet_item, id_pattern='(?P<tweet_id>\d+)')
+    register_content_source('twitter:tweet:', get_tweet_embed, id_pattern='(?P<tweet_id>\d+)')
     register_content_source('twitter:bookmarks:', get_bookmarks_feed, id_pattern='(?P<user_id>\d+)')
     register_content_source('twitter:feed:user:', get_user_feed, id_pattern='(?P<user_id>\d+)')
     register_content_source('twitter:user:', get_user, id_pattern='(?P<user_id>\d+)')
-    
+    register_content_source('twitter:users', get_users, id_pattern='')
     
     register_content_source('twitter:feed:reverse_chronological:user:', get_home_feed, id_pattern='(?P<user_id>\d+)')
     
@@ -457,3 +830,6 @@ def register_content_sources ():
     
     register_content_source('twitter:following:users:', get_following_users, id_pattern='(?P<user_id>\d+)')
     register_content_source('twitter:followers:user:', get_followers_user, id_pattern='(?P<user_id>\d+)')
+    
+    register_content_source('twitter:threads:user:', get_author_threads, id_pattern='(?P<user_id>\d+)')
+    

+ 47 - 178
extensions/twitter_v2_facade/facade.py

@@ -267,7 +267,6 @@ def delete_tweet_bookmark (tweet_id):
 @twitter_app.route('/tweet/<tweet_id>.html', methods=['GET'])
 def get_tweet_html (tweet_id):
     
-    
     pagination_token = request.args.get('pagination_token')
     view = request.args.get('view', 'replies')
     
@@ -451,7 +450,8 @@
 @twitter_app.route('/tweet2/<tweet_id>.html', methods=['GET'])
 def get_tweet2_html (tweet_id):
     
-    
+    me = g.me
+    use_embed = int(request.args.get('embed', 0))
     pagination_token = request.args.get('pagination_token')
     view = request.args.get('view', 'replies')
     
@@ -462,174 +462,17 @@
         token = os.environ.get('BEARER_TOKEN')
     
     
-    tweet_source = ApiV2TweetSource(token)
-    
-    only_replies = view == 'replies'
-    
-    
     tweets = []
     if not pagination_token:
-        tweet_page = get_content(f'twitter:tweet:{tweet_id}', me=me)
-        
-        tweets.append(tweet_page.items[0])
-    
-    
-    replies_page = get_content(f'twitter:tweets:replies:{tweet_id}', me=g.me)
-    
-    skip_embed_replies = False
-    
-    if view == 'replies':
-        replies_response = tweet_source.get_thread(tweet_id,
-                                                only_replies=True,
-                                                pagination_token = pagination_token,
-                                                return_dataclass=True)
-    elif view == 'thread':
-        skip_embed_replies = True
-        replies_response = tweet_source.get_thread(tweet_id,
-                                                only_replies=False,
-                                                author_id=tweets[0].author_id,
-                                                pagination_token = pagination_token,
-                                                return_dataclass=True)
-                                                
-    elif view == 'conversation':
-        replies_response = tweet_source.get_thread(tweet_id,
-                                                only_replies=False,
-                                                pagination_token = pagination_token,
-                                                return_dataclass=True)
-    elif view == 'tweet':
-        replies_response = None
-    
-    next_token = None
+        if use_embed:
+            tweet = get_content(f'twitter:tweet:{tweet_id}', content_source_id='twitter_v2_facade.content_source:get_tweet_embed')
+            tweets.append(tweet)
+        else:
+            tweet_page = get_content(f'twitter:tweet:{tweet_id}', me=me)
+            tweets.append(tweet_page.items[0])
     
-    #print("conversation meta:")
-    #print(json.dumps(tweets_response.get('meta'), indent=2))
     
-    if replies_response and replies_response.meta and replies_response.meta.result_count:
-    
-        includes = replies_response.includes
-        tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), replies_response.data)) + tweets
-        
-        next_token = replies_response.meta.next_token
-    
-    # this method is OK except it doesn't work if there are no replies.
-    #tweets.append(tweet_model(includes, list(filter(lambda t: t['id'] == tweet_id, includes.get('tweets')))[0], me))
-
-    
-    #related_tweets = [] # derived from includes
-    
-    tweets.reverse()
-    
-    
-    
-    query = {}
-    
-    if next_token:
-        query = {
-            **query,
-            # FIXME only_replies
-            'next_data_url': url_for('.get_tweet_html', tweet_id=tweet_id, pagination_token=next_token, only_replies = '1' if only_replies else '0', author_id = tweets[0].author_id),
-            'next_page_url': url_for('.get_tweet_html', tweet_id=tweet_id, view=view, pagination_token=next_token)
-        }
-        
-    user = {
-
-    }
-    
-    if 'HX-Request' in request.headers:
-
-        
-        # console.log(res.tweets.map(t => t.text).join("\n\n-\n\n"))
-        return render_template('partial/tweets-timeline.html', user = user, tweets = tweets, query = query)
-    else:
-        page_nav = [
-            dict(
-                href=url_for('.get_tweet_html', tweet_id=tweets[0].conversation_id, view='thread'),
-                label = 'author thread',
-                order = 10
-            ),
-            dict(
-                href = url_for('.get_tweet_html', tweet_id=tweets[0].conversation_id, view='conversation'),
-                label = 'full convo',
-                order = 20
-            )
-        ]
-        
-        tweet = tweets_response.data[0]
-        user = list(filter(lambda u: u.id == tweet.author_id, tweets_response.includes.users))[0]
-        
-        source_url = f'https://twitter.com/{user.username}/status/{tweet_id}'
-        title = f'Tweet by {user.name} at {tweet.created_at}'
-        
-        opengraph_info = dict(
-            type = 'webpage', # threads might be article
-            url = source_url,
-            title = title,
-            description = tweet.text,
-            image = user.profile_image_url
-        )
-        
-        
-        if view == 'replies':
-            tweet = tweets[0]
-            
-            if tweet.id == '1608510741941989378':
-                unreplied = [
-                    UnrepliedSection(
-                        description = "Not clear what GS is still.",
-                        span = (40, 80)
-                    )
-                ]
-                tweet = replace(tweet,
-                    unreplied = unreplied
-                    )
-            
-            expand_parts = request.args.get('expand')
-            if expand_parts:
-                expand_parts = expand_parts.split(',')
-            
-            def reply_to_thread_item (fi):
-                nonlocal expand_parts
-                
-                if fi.id == '1609714342211244038':
-                    print(f'reply_to_thread_item id={fi.id}')
-                    unreplied = [
-                        UnrepliedSection(
-                            description = "Is there proof of this claim?",
-                            span = (40, 80)
-                        )
-                    ]
-                    fi = replace(fi,
-                        unreplied = unreplied
-                        )
-                
-                children = None
-                
-                if expand_parts and len(expand_parts) and fi.id == expand_parts[0]:
-                    expand_parts = expand_parts[1:]
-                    
-                    print(f'getting expanded replied for tweet={fi.id}')
-                    
-                    expanded_replies_response = tweet_source.get_thread(fi.id,
-                                                only_replies=True,
-                                                return_dataclass=True)
-                    if expanded_replies_response.data:
-                        print('we got expanded responses data')
-                        
-                        children =  list(map(lambda t: tweet_model_dc_vm(expanded_replies_response.includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), expanded_replies_response.data))
-                        children = list(map(reply_to_thread_item, children))
-                
-                
-                return ThreadItem(feed_item=fi, children=children)
-                
-            children = list(map(reply_to_thread_item, tweets[1:]))
-            
-            root = ThreadItem(
-                feed_item = tweet,
-                children = children
-            )
-            return render_template('tweet-thread.html', user = user, root = root, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
-        else:
-            return render_template(f'tweet-collection{theme_variant}.html', user = user, tweets = tweets, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
+    return render_template(f'tweet-collection{theme_variant}.html', user = {}, tweets = tweets, query = {})
 
 
 
@@ -644,7 +487,7 @@ def get_followers_html (user_id):
         'pagination_token': request.args.get('pagination_token')
     })
     
-    followers_page = get_content(f'twitter:followers:user:{user_id}', **content_params)
+    followers_page = get_content(f'twitter:followers:user:{user_id}', me=me, **content_params)
     
     followers = followers_page.items
     
@@ -671,7 +514,7 @@ def get_following_html (user_id):
         'pagination_token': request.args.get('pagination_token')
     })
     
-    following_page = get_content(f'twitter:following:users:{user_id}', **content_params)
+    following_page = get_content(f'twitter:following:users:{user_id}', me=me, **content_params)
     
     following = following_page.items
     
@@ -774,7 +617,8 @@ def get_timeline_home_html (variant = "reverse_chronological", pagination_token=
         'pagination_token': pagination_token,
         'since_id': request.args.get('since_id'),
         'until_id': request.args.get('until_id'),
-        'end_time': request.args.get('end_time')
+        'end_time': request.args.get('end_time'),
+        'start_time': request.args.get('start_time')
     })
     
     timeline_page = get_content(f'twitter:feed:reverse_chronological:user:{user_id}', me=g.me, **tq)
@@ -898,6 +742,15 @@ def get_bookmarks_old_html ():
 from hogumathi_app.content_system import get_content
 
 
+@twitter_app.route('/profile/<user_id>/threads.html', methods=['GET'])
+def get_threads_html (user_id):
+    category = request.args.get('category')
+    
+    collection = get_content(f'twitter:threads:user:{user_id}')
+    
+    print(collection)
+    
+    return 'ok'
 
 @twitter_app.route('/profile/<user_id>.html', methods=['GET'])
 def get_profile_html (user_id):
@@ -915,16 +768,23 @@ def get_profile_html (user_id):
     output_format = request.args.get('format', 'html')
     
     pagination_token = request.args.get('pagination_token')
-    exclude_replies = request.args.get('exclude_replies', '0')
-    exclude_retweets = request.args.get('exclude_retweets', '0')
-    
-    
+    exclude_replies = int(request.args.get('exclude_replies', 0))
+    exclude_retweets = int(request.args.get('exclude_retweets', 0))
+    max_results = int(request.args.get('limit', 10))
+    since_id = request.args.get('since_id')
+    until_id  = request.args.get('until_id')
+    start_time = request.args.get('start_time')
+    end_time  = request.args.get('end_time')
     
     query = cleandict({
         'pagination_token': pagination_token,
         'exclude_replies': exclude_replies,
         'exclude_retweets': exclude_retweets,
-        'format': output_format
+        'max_results': max_results,
+        'since_id': since_id,
+        'until_id': until_id,
+        'start_time': start_time,
+        'end_time': end_time
     })
     
     collection_page = get_content(f'twitter:feed:user:{user_id}', me=me, **query)
@@ -932,15 +792,19 @@
     tweets = collection_page.items
     next_token = collection_page.next_token
     
+    # FIXME janky
+    query['pagination_token'] = next_token
+    
     if next_token:
         query = {
             **query,
             
-            'next_data_url': url_for('.get_profile_html', user_id=user_id, pagination_token=next_token, exclude_replies=exclude_replies, exclude_retweets=exclude_retweets),
-            'next_page_url': url_for('.get_profile_html', user_id=user_id , pagination_token=next_token, exclude_replies=exclude_replies, exclude_retweets=exclude_retweets)
+            'format': output_format,
+            
+            'next_data_url': url_for('.get_profile_html', user_id=user_id, **query),
+            'next_page_url': url_for('.get_profile_html', user_id=user_id , **query)
         }
     
-    
         
     
     if output_format == 'feed.json':
@@ -993,6 +857,11 @@
                 href = url_for('twitter_v2_facade.get_followers_html', user_id=user.id),
                 label = 'Followers',
                 order = 50,
+            ),
+            dict (
+                href = url_for('twitter_v2_facade.get_threads_html', user_id=user.id),
+                label = 'Threads',
+                order = 55,
             )
         ]
         
@@ -1001,7 +870,7 @@
                 if uid.startswith('twitter:'):
                     page_nav += [
                         dict(
-                            href = url_for('twitter_v2_facade.get_profile_html', user_id=user_id, me=uid),
+                            href = url_for('twitter_v2_facade.get_profile_html', user_id=user_id, me=uid, **query),
                            label = f'View as {acct["id"]}',
                            order = 1000,
                        )

+ 6 - 1
extensions/twitter_v2_facade/oauth2_login.py

@@ -30,7 +30,7 @@ app_secret_key = os.environ.get("TWITTER_CONSUMER_SECRET")
 
 
 
-TWITTER_SCOPES = ["bookmark.read", "bookmark.write", "tweet.read", "tweet.write", "dm.read", "users.read", "like.read", "offline.access", "follows.read"]
+TWITTER_SCOPES = ["bookmark.read", "bookmark.write", "tweet.read", "tweet.write", "dm.read", "users.read", "like.read", "like.write", "offline.access", "follows.read"]
 
 oauth2_login = Blueprint('oauth2_login', 'oauth2_login',
     static_folder='static',
@@ -63,6 +63,10 @@ def add_me ():
         #       the later will fail as of now. should be rare since static resources aren't authenticated 
         #       and we don't use APIs w/ JS.
         refresh_token()
+    
+    print('DEBUG: twitter_user')
+    print(g.twitter_user)
+    
 
 @oauth2_login.context_processor
 def inject_me():
@@ -148,6 +152,7 @@ def get_loggedin_html ():
     me = 'twitter:{}'.format(user_id)
     
     session[ me ] = {
+        'expires_in': token['expires_in'],
         'expires_at': expires_at,
         'access_token': access,
         'refresh_token': refresh,

+ 23 - 3
extensions/twitter_v2_facade/view_model.py

@@ -2,12 +2,15 @@ from dataclasses import replace
 
 from flask import g, request
 
+import sqlite3
+
 from twitter_v2.types import Tweet, TweetExpansions
 
 from hogumathi_app.view_model import FeedServiceUser, FeedItem, FeedItemAction, CollectionPage, PublicMetrics, Card, MediaItem
 
 from . import oauth2_login
 
+
 url_for = oauth2_login.url_for_with_me
 
 def user_model_dc (user, my_url_for=url_for):
@@ -73,19 +76,35 @@ def tweet_model_dc_vm (includes: TweetExpansions, tweet: Tweet, me, my_url_for=u
         vr = actions['view_replies']
         url = my_url_for(vr.route, **vr.route_params)
     
+    
+    is_bookmarked = None
+    if me:
+        cache_db = sqlite3.connect('.data/twitter_v2_cache.db')
+        auth_user_id = me[len('twitter:'):]
+        # this will cache deleted bookmarks. we need a next level abstraction over events / aggregate.
+        is_bookmarked = cache_db.execute('select count(*) from tweet t, query q where q.rowid = t.query_id and q.query_type=? and t.id=? and q.auth_user_id=?', ['bookmarks', tweet.id, auth_user_id]).fetchone()[0] and True
+        cache_db.close()
+    
     if my_g.get('twitter_user'):
         actions.update(
-            bookmark = FeedItemAction('twitter_v2_facade.post_tweet_bookmark', {'tweet_id': tweet.id}),
-            delete_bookmark = FeedItemAction('twitter_v2_facade.delete_tweet_bookmark', {'tweet_id': tweet.id}),
-            
             retweet = FeedItemAction('twitter_v2_facade.post_tweet_retweet', {'tweet_id': tweet.id})
             )
+        if is_bookmarked:
+            actions.update(
+                delete_bookmark = FeedItemAction('twitter_v2_facade.delete_tweet_bookmark', {'tweet_id': tweet.id})
+                )
+        else:
+            actions.update(
+                bookmark = FeedItemAction('twitter_v2_facade.post_tweet_bookmark', {'tweet_id': tweet.id})
+                )
     
     if my_g.get('twitter_live_enabled'):
         actions.update(
             view_activity = FeedItemAction('twitter_v2_live_facade.get_tweet_activity_html', {'tweet_id': tweet.id})
             )
     
+    
+
     t = FeedItem(
         id = tweet.id,
         text = tweet.text,
@@ -109,6 +128,7 @@ def tweet_model_dc_vm (includes: TweetExpansions, tweet: Tweet, me, my_url_for=u
         #'is_edited': len(tweet['edit_history_tweet_ids']) > 1
         
         actions = actions,
+        is_bookmarked = is_bookmarked
     )
     
     if reply_depth:

+ 38 - 5
hogumathi_app/__main__.py

@@ -2,10 +2,11 @@ import os
 import sys
 from importlib.util import find_spec
 from configparser import ConfigParser
-
+import threading
 import json
+import time
 
-
+import schedule
 import requests
 
 from flask import Flask, g, redirect, url_for, render_template, jsonify, request, send_from_directory
@@ -17,6 +18,8 @@ from . import content_system as h_cs
 from . import item_collections, view_model as h_vm
 from .item_collections import item_collections_bp
 
+from . import schedule_system as h_sched
+
 theme_bootstrap5_enabled = False
 if find_spec('theme_bootstrap5'):
     from theme_bootstrap5 import hogumathi_theme_bootstrap5_bp
@@ -97,6 +100,13 @@ else:
     print('instagram module not found.')
     instagram_enabled = False
 
+if find_spec('git_facade'):
+    import git_facade
+    git_enabled = True
+else:
+    print('git module not found.')
+    git_enabled = False
+
 if find_spec('videojs'):
     from videojs import videojs_bp
     videojs_enabled = True
@@ -313,9 +323,32 @@ if __name__ == '__main__':
         feeds_facade.register_content_sources()
         api.register_blueprint(feeds, url_prefix='/feeds')
     
-    
+    if git_enabled:
+        git_facade.register_content_sources()
+        
     #CORS(api)
     
-
+    sched_app = h_sched.ScheduleApplication()
+    print(f'created schedule app: {sched_app}')
+    
+    running = True
+    def schedule_main():
+        
+        print(f'running schedule app: {sched_app}')
+        
+        while running:
+            sched_app.run_pending()
+            time.sleep(1)
+    
+    def say_something ():
+        print ('-- something')
+    
+    sched_app.schedule_job(schedule.every(61).seconds, say_something)
+    
+    schedule_thread = threading.Thread(target=schedule_main)
+    schedule_thread.start()
+    
+    api.run(port=PORT, host=HOST)
     
-    api.run(port=PORT, host=HOST)
+    running = False
+    schedule_thread.join()

+ 67 - 4
hogumathi_app/content_system.py

@@ -61,9 +61,17 @@ class ContentSystem:
         
         return args, kwargs
     
-    @lru_cache(maxsize=64) # NOTE: mutating return value mutates cached value
-    def get_content (self, content_id, content_source_id=None, ttl_hash=get_ttl_hash(60), *extra_args, **extra_kwargs):
-        print(f'get_content {content_id}')
+    
+    def resolve_content_source (self, content_id, content_source_id=None, *extra_args, **extra_kwargs):
+        """
+        Resolve possible content sources given the parameters used for get_content.
+        
+        Returns a generator, typically the first will be used.
+        
+        Allows for content modules to determine if sources are available,
+        without fetching the content itself.
+        """
+    
         #source_ids = list(self.content_sources.keys())
         #source_ids.sort(key=lambda id_prefix: len(id_prefix), reverse=True)
         
@@ -105,6 +113,23 @@ class ContentSystem:
             # we find them all...
             # yet we don't want intelligence about the type of content returned.
             # idea: class BulkResponse(dict): pass
+            yield content_source_fn, args, kwargs
+            
+    
+    @lru_cache(maxsize=64) 
+    def get_content (self, content_id, content_source_id=None, ttl_hash=get_ttl_hash(60), *extra_args, **extra_kwargs):
+        """
+        NOTE: mutating return value mutates cached value
+        """
+        
+        print(f'get_content {content_id}')
+
+        for content_source_fn, args, kwargs in self.resolve_content_source(
+                content_id,
+                content_source_id=content_source_id,
+                *extra_args,
+                **extra_kwargs):
+            
             content = content_source_fn(*args, **kwargs)
             
             if content:
@@ -128,7 +153,7 @@
 
         return self.get_all_content2(content_ids, enable_bulk_fetch=enable_bulk_fetch)
 
-    def get_all_content2 (self, content_collection_ids, content_args = None, max_results = None, enable_bulk_fetch=False):
+    def get_all_content2 (self, content_collection_ids, content_args = None, enable_bulk_fetch=False):
         """
         """
         Takes a list of collection IDs and content_args is a map of (args, kwargs) keyed by collection ID.
         Takes a list of collection IDs and content_args is a map of (args, kwargs) keyed by collection ID.
         
         
@@ -236,6 +261,44 @@
 
 
 
+class ObjectCache:
+    
+    create_stmt = """
+        create table content (
+          provider text,
+          id text,
+          dt datetime,
+          args text, -- could hash
+          type text,
+          data blob,
+          unique (provider, id, dt, args)
+        )
+    """
+    
+    insert_stmt = """
+        INSERT INTO content (dt, provider, id, args, type, data)
+        VALUES (current_timestamp, ?, ?, ?, ?, ?)
+    """
+    
+    select_latest_stmt = """
+        SELECT * from content
+        WHERE {where_sql}
+        GROUP BY provider, id, args
+        HAVING dt = max(dt)
+        
+    """
+    
+    def __init__ (self, db_path):
+        self.db_path = db_path
+    
+    def put (self, key, value):
+        pass
+    
+    def get (self, key):
+        pass
+
+
+
 
 # The app was coded before we turned this into a class...
 # so we proxy calls with the old interface to this default instance.
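ObjectCache.put and .get are still stubs. A minimal sketch of how they might drive the statements above with sqlite3, assuming a (provider, id, args) key tuple and JSON-serialized values — both are assumptions, not committed behavior:

    import json
    import sqlite3

    def put (self, key, value):
        provider, content_id, args = key  # assumed key layout
        with sqlite3.connect(self.db_path) as db:
            db.execute(self.insert_stmt,
                (provider, content_id, args, type(value).__name__, json.dumps(value)))

    def get (self, key):
        provider, content_id, args = key
        where_sql = 'provider = ? AND id = ? AND args = ?'
        with sqlite3.connect(self.db_path) as db:
            row = db.execute(self.select_latest_stmt.format(where_sql=where_sql),
                (provider, content_id, args)).fetchone()
        return json.loads(row[5]) if row else None  # data is the 6th column

The table would also need to exist before the first put; running create_stmt once in __init__, guarded so it is a no-op when the table already exists, is the obvious place.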

+ 15 - 1
hogumathi_app/item_collections.py

@@ -10,6 +10,8 @@ import json
 from flask import request, g, jsonify, render_template,  Blueprint, url_for, session
 
 from twitter_v2.api import ApiV2TweetSource
+
+from . import view_model as h_vm
 from .view_model import FeedItem, CollectionPage, cleandict
 
 from .content_system import get_content, get_all_content, register_content_source
@@ -484,4 +486,16 @@ def get_collection2_html (collection_id):
         else:
             if pagination_token:
                 query['next_page_url'] = url_for('.get_collection2_html', me=me, collection_id=collection_id, pagination_token=pagination_token)
-            return render_template('tweet-collection.html', tweets = feed_items, user = {}, query = query)
+            
+            source_url = url_for('.get_collection2_html', collection_id=collection_id, _external=True)
+            title = f'Collection: {collection_id} on Hogumathi'
+            
+            opengraph_info = dict(
+                type = 'webpage', # threads might be article
+                url = source_url,
+                title = title,
+                description = title,
+                #image = user.profile_image_url
+            )
+            return render_template('tweet-collection.html', tweets = feed_items, user = {}, query = query, opengraph_info=opengraph_info)
+            

+ 172 - 0
hogumathi_app/schedule_system.py

@@ -0,0 +1,172 @@
+import datetime
+import schedule
+import time
+from uuid import uuid4
+
+from typing import Optional, Any
+from dataclasses import dataclass
+
+@dataclass
+class ScheduledJob:
+    """
+    Basically an import of our schedule library:
+    
+    https://schedule.readthedocs.io/en/stable/_modules/schedule.html#Job
+    """
+    id: str
+    
+    interval: int  # pause interval * unit between runs
+    
+    
+    latest: Optional[int] = None  # upper limit to the interval
+    #job_func: Optional[functools.partial] = None  # the job job_func to run
+
+    # time units, e.g. 'minutes', 'hours', ...
+    unit: Optional[str] = None
+
+    # optional time at which this job runs
+    at_time: Optional[datetime.time] = None
+
+    # optional time zone of the self.at_time field. Only relevant when at_time is not None
+    at_time_zone:Optional[Any] = None # str or pytz.BaseTzInfo
+
+    # datetime of the last run
+    last_run: Optional[datetime.datetime] = None
+
+    # datetime of the next run
+    next_run: Optional[datetime.datetime] = None
+
+    # timedelta between runs, only valid for
+    period: Optional[datetime.timedelta] = None # period vs. interval?
+
+    # Specific day of the week to start on
+    start_day: Optional[str] = None
+
+    # optional time of final run
+    cancel_after: Optional[datetime.datetime] = None
+    
+    job_fn:Optional[Any] = None
+
+
+class ScheduleApplication:
+    """
+    Maintains a persistent schedule of jobs to run and manages execution.
+    """
+    
+    def __init__ (self):
+        
+        self.scheduler = schedule.default_scheduler
+        self.jobs = {}
+        
+    
+    def schedule_job (self, sched_job_spec: schedule.Job, job_fn: str):
+        """
+        Usage: 
+        
+        schedule_job(schedule.every(3).weeks.at(...), 'module.fn')
+        
+        module.fn needs to be ready to run when the job scheduler comes online.
+        It can't be something like a partial; it needs to be recreatable at startup.
+        
+        -
+        
+        we can persist a schedule.Job using its properties,
+        and restore it upon startup.
+        
+        https://schedule.readthedocs.io/en/stable/_modules/schedule.html#Job
+        """
+        
+        job_id = uuid4().hex
+        
+        sched_job_spec._schedule_next_run()
+        
+        job = ScheduledJob(
+            id = job_id,
+            interval = sched_job_spec.interval,
+            latest = sched_job_spec.latest,
+            unit = sched_job_spec.unit,
+            at_time = sched_job_spec.at_time,
+            at_time_zone = sched_job_spec.at_time_zone,
+            next_run = sched_job_spec.next_run,
+            last_run = sched_job_spec.last_run,
+            period = sched_job_spec.period,
+            start_day = sched_job_spec.start_day,
+            cancel_after = sched_job_spec.cancel_after,
+            job_fn = job_fn
+        )
+        
+        self.jobs[ job_id ] = job
+        
+        sched_job = self._as_sched_job_spec( job )
+        
+        sched_job.scheduler = self.scheduler
+        sched_job.do( job.job_fn )
+        
+        
+        # FIXME save jobs
+        
+        return job_id
+    
+    def get_missed_jobs (self):
+        pass
+    
+    def delete_job (self, job_id):
+        del self.jobs[job_id]
+        
+        # FIXME save jobs
+        
+    def _as_sched_job_spec (self, job):
+        sched_job_spec = schedule.Job( interval=job.interval )
+        
+        sched_job_spec.latest = job.latest
+        sched_job_spec.unit = job.unit
+        sched_job_spec.at_time = job.at_time
+        sched_job_spec.at_time_zone = job.at_time_zone
+        sched_job_spec.next_run = job.next_run
+        sched_job_spec.last_run = job.last_run
+        sched_job_spec.period = job.period
+        sched_job_spec.start_day = job.start_day
+        sched_job_spec.cancel_after = job.cancel_after
+        
+        return sched_job_spec
+    
+    def restore_schedule (self):
+        
+        # FIXME load from storage
+        saved_jobs = {}
+        
+        for job_id, job in saved_jobs.items():
+            job_func = lambda: print(f'running job: {job_id}')
+            
+            
+            
+            # TODO check if we missed a run
+            # if next_run is in the past
+            # we want to get user input for what to do.
+            
+            # Wire and schedule for execution
+            #
+            #sched_job_spec.job_func = job_func
+            #
+            # two options:
+            # job.scheduler = ...
+            # job.do( job_func )
+            # scheduler.jobs.append(sched_job_spec)
+            
+            # FIXME we need to find a way to serialize this. Maybe just full qual name with some rules, like no partials/lambdas.
+            job_fn = None
+            
+            # replace(job, job_fn => job_fn)
+            
+            #self.jobs[ job_id ] = job
+            
+            #sched_job_spec = self._as_sched_job_spec( job )
+            
+            #sched_job_spec.scheduler = self.scheduler
+            #sched_job_spec.do( job_fn )
+            
+            
+    
+    def run_pending (self):
+        self.scheduler.run_pending()
+    
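The FIXME about serializing job_fn could be resolved by persisting a fully-qualified 'module:function' name and importing it again in restore_schedule. A sketch under that assumption — both helpers are hypothetical, not part of this commit:

    import importlib

    def job_fn_name (fn):
        # Only module-level functions round-trip; partials, lambdas and
        # methods are rejected, matching the schedule_job docstring.
        if fn.__name__ == '<lambda>' or fn.__qualname__ != fn.__name__:
            raise ValueError('job_fn must be a module-level function')
        return '{}:{}'.format(fn.__module__, fn.__name__)

    def resolve_job_fn (name):
        module_name, fn_name = name.split(':', 1)
        return getattr(importlib.import_module(module_name), fn_name)

restore_schedule could then call resolve_job_fn(job.job_fn) before wiring the restored spec with sched_job_spec.do(...).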

+ 2 - 1
hogumathi_app/view_model.py

@@ -140,6 +140,7 @@ class FeedItem:
     reply_depth: Optional[int] = 0
     
     is_marked: Optional[bool] = None
+    is_bookmarked: Optional[bool] = None
     
     card: Optional[Card] = None
     
@@ -181,7 +182,7 @@
 
 @dataclass
 class ThreadItem:
-    feed_item: FeedItem
+    feed_item: Union[FeedItem,'Collection','RoutedMessage']
     children: Optional[List['ThreadItem']] = None
     parent: Optional['ThreadItem'] = None
     parents: Optional[List['ThreadItem']] = None

+ 67 - 2
hogumathi_app/web.py

@@ -5,7 +5,7 @@ from pathlib import Path
 import json
 import requests
 
-from flask import Flask, g, redirect, url_for, render_template, jsonify, request, send_from_directory
+from flask import Flask, g, redirect, url_for, render_template, jsonify, request, send_from_directory, render_template_string
 
 from . import content_system as h_cs
 from . import view_model as h_vm
@@ -107,10 +107,75 @@ def get_content_html (content_id, content_kwargs=None):
 		
 
 		return render_template('tweet-collection.html', tweets=content.items, user = {}, query = {})
+	elif type(content) == list:
+		return render_template('tweet-collection.html', tweets=content, user = {}, query = {})
 	else:
 		return jsonify(content)
 
 @api.get('/content/def456.html')
 def get_def456_html ():
 
-	return get_content_html('brand:ispoogedaily')
+	return get_content_html('brand:ispoogedaily')
+    
+@api.get('/content/search.html')
+def get_content_search_html ():
+    source_id = request.args.get('source')
+    q = request.args.get('q')
+    pagination_token = request.args.get('pagination_token')
+    max_results = int(request.args.get('limit', 10))
+    
+    # search object store
+    # search origin sources
+    # populate object store with results
+    
+    # similar to how the messages app works. Multiple sources within one app.
+    # That app does not cache results, though; it does an online search with each query.
+    
+    
+    return 'ok'
+    
+@api.get('/schedule/jobs.html')
+def get_schedule_jobs_html ():
+    
+    template = """
+    {% extends "base-bs.html" %}
+    
+    {% block content %}
+    
+    {% endblock %}
+    """
+    
+    view_model = {
+        'jobs': [
+            {
+                'id': '1234',
+                'next_run': '',
+                'last_run': '',
+                'interval': 1,
+                'unit': 'minutes',
+                'period': '', # period vs. interval?
+                'latest': '',
+                'start_day': '',
+                'cancel_after': ''
+            }
+        ]
+    }
+    
+    return render_template_string(template, **view_model)
+    
+@api.get('/schedule/create-job.html')
+def get_schedule_create_job_html ():
+    
+    template = """
+    {% extends "base-bs.html" %}
+    
+    {% block content %}
+    
+    {% endblock %}
+    """
+    
+    view_model = {
+    
+    }
+    
+    return render_template_string(template, **view_model)

+ 59 - 3
lib/twitter_v2/api.py

@@ -283,6 +283,19 @@ class ApiV2TweetSource:
 
         return result
     
+    def delete_retweet (self, tweet_id, user_id):
+        
+        url = "https://api.twitter.com/2/users/{}/retweets/{}".format(user_id, tweet_id)
+   
+        headers = {
+            'Authorization': 'Bearer {}'.format(self.token)
+        }
+        
+        response = requests.delete(url, headers=headers)
+        result = json.loads(response.text)
+        
+        return result
+    
     def bookmark (self, tweet_id, user_id):
         
         url = "https://api.twitter.com/2/users/{}/bookmarks".format(user_id)
@@ -312,12 +325,15 @@
         }
         
         response = requests.delete(url, headers=headers)
+        
+        print(response.status_code)
+        
         result = json.loads(response.text)
         
         return result
     
     
-    def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None, until_id = None, end_time = None) -> TweetSearchResponse:
+    def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None, until_id = None, end_time = None, start_time=None) -> TweetSearchResponse:
         """
         """
         Get a user's timeline as viewed by the user themselves.
         Get a user's timeline as viewed by the user themselves.
         """
         """
@@ -325,18 +341,21 @@
         path = 'users/{}/timelines/{}'.format(user_id, variant)
         
         return self.get_timeline(path, 
-            max_results=max_results, pagination_token=pagination_token, since_id=since_id, until_id=until_id, end_time=end_time, return_dataclass=True) 
+            max_results=max_results, pagination_token=pagination_token, since_id=since_id, until_id=until_id, end_time=end_time, start_time=start_time, return_dataclass=True) 
     
     def get_timeline (self, path,
         max_results = 10, pagination_token = None, since_id = None,
         until_id = None,
         end_time = None,
+        start_time = None,
         non_public_metrics = False,
         exclude_replies=False,
         exclude_retweets=False,
         return_dataclass=False):
         """
         Get any timeline, including custom curated timelines built by Tweet Deck / ApiV11.
+        
+        Max 3,200 for Essential access, and 800 if exclude_replies=True
         """
         """
         
         
         token = self.token
         token = self.token
@@ -393,6 +412,9 @@
         if end_time:
             params['end_time'] = end_time
         
+        if start_time:
+            params['start_time'] = start_time
+        
         headers = {"Authorization": "Bearer {}".format(token)}
         headers = {"Authorization": "Bearer {}".format(token)}
         
         
         #headers = {"Authorization": "access_token {}".format(access_token)}
         #headers = {"Authorization": "access_token {}".format(access_token)}
@@ -401,7 +423,7 @@
         response_json = json.loads(response.text)
         
         try:
-            print(json.dumps(response_json, indent = 2))
+            #print(json.dumps(response_json, indent = 2))
             typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
         except:
             print('error converting response to dataclass')
@@ -436,6 +458,9 @@
     def get_user_timeline (self, user_id,
                           max_results = 10, pagination_token = None,
                           since_id = None,
+                          until_id = None,
+                          start_time = None,
+                          end_time = None,
                           non_public_metrics=False,
                           exclude_replies=False,
                           exclude_retweets=False,
@@ -447,6 +472,7 @@
         
         return self.get_timeline(path, 
             max_results=max_results, pagination_token=pagination_token, since_id=since_id,
+            until_id=until_id, start_time=start_time, end_time=end_time,
             non_public_metrics = non_public_metrics,
             exclude_replies=exclude_replies, exclude_retweets=exclude_retweets, return_dataclass=return_dataclass)
     
@@ -548,6 +574,15 @@
         if since_id:
             params['since_id'] = since_id
         
+        if until_id:
+            params['until_id'] = until_id
+            
+        if start_time:
+            params['start_time'] = start_time
+            
+        if end_time:
+            params['end_time'] = end_time
+        
         if sort_order:
             params['sort_order'] = sort_order
         
@@ -715,6 +750,8 @@
         
         path = "users/{}/liked_tweets".format(user_id)
         
+        print('get_liked_tweets')
+        
         return self.get_timeline(path, 
             max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
         
@@ -760,6 +797,25 @@
         #  POST /2/users/:user_id/likes
         #  {id: tweet_id}
         return
+        
+    def delete_like (self, tweet_id, user_id):
+        
+        url = "https://api.twitter.com/2/users/{}/likes/{}".format(user_id, tweet_id)
+   
+        headers = {
+            'Authorization': 'Bearer {}'.format(self.token)
+        }
+        
+        response = requests.delete(url, headers=headers)
+        
+        print(response.status_code)
+        
+        result = json.loads(response.text)
+        
+        return result
+    
+    
+    
     
     def get_list_tweets (self, list_id):
         # GET /2/lists/:id/tweets
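A usage sketch for the new timeline windowing parameters. The timestamps are the RFC 3339 UTC form the Twitter v2 API expects; the bearer token and user_id are placeholders, and the constructor call assumes ApiV2TweetSource takes the token as its first argument:

    from twitter_v2.api import ApiV2TweetSource

    source = ApiV2TweetSource('YOUR_BEARER_TOKEN')  # placeholder token

    # Pull one user's tweets from April 2023 only.
    page = source.get_user_timeline(
        '12345',                              # placeholder user_id
        start_time = '2023-04-01T00:00:00Z',
        end_time = '2023-04-30T23:59:59Z',
        max_results = 10,
        return_dataclass = True,
    )

Note that end_time only reaches the request because get_user_timeline now forwards it to get_timeline along with until_id and start_time (see the call fixed above).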
         # GET /2/lists/:id/tweets