
release v0.4.1

Harlan Iverson 1 year ago
parent
commit
8320d26711

+ 263 - 0
extensions/bitchute_facade.py

@@ -0,0 +1,263 @@
+
+def register_content_sources ():
+    """
+    bitchute:channel:
+    bitchute:video:
+    bitchute:profile:user:
+    
+    bitchute:videos:channel:
+    bitchute:comments:video:
+    """
+    pass
+
+def get_bitchute_comments ():
+    """
+    Captured from the inline <script> on a BitChute video page:
+    initComments(
+    'https://commentfreely.bitchute.com',
+    'eyJwcm9maWxlX2lkIjogImFub255bW91cyIsICJvd25lcl9pZCI6ICJ2UkI1eFpXVW5EYlAiLCAiZGlzcGxheV9uYW1lIjogImFub255bW91cyIsICJ0aHJlYWRfaWQiOiAiYmNfMnFDQ3dyWm9kcXVxIiwgImljb25fdXJsIjogIi9zdGF0aWMvdjE0MS9pbWFnZXMvYmxhbmstcHJvZmlsZS5wbmciLCAiY2ZfaXNfYWRtaW4iOiAiZmFsc2UiLCAiY2hhbm5lbF9pZCI6ICJhZ0R1aVcxQWhXeHoifQ== 1f280c339d11ce063d204d66f4fe38fa938474290994899d5128d3a2ee79c471 1679618698',
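+    (cf_auth is three space-separated parts: base64-encoded JSON with profile/owner/thread ids, a 64-hex-char value that looks like a signature, and a unix timestamp)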
+    'anonymous'
+    
+    function initComments(cf_url, cf_auth, currentUserId, 
+            profilePictureURL, commentCountDeprecated, refreshAction, isThreadAdmin, isSupporter, isBlocked) {
+
+        getComments: function(success, error) {
+        var isFirstCall = !window.getCommentsTime;
+        getCommentsTime = Date.now();
+        $.ajax({
+            type: 'post',
+            url: cf_url + '/api/get_comments/',
+            data: {
+                cf_auth: cf_auth,
+                commentCount: (isFirstCall) ? 0 : localCommentCount || -1,
+                isNameValuesArrays: true
+            },
+            success: function(comments) {
+                localCommentCount = comments.values.length;
+                commentCount = comments.normalizedCommentCount;
+                lastCallTime = comments.callTime;
+                dynamicCacheSeconds = comments.dynamicSeconds;
+                success(comments)
+            },
+            e
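+            // ... (capture truncated here)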
+    
+    Request headers:
+    
+    authority: commentfreely.bitchute.com
+    origin: https://www.bitchute.com
+    referer: https://www.bitchute.com/
+    content-type: application/x-www-form-urlencoded; charset=UTF-8
+    
+    Response
+    
+    {
+    "names": [
+        "id",
+        "parent",
+        "created",
+        "modified",
+        "content",
+        "pings",
+        "creator",
+        "fullname",
+        "created_by_admin",
+        "created_by_current_user",
+        "up_vote_count",
+        "down_vote_count",
+        "user_vote",
+        "is_new",
+        "profile_picture_url",
+        "attachments"
+    ],
+    "values": [
+        [
+            "0LAnLMLmA66dSLbK0ur25CPoPITCsId9eE46",
+            null,
+            "2023-03-24 00:19:44.371714+00:00",
+            null,
+            "Love you Blue",
+            [],
+            "mW6Q8LlCDs7Y",
+            "Joelkenimer",
+            false,
+            false,
+            5,
+            2,
+            null,
+            false,
+            "/static/v141/images/blank-profile.png",
+            []
+        ],
+        [
+            "k9KG622jF7IJ6bJvOiqpE9HdYgvl254DbE3M",
+            null,
+            "2023-03-24 00:22:12.367094+00:00",
+            null,
+            "OK pls send us a link for Odysee if that happens.",
+            [],
+            "OTjfEnZ1HrAC",
+            "danortego",
+            false,
+            false,
+            8,
+            0,
+            null,
+            false,
+            "/static/v141/images/blank-profile.png",
+            []
+        ],
+        [
+            "z3EdQTt21UCVmqeeymvuN0BV1vnvJpTs4COR",
+            null,
+            "2023-03-24 00:23:40.374960+00:00",
+            null,
+            "I'm very happy about that C21 and Ryu",
+            [],
+            "qgjwPrLKPkDN",
+            "MayQ",
+            false,
+            false,
+            4,
+            1,
+            null,
+            false,
+            "/static/v141/images/blank-profile.png",
+            []
+        ],
+        [
+            "wdGCZbdx9aCwZ4fDGvAg0tk58wEKS1L481Te",
+            null,
+            "2023-03-24 00:24:06.963831+00:00",
+            null,
+            "What is Odysee?",
+            [],
+            "NNTNz25N0fwU",
+            "eseme",
+            false,
+            false,
+            3,
+            1,
+            null,
+            false,
+            "/static/v141/images/blank-profile.png",
+            []
+        ],
+        [
+            "69qwpTIezaJCb0AHunNBghQMPFcXWowWWiK0",
+            null,
+            "2023-03-24 00:24:10.178391+00:00",
+            null,
+            "I am really starting to get why this has to be a soft exposure! The normies are going to literally freak the FARK out! Thanks BW your dedication to getting this out is truly appreciated!",
+            [],
+            "vcWaw0oRGYPZ",
+            "3nd5laveryQ",
+            false,
+            false,
+            6,
+            1,
+            null,
+            false,
+            "https://static-3.bitchute.com/live/profile_images/vcWaw0oRGYPZ/nhl3wJhdEu8OYdvTaGOVXBy7_medium.jpg",
+            []
+        ],
+        [
+            "wr5uoJppgCBrHn21eIydrAEHQ6ZEhn7nn7tR",
+            null,
+            "2023-03-24 00:25:43.415416+00:00",
+            null,
+            "Thank you to both Christian21 and bluewater.  I'm glad that Ryushin apologized, we all make mistakes, none of us is perfect.",
+            [],
+            "7EVG4eljblWD",
+            "kg78bv2",
+            false,
+            false,
+            5,
+            1,
+            null,
+            false,
+            "/static/v141/images/blank-profile.png",
+            []
+        ],
+        [
+            "2bNNBVb6ySQ0tNHkNoURaWavXPyL6tmE0dkP",
+            "wdGCZbdx9aCwZ4fDGvAg0tk58wEKS1L481Te",
+            "2023-03-24 00:27:15.508892+00:00",
+            null,
+            "It is an app like Rumble and BitChute that Bluewater is on.",
+            [],
+            "DAu6hPe36k5K",
+            "PJStitcher",
+            false,
+            false,
+            2,
+            1,
+            null,
+            false,
+            "/static/v141/images/blank-profile.png",
+            []
+        ],
+        [
+            "opFpX4MxSdZYMgXn7m0V8HFoahZ9TOF3XIwd",
+            null,
+            "2023-03-24 00:32:36.503395+00:00",
+            null,
+            "💜💜💜",
+            [],
+            "zC9exgm75dJG",
+            "Janeybell",
+            false,
+            false,
+            1,
+            0,
+            null,
+            false,
+            "/static/v141/images/blank-profile.png",
+            []
+        ],
+        [
+            "1XEXyQ5Gs8Wqi7JNCMIqpivvMcwfl5jEKZKV",
+            null,
+            "2023-03-24 00:34:58.928846+00:00",
+            null,
+            "Thank you for this Blue and Christian21!",
+            [],
+            "b1cOBqmTurdZ",
+            "fuchsia",
+            false,
+            false,
+            0,
+            0,
+            null,
+            false,
+            "https://static-3.bitchute.com/live/profile_images/b1cOBqmTurdZ/pfykURW9xNIULWdFaCNtQQoa_medium.jpg",
+            []
+        ],
+        [
+            "j4VhOHiDfCFzY78XWr7FyxjCZV3Fcxmt9ORd",
+            "wdGCZbdx9aCwZ4fDGvAg0tk58wEKS1L481Te",
+            "2023-03-24 00:36:22.977951+00:00",
+            null,
+            "https://odysee.com/@Bluewater:e",
+            [],
+            "S39g9uIZsV8L",
+            "lupin in the green",
+            false,
+            false,
+            1,
+            0,
+            null,
+            false,
+            "https://static-3.bitchute.com/live/profile_images/S39g9uIZsV8L/E61gvkIIRWiJQE2aNducAcuv_medium.jpg",
+            []
+        ]
+    ],
+    "callTime": "1679618290.1224425",
+    "dynamicSeconds": 300,
+    "normalizedCommentCount": 10,
+    "isUniversalPin": false,
+    "pinnedCommentId": null
+}
+    
+    """
+    
+    pass

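A minimal sketch of what get_bitchute_comments could build from the capture above. fetch_bitchute_comments and CF_URL are illustrative names, this is untested, and cf_auth has to be scraped from the video page's initComments(...) call:

import requests

CF_URL = 'https://commentfreely.bitchute.com'

def fetch_bitchute_comments (cf_auth, comment_count=0):
    # POST form-encoded, mirroring the origin/referer headers the site itself sends.
    resp = requests.post(
        CF_URL + '/api/get_comments/',
        data = {
            'cf_auth': cf_auth,
            'commentCount': comment_count,
            'isNameValuesArrays': 'true'
        },
        headers = {
            'origin': 'https://www.bitchute.com',
            'referer': 'https://www.bitchute.com/'
        }
    )
    resp.raise_for_status()
    body = resp.json()

    # The payload is column-oriented: zip each row of 'values' with 'names'.
    return [dict(zip(body['names'], row)) for row in body['values']]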
+ 123 - 11
extensions/twitter_archive_facade/facade.py

@@ -292,7 +292,9 @@ def tweet_model_vm (tweet_data) -> List[h_vm.FeedItem]:
             retweet_count = int(tweet_data['retweet_count']),
             reply_count = 0,
             quote_count = 0
-        )
+        ),
+        
+        debug_source_data = tweet_data
     )
     
     return t
@@ -332,6 +334,50 @@ def get_tweets (ids, me = None):
     
     return collection_page
 
+def get_tweets_search_sql (sql = None, sql_params = [], to_feed_item_fn=tweet_model_vm):
+    print('get_tweets_search_sql')
+    tweet_source = ArchiveTweetSource(ARCHIVE_TWEETS_PATH)
+    
+    db_tweets = tweet_source.search_tweets_sql(sql, sql_params)
+                                                    
+    tweets = list(map(to_feed_item_fn, db_tweets))
+    
+    collection_page = h_vm.CollectionPage(
+        id = sql,
+        items = tweets,
+        total_count = len(tweets) # FIXME it's up to the client to do pagination
+    )
+    
+    return collection_page
+
+def tweets_search_content (q, pagination_token = None, max_results = 100, to_feed_item_fn=tweet_model_vm):
+    print('tweets_search_content')
+    tweet_source = ArchiveTweetSource(ARCHIVE_TWEETS_PATH)
+    
+    if pagination_token is None:
+        pagination_token = 0
+    
+    next_token = pagination_token + max_results
+    
+    sql = 'select * from tweet where full_text like(?) order by created_at asc limit ?,?'
+    sql_params = [f'%{q}%', pagination_token, max_results]  # LIMIT ?,? is offset,count; count is the page size, not the end offset
+    
+    db_tweets = tweet_source.search_tweets_sql(sql, sql_params)
+                                                    
+    tweets = list(map(to_feed_item_fn, db_tweets))
+    
+    if len(tweets) < max_results:
+        next_token = None
+    
+    collection_page = h_vm.CollectionPage(
+        id = sql,
+        items = tweets,
+        total_count = len(tweets), # FIXME it's up to the client to do pagination
+        next_token = next_token
+    )
+    
+    return collection_page
+
 def get_tweet (tweet_id, me = None):
     ids = [tweet_id]
     
@@ -346,7 +392,11 @@ def register_content_sources ():
     
     content_system.register_content_source('twitter:tweets', get_tweets, id_pattern='')
     content_system.register_content_source('twitter:tweet:', get_tweet, id_pattern='([\d]+)')
-
+    
+    content_system.register_content_source('twitter:tweets:search', tweets_search_content, id_pattern='')
+    
+    content_system.register_content_source('twitter:tweets:search:sql', get_tweets_search_sql, id_pattern='')
+    
 @twitter_app.route('/profile/<user_id>.html', methods=['GET'])
 def get_profile_html (user_id):
     
@@ -436,12 +486,11 @@ def post_media_upload ():
 def get_tweets_search (response_format='json'):
     
     search = request.args.get('q')
-    limit = int(request.args.get('limit', 10000))
+    limit = int(request.args.get('limit', 100))
     offset = int(request.args.get('offset', 0))
     
     in_reply_to_user_id = int(request.args.get('in_reply_to_user_id', 0))
     
-    db = sqlite3.connect(TWEET_DB_PATH)
     
     sql = """
 select
@@ -469,15 +518,16 @@ from tweet
         sql += ' offset ?'
         sql_params.append(offset)
     
-    cur = db.cursor()
-    cur.row_factory = sqlite3.Row
+    #collection_page = content_system.get_content('twitter:tweets:search:sql', sql=sql, sql_params=sql_params)
     
-    tweets = list(map(dict, cur.execute(sql, sql_params).fetchall()))
-    cur.close()
-    db.close()
+    collection_page = content_system.get_content('twitter:tweets:search', q=search, pagination_token=offset, max_results=limit)
+    
+    tweets = collection_page.items
     
     result = None
     
+    print(f'tweet archive. search results length={len(tweets)}')
+    
     if response_format  == 'cards.json':
         cards = list(map(db_tweet_to_card, tweets))
         
@@ -505,8 +555,10 @@ from tweet
             "tweets": rows
         }
     elif response_format == 'html':
-        tweets = list(map(tweet_model_vm, tweets))
-        query = {}
+        #tweets = list(map(tweet_model_vm, tweets))
+        query = {
+            'next_data_url': url_for('.get_tweets_search', q=search, response_format=response_format, limit=limit, offset=collection_page.next_token)
+        }
         profile_user = {}
         return render_template('search.html', user = profile_user, tweets = tweets, query = query)
     else:
@@ -517,6 +569,66 @@ from tweet
     
     return Response(json.dumps(result), mimetype="application/json")
 
+
+
+@twitter_app.route('/tweets/on-this-day.html', methods=['GET'])
+def get_tweets_on_this_day ():
+    
+    otd_method = request.args.get("otd_method", "traditional")
+    
+    if otd_method == "calendar":
+        otd_where_sql = """
+            -- "on this day" calendar-wise
+            and week = now_week
+            and dow = now_dow
+        """
+    else:
+        otd_where_sql = """
+            -- "on this day" traditional
+            and `month` = now_month
+            and dom = now_dom
+        """
+
+    sql = f"""
+select
+	*,
+	cast(strftime('%Y', created_at) as integer) as `year`, 
+	cast(strftime('%m', created_at) as integer) as `month`, 
+	cast(strftime('%d', created_at) as integer) as dom, 
+	cast(strftime('%W', created_at) as integer) as week, 
+	cast(strftime('%w', created_at) as integer) as dow,
+	cast(strftime('%j', created_at) as integer) as doy,
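+	-- strftime: %W = week of year (Monday-first), %w = day of week (0 = Sunday),
+	-- %j = day of year; the now_* columns below repeat these for the current local time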
+	
+	
+	
+	datetime(current_timestamp, 'localtime') as now_ts,
+	cast(strftime('%Y', datetime(current_timestamp, 'localtime')) as integer) as now_year,
+	cast(strftime('%m', datetime(current_timestamp, 'localtime')) as integer) as now_month,
+	cast(strftime('%d', datetime(current_timestamp, 'localtime')) as integer) as now_dom,
+	cast(strftime('%W', datetime(current_timestamp, 'localtime')) as integer) as now_week, 
+	cast(strftime('%w', datetime(current_timestamp, 'localtime')) as integer) as now_dow,
+	cast(strftime('%j', datetime(current_timestamp, 'localtime')) as integer) as now_doy
+from tweet
+where
+    true
+	{otd_where_sql}
+	
+    """
+    
+    sql_params = []
+    
+    collection_page = content_system.get_content('twitter:tweets:search:sql', sql=sql, sql_params=sql_params)
+    
+    tweets = collection_page.items
+    
+    query = {}
+    profile_user = {}
+    
+    return render_template('search.html', user = profile_user, tweets = tweets, query = query)
+
+
+
+
 @twitter_app.route('/tweets', methods=['POST'])
 def post_tweets ():
     tweets_path = ARCHIVE_TWEETS_PATH

+ 255 - 3
extensions/twitter_v2_facade/content_source.py

@@ -144,8 +144,9 @@ def get_user_feed (user_id, pagination_token=None, me=None, exclude_replies=Fals
     if not tweets_response:
         print('no response_json')
         
-    if tweets_response.meta.result_count == 0:
+    if tweets_response.meta and tweets_response.meta.result_count == 0:
         print('no results')
+        print(tweets_response)
     
     if not tweets_response.includes:
         print(tweets_response)
@@ -172,7 +173,7 @@ def get_user_feed (user_id, pagination_token=None, me=None, exclude_replies=Fals
     
     return collection_page
 
-def get_tweets_collection (tweet_ids, pagination_token=None, max_results=None):
+def get_tweets_collection (content_ids, pagination_token=None, max_results=None):
     """
     We might be able to have a generalizer in the content system as well...
     If a source exposes a get many interface then use it. We want to avoid many singular fetches.
@@ -198,10 +199,261 @@ def get_user (user_id, me=None):
     user = user_model_dc(users_response.data[0])
     
     return user
+    
+
+def get_home_feed (user_id, me, **query_kwargs):
+    
+    twitter_user = session.get(me)
+    token = twitter_user['access_token']
+    
+    tweet_source = ApiV2TweetSource(token)
+    response = tweet_source.get_home_timeline(user_id, **query_kwargs)
+    
+    #print(json.dumps(response_json, indent=2))
+    
+    includes = response.includes
+    tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me), response.data))
+    next_token = response.meta.next_token
+    
+    collection_page = CollectionPage(
+        id = user_id,
+        items = tweets,
+        next_token = next_token
+    )
+    
+    return collection_page
+
+
+
+def get_tweet_replies (conversation_id, in_reply_to_id=None, pagination_token=None, max_results=None, author_id=None):
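+    # FIXME: lifted from the facade route; token, view, tweet_id, g, request,
+    # theme_variant, page_nav and opengraph_info are all undefined in this scope,
+    # and rendering templates here differs from the other content sources,
+    # which return CollectionPage objects.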
+    tweet_source = ApiV2TweetSource(token)
+    
+    only_replies = view == 'replies'
+    
+    
+    tweets = []
+
+    
+    skip_embed_replies = False
+    
+    if view == 'replies':
+        replies_response = tweet_source.get_thread(in_reply_to_id,
+                                                only_replies=True,
+                                                pagination_token = pagination_token,
+                                                return_dataclass=True)
+    elif view == 'thread':
+        skip_embed_replies = True
+        replies_response = tweet_source.get_thread(conversation_id,
+                                                only_replies=False,
+                                                author_id=author_id,
+                                                pagination_token = pagination_token,
+                                                return_dataclass=True)
+                                                
+    elif view == 'conversation':
+        replies_response = tweet_source.get_thread(conversation_id,
+                                                only_replies=False,
+                                                pagination_token = pagination_token,
+                                                return_dataclass=True)
+    elif view == 'tweet':
+        replies_response = None
+    
+    next_token = None
+    
+    #print("conversation meta:")
+    #print(json.dumps(tweets_response.get('meta'), indent=2))
+    
+    if replies_response and replies_response.meta and replies_response.meta.result_count:
+    
+        includes = replies_response.includes
+        tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), replies_response.data)) + tweets
+        
+        next_token = replies_response.meta.next_token
+    
+    # this method is OK except it doesn't work if there are no replies.
+    #tweets.append(tweet_model(includes, list(filter(lambda t: t['id'] == tweet_id, includes.get('tweets')))[0], me))
+
+    
+    #related_tweets = [] # derived from includes
+    
+    tweets.reverse()
+    
+    
+    
+    query = {}
+    
+    if next_token:
+        query = {
+            **query,
+            # FIXME only_replies
+            'next_data_url': url_for('.get_tweet2_html', tweet_id=tweet_id, pagination_token=next_token, only_replies = '1' if only_replies else '0', author_id = tweets[0].author_id),
+            'next_page_url': url_for('.get_tweet2_html', tweet_id=tweet_id, view=view, pagination_token=next_token)
+        }
+        
+    user = {
+
+    }
+
+    
+    
+    if view == 'replies':
+        tweet = tweets[0]
+        
+        if tweet.id == '1608510741941989378':
+            unreplied = [
+                UnrepliedSection(
+                    description = "Not clear what GS is still.",
+                    span = (40, 80)
+                )
+            ]
+            tweet = replace(tweet,
+                unreplied = unreplied
+                )
+        
+        expand_parts = request.args.get('expand')
+        if expand_parts:
+            expand_parts = expand_parts.split(',')
+        
+        def reply_to_thread_item (fi):
+            nonlocal expand_parts
+            
+            if fi.id == '1609714342211244038':
+                print(f'reply_to_thread_item id={fi.id}')
+                unreplied = [
+                    UnrepliedSection(
+                        description = "Is there proof of this claim?",
+                        span = (40, 80)
+                    )
+                ]
+                fi = replace(fi,
+                    unreplied = unreplied
+                    )
+            
+            children = None
+            
+            if expand_parts and len(expand_parts) and fi.id == expand_parts[0]:
+                expand_parts = expand_parts[1:]
+                
+                print(f'getting expanded replied for tweet={fi.id}')
+                
+                expanded_replies_response = tweet_source.get_thread(fi.id,
+                                            only_replies=True,
+                                            return_dataclass=True)
+                if expanded_replies_response.data:
+                    print('we got expanded responses data')
+                    
+                    children =  list(map(lambda t: tweet_model_dc_vm(expanded_replies_response.includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), expanded_replies_response.data))
+                    children = list(map(reply_to_thread_item, children))
+            
+            
+            return ThreadItem(feed_item=fi, children=children)
+            
+        children = list(map(reply_to_thread_item, tweets[1:]))
+        
+        root = ThreadItem(
+            feed_item = tweet,
+            children = children
+        )
+        return render_template('tweet-thread.html', user = user, root = root, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
+    else:
+        return render_template(f'tweet-collection{theme_variant}.html', user = user, tweets = tweets, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
+
+
+def get_following_users (user_id, me=None, max_results=1000, pagination_token=None):
+    
+    if me:
+        twitter_user = session.get(me)
+        token = twitter_user['access_token']
+    else:
+        token = os.environ.get('BEARER_TOKEN')
+    
+    social_source = TwitterApiV2SocialGraph(token)
+    
+    following_resp = social_source.get_following(user_id, 
+        max_results=max_results, pagination_token=pagination_token, return_dataclass=True)
+    
+    ts = int(time.time() * 1000)
+    with open(f'{DATA_DIR}/cache/following_{user_id}_{ts}.json', 'wt') as f:
+        f.write(json.dumps(cleandict(asdict(following_resp))))
+    
+    #print(following_resp)
+    #run_script('on_user_seen', {'twitter_user': g.twitter_user, 'users': following_resp})
+    
+    #following = list(map(lambda f: f['id'], following_resp.get('data')))
+
+    following = list(map(user_model_dc, following_resp.data))
+    total_count = following_resp.meta.get('result_count')
+    next_token = following_resp.meta.get('next_token')
+    
+    collection_page = CollectionPage(
+        id = user_id,
+        items = following,
+        total_count = total_count,
+        next_token = next_token
+        )
+    
+    return collection_page
+
+
+def get_followers_user (user_id, me=None, max_results=1000, pagination_token=None):
+    
+    if me:
+        twitter_user = session.get(me)
+        token = twitter_user['access_token']
+    else:
+        token = os.environ.get('BEARER_TOKEN')
+    
+    use_cache = False # this concept is broken for now
+    
+    
+    
+    if use_cache: # this concept is broken for now
+        print(f'using cache for user {user_id}: {use_cache}')
+        with open(f'.data/cache/followers_{user_id}_{pagination_token}_{use_cache}.json', 'rt') as f:
+            response_json = json.load(f)
+    else:
+        social_source = TwitterApiV2SocialGraph(token)
+        followers_resp = social_source.get_followers(user_id, max_results=max_results, pagination_token=pagination_token, return_dataclass=True)
+    
+        ts = int(time.time() * 1000)
+        
+        print(f'followers cache for {user_id}: {ts}')
+        
+        with open(f'{DATA_DIR}/cache/followers_{user_id}_{ts}.json', 'wt') as f:
+            json.dump(cleandict(asdict(followers_resp)), f, indent=2)
+        
+        #print(followers_resp)
+    #run_script('on_user_seen', {'twitter_user': g.twitter_user, 'users': followers_resp})
+    
+    #followers = list(map(lambda f: f['id'], followers_resp.get('data')))
+    followers = list(map(user_model_dc, followers_resp.data))
+    
+    total_count = followers_resp.meta.get('result_count')
+    next_token = followers_resp.meta.get('next_token')
+    
+    collection_page = CollectionPage(
+        id = user_id,
+        items = followers,
+        total_count = total_count,
+        next_token = next_token
+        )
+    
+    return collection_page
 
 def register_content_sources ():
-    register_content_source('twitter:tweets', get_tweets_collection)
+    register_content_source('twitter:tweets', get_tweets_collection, id_pattern='')
     register_content_source('twitter:tweet:', get_tweet_item, id_pattern='(?P<tweet_id>\d+)')
     register_content_source('twitter:bookmarks:', get_bookmarks_feed, id_pattern='(?P<user_id>\d+)')
     register_content_source('twitter:feed:user:', get_user_feed, id_pattern='(?P<user_id>\d+)')
     register_content_source('twitter:user:', get_user, id_pattern='(?P<user_id>\d+)')
+    
+    
+    register_content_source('twitter:feed:reverse_chronological:user:', get_home_feed, id_pattern='(?P<user_id>\d+)')
+    
+    
+    register_content_source('twitter:tweets:replies:', get_tweet_replies, id_pattern='(?P<conversation_id>\d+)')
+    
+    register_content_source('twitter:following:users:', get_following_users, id_pattern='(?P<user_id>\d+)')
+    register_content_source('twitter:followers:user:', get_followers_user, id_pattern='(?P<user_id>\d+)')

+ 217 - 45
extensions/twitter_v2_facade/facade.py

@@ -448,67 +448,243 @@ def get_tweet_html (tweet_id):
 
 
 
-@twitter_app.route('/followers/<user_id>.html', methods=['GET'])
-def get_followers_html (user_id):
-    
-    if not g.twitter_user:
-        return 'need to log in.', 403
+@twitter_app.route('/tweet2/<tweet_id>.html', methods=['GET'])
+def get_tweet2_html (tweet_id):
     
-    use_cache = request.args.get('use_cache')
     
-    token = g.twitter_user['access_token']
+    pagination_token = request.args.get('pagination_token')
+    view = request.args.get('view', 'replies')
     
-    social_source = TwitterApiV2SocialGraph(token)
     
-    if use_cache:
-        print(f'using cache for user {user_id}: {use_cache}')
-        with open(f'.data/cache/followers_{user_id}_{use_cache}.json', 'rt') as f:
-            response_json = json.load(f)
+    if g.twitter_user:
+        token = g.twitter_user['access_token']
     else:
-        response_json = social_source.get_followers(user_id, max_results=1000, return_dataclass=True)
+        token = os.environ.get('BEARER_TOKEN')
+    
+    
+    tweet_source = ApiV2TweetSource(token)
     
-        ts = int(time.time() * 1000)
+    only_replies = view == 'replies'
+    
+    
+    tweets = []
+    if not pagination_token:
+        tweet_page = get_content(f'twitter:tweet:{tweet_id}', me=g.me)
         
-        print(f'followers cache for {user_id}: {ts}')
+        tweets.append(tweet_page.items[0])
+    
+    
+    replies_page = get_content(f'twitter:tweets:replies:{tweet_id}', me=g.me)
+    
+    skip_embed_replies = False
+    
+    if view == 'replies':
+        replies_response = tweet_source.get_thread(tweet_id,
+                                                only_replies=True,
+                                                pagination_token = pagination_token,
+                                                return_dataclass=True)
+    elif view == 'thread':
+        skip_embed_replies = True
+        replies_response = tweet_source.get_thread(tweet_id,
+                                                only_replies=False,
+                                                author_id=tweets[0].author_id,
+                                                pagination_token = pagination_token,
+                                                return_dataclass=True)
+                                                
+    elif view == 'conversation':
+        replies_response = tweet_source.get_thread(tweet_id,
+                                                only_replies=False,
+                                                pagination_token = pagination_token,
+                                                return_dataclass=True)
+    elif view == 'tweet':
+        replies_response = None
+    
+    next_token = None
+    
+    #print("conversation meta:")
+    #print(json.dumps(tweets_response.get('meta'), indent=2))
+    
+    if replies_response and replies_response.meta and replies_response.meta.result_count:
+    
+        includes = replies_response.includes
+        tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), replies_response.data)) + tweets
         
-        with open(f'{DATA_DIR}/cache/followers_{user_id}_{ts}.json', 'wt') as f:
-            json.dump(response_json, f, indent=2)
+        next_token = replies_response.meta.next_token
+    
+    # this method is OK except it doesn't work if there are no replies.
+    #tweets.append(tweet_model(includes, list(filter(lambda t: t['id'] == tweet_id, includes.get('tweets')))[0], me))
+
+    
+    #related_tweets = [] # derived from includes
+    
+    tweets.reverse()
+    
+    
+    
+    query = {}
+    
+    if next_token:
+        query = {
+            **query,
+            # FIXME only_replies
+            'next_data_url': url_for('.get_tweet_html', tweet_id=tweet_id, pagination_token=next_token, only_replies = '1' if only_replies else '0', author_id = tweets[0].author_id),
+            'next_page_url': url_for('.get_tweet_html', tweet_id=tweet_id, view=view, pagination_token=next_token)
+        }
         
-        #print(response_json)
-    #run_script('on_user_seen', {'twitter_user': g.twitter_user, 'users': response_json})
+    user = {
+
+    }
+    
+    if 'HX-Request' in request.headers:
+
+        
+        # console.log(res.tweets.map(t => t.text).join("\n\n-\n\n"))
+        return render_template('partial/tweets-timeline.html', user = user, tweets = tweets, query = query)
+    else:
+        page_nav = [
+            dict(
+                href=url_for('.get_tweet_html', tweet_id=tweets[0].conversation_id, view='thread'),
+                label = 'author thread',
+                order = 10
+            ),
+            dict(
+                href = url_for('.get_tweet_html', tweet_id=tweets[0].conversation_id, view='conversation'),
+                label = 'full convo',
+                order = 20
+            )
+        ]
+        
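+        # FIXME: tweets_response is not defined in this route; tweet/user below
+        # need to come from the content-system pages fetched above instead.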
+        tweet = tweets_response.data[0]
+        user = list(filter(lambda u: u.id == tweet.author_id, tweets_response.includes.users))[0]
+        
+        source_url = f'https://twitter.com/{user.username}/status/{tweet_id}'
+        title = f'Tweet by {user.name} at {tweet.created_at}'
+        
+        opengraph_info = dict(
+            type = 'webpage', # threads might be article
+            url = source_url,
+            title = title,
+            description = tweet.text,
+            image = user.profile_image_url
+        )
+        
+        
+        if view == 'replies':
+            tweet = tweets[0]
+            
+            if tweet.id == '1608510741941989378':
+                unreplied = [
+                    UnrepliedSection(
+                        description = "Not clear what GS is still.",
+                        span = (40, 80)
+                    )
+                ]
+                tweet = replace(tweet,
+                    unreplied = unreplied
+                    )
+            
+            expand_parts = request.args.get('expand')
+            if expand_parts:
+                expand_parts = expand_parts.split(',')
+            
+            def reply_to_thread_item (fi):
+                nonlocal expand_parts
+                
+                if fi.id == '1609714342211244038':
+                    print(f'reply_to_thread_item id={fi.id}')
+                    unreplied = [
+                        UnrepliedSection(
+                            description = "Is there proof of this claim?",
+                            span = (40, 80)
+                        )
+                    ]
+                    fi = replace(fi,
+                        unreplied = unreplied
+                        )
+                
+                children = None
+                
+                if expand_parts and len(expand_parts) and fi.id == expand_parts[0]:
+                    expand_parts = expand_parts[1:]
+                    
+                    print(f'getting expanded replied for tweet={fi.id}')
+                    
+                    expanded_replies_response = tweet_source.get_thread(fi.id,
+                                                only_replies=True,
+                                                return_dataclass=True)
+                    if expanded_replies_response.data:
+                        print('we got expanded responses data')
+                        
+                        children =  list(map(lambda t: tweet_model_dc_vm(expanded_replies_response.includes, t, g.me, expand_path=request.args.get('expand'), reply_depth=1), expanded_replies_response.data))
+                        children = list(map(reply_to_thread_item, children))
+                
+                
+                return ThreadItem(feed_item=fi, children=children)
+                
+            children = list(map(reply_to_thread_item, tweets[1:]))
+            
+            root = ThreadItem(
+                feed_item = tweet,
+                children = children
+            )
+            return render_template('tweet-thread.html', user = user, root = root, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
+        else:
+            return render_template(f'tweet-collection{theme_variant}.html', user = user, tweets = tweets, query = query, page_nav=page_nav, skip_embed_replies=skip_embed_replies, opengraph_info=opengraph_info)
+
+
+
+
+@twitter_app.route('/followers/<user_id>.html', methods=['GET'])
+def get_followers_html (user_id):
     
-    #followers = list(map(lambda f: f['id'], response_json.get('data')))
-    followers = response_json.data
+    me = g.me
     
-    followers = list(map(user_model_dc, followers))
+    content_params =  cleandict({
+        'max_results': int(request.args.get('max_results', 1000)),
+        'pagination_token': request.args.get('pagination_token')
+    })
+    
+    followers_page = get_content(f'twitter:followers:user:{user_id}', **content_params)
+    
+    followers = followers_page.items
+    
+    content_params['pagination_token'] = followers_page.next_token
+    
+    query = {
+        'next_data_url': url_for('.get_followers_html', me=me, user_id=user_id, **content_params)
+    }
+    
+    if 'HX-Request' in request.headers:
+        return render_template('partial/users-list.html', users=followers, query=query)
+    else:
+        return render_template('followers.html', users=followers, query=query)
     
-    return render_template('following.html', users=followers)
 
 
 @twitter_app.route('/following/<user_id>.html', methods=['GET'])
 def get_following_html (user_id):
     
-    if not g.twitter_user:
-        return 'need to log in.', 403
+    me = g.me
     
-    token = g.twitter_user['access_token']
+    content_params =  cleandict({
+        'max_results': int(request.args.get('max_results', 1000)),
+        'pagination_token': request.args.get('pagination_token')
+    })
     
-    social_source = TwitterApiV2SocialGraph(token)
+    following_page = get_content(f'twitter:following:users:{user_id}', **content_params)
     
-    response_json = social_source.get_following(user_id, max_results=1000, return_dataclass=True)
+    following = following_page.items
     
-    ts = int(time.time() * 1000)
-    with open(f'{DATA_DIR}/cache/following_{user_id}_{ts}.json', 'wt') as f:
-        f.write(json.dumps(response_json))
-    
-    #print(response_json)
-    #run_script('on_user_seen', {'twitter_user': g.twitter_user, 'users': response_json})
+    content_params['pagination_token'] = following_page.next_token
     
-    #following = list(map(lambda f: f['id'], response_json.get('data')))
-
-    following = list(map(user_model_dc, response_json.data))
+    query = {
+        'next_data_url': url_for('.get_following_html', me=me, user_id=user_id, **content_params)
+    }
     
-    return render_template('following.html', users=following)
+    if 'HX-Request' in request.headers:
+        return render_template('partial/users-list.html', users=following, query=query)
+    else:
+        return render_template('following.html', users=following, query=query)
     
 
 # ---------------------------------------------------------------------------------------------------------
@@ -601,14 +777,10 @@ def get_timeline_home_html (variant = "reverse_chronological", pagination_token=
         'end_time': request.args.get('end_time')
     })
     
-    tweet_source = ApiV2TweetSource(token)
-    response = tweet_source.get_home_timeline(user_id, **tq)
-    
-    #print(json.dumps(response_json, indent=2))
+    timeline_page = get_content(f'twitter:feed:reverse_chronological:user:{user_id}', me=g.me, **tq)
     
-    includes = response.includes
-    tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, g.me), response.data))
-    next_token = response.meta.next_token
+    next_token = timeline_page.next_token
+    tweets = timeline_page.items
     
     tq['pagination_token'] = next_token
     

+ 1 - 1
extensions/twitter_v2_facade/view_model.py

@@ -176,7 +176,7 @@ def tweet_model_dc_vm (includes: TweetExpansions, tweet: Tweet, me, my_url_for=u
             retweeted_tweet_id = retweet_of[0].id,
             source_retweeted_by_url = 'https://twitter.com/{}'.format(user.username),
             retweeted_by = user.name,
-            retweeted_by_url = my_url_for('.get_profile_html', user_id=user.id)
+            retweeted_by_url = my_url_for('twitter_v2_facade.get_profile_html', user_id=user.id)
             )
     
     

+ 22 - 2
hogumathi_app/__main__.py

@@ -13,6 +13,7 @@ from flask_cors import CORS
 
 from .web import api
 
+from . import content_system as h_cs
 from . import item_collections, view_model as h_vm
 from .item_collections import item_collections_bp
 
@@ -125,6 +126,24 @@ instagram_enabled = False
 
 add_account_enabled = True
 
+
+def record_content_analytics (content_id, content):
+    if type(content) == h_vm.CollectionPage:
+        prefix = None
+        if content_id.startswith('twitter:feed:user:') or content_id.startswith('twitter:bookmarks:') or content_id.startswith('twitter:feed:reverse_chronological:user:'):
+            prefix = 'twitter:tweet:'
+        if not prefix:
+            return
+            
+        for item in content.items:
+            h_cs.invoke_hooks('got_content', f'{prefix}{item.id}', item)
+    elif type(content) == h_vm.FeedItem:
+        print(f'record_content_analytics: feed item: {content_id}')
+    elif type(content) == h_vm.FeedServiceUser:
+        print(f'record_content_analytics: user: {content_id}')
+    else:
+        print(f'record_content_analytics: unknown type: {content_id}')
+
 if __name__ == '__main__':
     glitch_enabled = os.environ.get('PROJECT_DOMAIN') and True
     
@@ -156,8 +175,9 @@ if __name__ == '__main__':
     oauth2_login.app_access_token = os.environ.get("BEARER_TOKEN")
     oauth2_login.app_consumer_key = os.environ.get("TWITTER_CONSUMER_KEY")
     oauth2_login.app_secret_key = os.environ.get("TWITTER_CONSUMER_SECRET")
-
-
+    
+    h_cs.register_hook('got_content', record_content_analytics)
+    
     @api.before_request
     def add_config ():
         g.twitter_enabled = twitter_enabled

+ 44 - 25
hogumathi_app/content_system.py

@@ -25,8 +25,12 @@ https://www.bonobo-project.org/
 
 import re
 import inspect
+from functools import lru_cache
+import time
 
-
+def get_ttl_hash(seconds=3600):
+    """Return the same value withing `seconds` time period"""
+    return round(time.time() / seconds)
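+
+# Intended use: callers pass a *fresh* ttl_hash so the lru_cache key rolls over
+# once per period, e.g. get_content('twitter:tweet:1', ttl_hash=get_ttl_hash(60)).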
 
 class ContentSystem:
     def __init__ (self):
@@ -44,7 +48,7 @@ class ContentSystem:
 
         self.content_sources[ source_id ] = [id_prefix, content_source_fn, id_pattern, source_id, weight]
 
-
+    @lru_cache(maxsize=1024) # NOTE: mutating return value mutates cached value
     def find_content_id_args (self, id_pattern, content_id):
         id_args = re.fullmatch(id_pattern, content_id)
         if not id_args:
@@ -56,8 +60,9 @@ class ContentSystem:
             args = id_args.groups()
         
         return args, kwargs
-        
-    def get_content (self, content_id, content_source_id=None, *extra_args, **extra_kwargs):
+    
+    @lru_cache(maxsize=64) # NOTE: mutating return value mutates cached value
+    def get_content (self, content_id, content_source_id=None, ttl_hash=get_ttl_hash(60), *extra_args, **extra_kwargs):
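+        # NOTE: default arguments are evaluated once, at definition time, so this
+        # ttl_hash never rolls over unless callers pass get_ttl_hash(...) per call.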
         print(f'get_content {content_id}')
         #source_ids = list(self.content_sources.keys())
         #source_ids.sort(key=lambda id_prefix: len(id_prefix), reverse=True)
@@ -77,13 +82,18 @@ class ContentSystem:
             
             source_content_id = content_id[len(id_prefix):]
             
-            print(f'get_content {content_id} from source {source_id}, resolves to {source_content_id} ( weight={weight})')
+            # HACK
+            if not id_prefix.endswith(':') and source_content_id:
+                continue
+            
+            print(f'get_content {content_id} from source {source_id}, resolves to {source_content_id} (weight={weight})')
             
             args, kwargs = self.find_content_id_args(id_pattern, source_content_id)
             
+            # HACK
             if id_prefix.endswith(':') and not args and not kwargs:
                 continue
-            
+                
             if extra_args:
                 args += extra_args
             
@@ -101,8 +111,9 @@ class ContentSystem:
                 self.invoke_hooks('got_content', content_id, content)
                 
                 return content
-
-    def get_all_content (self, content_ids):
+    
+    @lru_cache(maxsize=8) # NOTE: mutating return value mutates cached value
+    def get_all_content (self, content_ids, enable_bulk_fetch=False, ttl_hash=get_ttl_hash(60)):
         """
         Get content from all sources, using a grouping call if possible.
         
@@ -115,9 +126,9 @@ class ContentSystem:
         from a Swipe file.
         """
         
-        return self.get_all_content2(content_ids)
+        return self.get_all_content2(content_ids, enable_bulk_fetch=enable_bulk_fetch)
         
-    def get_all_content2 (self, content_collection_ids, content_args = None, max_results = None):
+    def get_all_content2 (self, content_collection_ids, content_args = None, max_results = None, enable_bulk_fetch=False):
         """
         Takes a list of collection IDs and content_args is a map of (args, kwargs) keyed by collection ID.
         
@@ -154,16 +165,17 @@ class ContentSystem:
         for content_id in content_collection_ids:
             
             is_bulk = False
-            for bulk_prefix in bulk_prefixes:
-                if content_id.startswith(bulk_prefix):
-                    bulk_content_id = bulk_prefixes[ bulk_prefix ]
-                    if not bulk_content_id in bulk_requests:
-                        bulk_requests[ bulk_content_id ] = []
-                    bulk_requests[ bulk_content_id ].append(content_id)
-                    
-                    # max size for a content source...
-                    
-                    is_bulk = True
+            if enable_bulk_fetch:
+                for bulk_prefix in bulk_prefixes:
+                    if content_id.startswith(bulk_prefix):
+                        bulk_content_id = bulk_prefixes[ bulk_prefix ]
+                        if not bulk_content_id in bulk_requests:
+                            bulk_requests[ bulk_content_id ] = []
+                        bulk_requests[ bulk_content_id ].append(content_id)
+                        
+                        # max size for a content source...
+                        
+                        is_bulk = True
                     
             if is_bulk:
                 continue
@@ -180,7 +192,7 @@ class ContentSystem:
             
             
             
-            bulk_response = self.get_content(bulk_content_id, content_ids=content_ids) # FIXME me=... workaround, provide bulk id in args map
+            bulk_response = self.get_content(bulk_content_id, content_ids=tuple(content_ids)) # FIXME me=... workaround, provide bulk id in args map
             
             print(f'bulk_response: {bulk_response}')
             
@@ -221,7 +233,10 @@ class ContentSystem:
             #except TypeError as e:
             #    print ('tried to call a hook with wrong args. no problem')
             #    continue
-                
+
+
+
+
 # The app was coded before we turned this into a class...
 # so we proxy calls with the old interface to this default instance.
 DEFAULT = ContentSystem()
@@ -238,10 +253,14 @@ def get_content (content_id, content_source_id=None, *extra_args, **extra_kwargs
     print('compat get_content')
     return DEFAULT.get_content(content_id, content_source_id, *extra_args, **extra_kwargs)
     
-def get_all_content (content_ids):
+def get_all_content (content_ids, enable_bulk_fetch=False):
     print('compat get_all_content')
-    return DEFAULT.get_all_content(content_ids)
+    return DEFAULT.get_all_content(content_ids, enable_bulk_fetch=enable_bulk_fetch)
     
 def register_hook (hook_type, hook_fn, *extra_args, **extra_kwargs):
     print('compat register_hook')
-    return DEFAULT.register_hook(hook_type, hook_fn, *extra_args, **extra_kwargs)
+    return DEFAULT.register_hook(hook_type, hook_fn, *extra_args, **extra_kwargs)
+    
+def invoke_hooks (hook_type, *args, **kwargs):
+    print('compat invoke_hooks')
+    return DEFAULT.invoke_hooks(hook_type, *args, **kwargs)

+ 1 - 1
hogumathi_app/item_collections.py

@@ -419,7 +419,7 @@ def get_collection (collection_id, me=None, pagination_token:str = None, max_res
     items = collection['items'][first_idx:last_idx]
     content_ids = list(map(lambda item: item['id'], items))
     
-    content_responses = get_all_content( content_ids )
+    content_responses = get_all_content( tuple(content_ids), enable_bulk_fetch=True )
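+    # content_ids is passed as a tuple so the argument is hashable for the
+    # lru_cache on get_all_content.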
     
     feed_items = list(map(lambda item: expand_item2(item, me, content_responses), items))
     

BIN
hogumathi_app/static/img/brand/harlanji_logo.png


+ 16 - 0
hogumathi_app/static/tweets-ui.js

@@ -1,3 +1,19 @@
+function getSelectedTweetIds () {
+	var tweetIds = Array.from(document.querySelectorAll("*[name='select_tweet']")).filter(cb => cb.checked).map(cb => cb.value)
+	
+	return tweetIds
+}
+
+function selectAllTweets (isSelected) {
+	if (isSelected == undefined) {
+		isSelected = true;
+	}
+	
+	var tweetSelectionEls = Array.from(document.querySelectorAll("*[name='select_tweet']"));
+	
+	tweetSelectionEls.forEach(tsEl => tsEl.checked = isSelected)
+}
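+
+// Assumed wiring (not shown in this commit): a "select all" checkbox can call
+// selectAllTweets(this.checked), and bulk actions can read getSelectedTweetIds().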
+
 function swapVideoPlayer(imgEl, videoUrl, videoType) {
   
   if (videoType == 'video/youtube' || videoType == 'video/bitchute') {

+ 1 - 1
hogumathi_app/templates/followers.html

@@ -2,7 +2,7 @@
 
 {% block content %}
 
-{% if twitter_live_enabled %}
+{% if False and twitter_live_enabled %}
 <div class="w-100">
 <h2>Follower activity</h2>
 

+ 4 - 0
hogumathi_app/templates/partial/timeline-tweet.html

@@ -1,6 +1,10 @@
 
 <div class="dtc w-10">
 	<img loading="lazy"  src="{{ tweet.avi_icon_url }}" alt="Avi">
+	{% if True or enable_select_tweets %}
+	<br>
+	<input type="checkbox" name="select_tweet" value="{{ tweet.id }}">
+	{% endif %}
 </div>
 <div class="dtc w-90 v-top">
 	{% if tweet.title %}

+ 19 - 0
hogumathi_app/templates/partial/users-list.html

@@ -6,4 +6,23 @@
 	{% include "partial/user-card.html" %}
 	</li>
 {% endfor %}
+
+{% if query.next_data_url %}
+
+	<li style="height: 50px; vertical-align: middle"
+		hx-get="{{ query.next_data_url }}"
+		hx-trigger="revealed"
+		hx-swap="outerHTML"
+		hx-select="ul.users > li"
+		>
+		<center style="height: 100%">
+
+		<span class="js-only">
+		Loading more users...
+		</span>
+
+		</center>
+	</li>
+{% endif %}
+
 </ul>

+ 6 - 6
hogumathi_app/test/unit/hogumathi_app_test/test_content_system.py

@@ -70,7 +70,7 @@ def test_bulk_content ():
     h_cs.register_content_source('twitter:tweet:', tweet_cs)
     h_cs.register_content_source('twitter:tweets', tweets_cs, id_pattern='')
     
-    tweets = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2', 'fake:1'])
+    tweets = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2', 'fake:1'], enable_bulk_fetch=True)
     
     
     assert(tweets == {
@@ -98,7 +98,7 @@ def test_bulk_content_partial_miss ():
     h_cs.register_content_source('twitter:tweets', tweets_cs, id_pattern='')
     h_cs.register_content_source('', tweets_cache_cs, id_pattern='(.+)', weight=99999)
     
-    tweets = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2', 'fake:1'])
+    tweets = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2', 'fake:1'], enable_bulk_fetch=True)
     
     
     assert(tweets == {
@@ -120,7 +120,7 @@ def test_hooks_bulk_content ():
     
     h_cs.register_hook('got_content', got_content)
     
-    content = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2', 'fake:1'])
+    content = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2', 'fake:1'], enable_bulk_fetch=True)
     
     assert(content == {
         'twitter:tweet:1': {'text': 'one', 'id': '1'},
@@ -157,7 +157,7 @@ def test_hooks_bulk_content_multi_bulk ():
     content = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2',
                                     'youtube:video:3', 'youtube:video:4',
                                     'instagram:post:3',
-                                    'fake:1'])
+                                    'fake:1'], enable_bulk_fetch=True)
     
     assert(content == {
         'twitter:tweet:1': {'text': 'one', 'id': '1'},
@@ -285,14 +285,14 @@ def test_cache_bulk ():
     content = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2',
                                     'youtube:video:3', 'youtube:video:4',
                                     'instagram:post:3',
-                                    'fake:1'])
+                                    'fake:1'], enable_bulk_fetch=True)
     
     assert(cache_hits == [])
     
     content2 = h_cs.get_all_content(['twitter:tweet:1', 'twitter:tweet:2',
                                     'youtube:video:3', 'youtube:video:4',
                                     'instagram:post:3',
-                                    'fake:1'])
+                                    'fake:1'], enable_bulk_fetch=True)
     
     assert(content == content2)
     

+ 3 - 3
hogumathi_app/view_model.py

@@ -3,7 +3,7 @@
 """
 
 from dataclasses import dataclass, asdict, replace
-from typing import List, Dict, Optional, Tuple
+from typing import List, Dict, Optional, Tuple, Union
 
 import re
 
@@ -253,7 +253,7 @@ class Collection:
 
 @dataclass
 class CollectionItem:
-    item: List[FeedServiceUser|FeedItem|ThreadItem|Collection] = None
+    item: List[Union[FeedServiceUser,FeedItem,ThreadItem,Collection]] = None
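+    # typing.Union keeps these annotations working on Python < 3.10, where the
+    # PEP 604 "X | Y" syntax is unavailable.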
     sort_order: Optional[int] = None
     after_id: Optional[str] = None
 
@@ -266,7 +266,7 @@ class CollectionPage:
     """
     
     id: str
-    items: Optional[List[FeedServiceUser|FeedItem|ThreadItem|CollectionItem|Collection]] = None
+    items: Optional[List[Union[FeedServiceUser,FeedItem,ThreadItem,CollectionItem,Collection]]] = None
     next_token: Optional[str] = None
     last_dt: Optional[str] = None
     total_count: Optional[int] = None

+ 1 - 0
hogumathi_app/web.py

@@ -8,6 +8,7 @@ import requests
 from flask import Flask, g, redirect, url_for, render_template, jsonify, request, send_from_directory
 
 from . import content_system as h_cs
+from . import view_model as h_vm
 
 api = Flask(__name__, static_url_path='')
 

+ 15 - 21
lib/twitter_v2/archive.py

@@ -56,15 +56,7 @@ class ArchiveTweetSource:
         sql_params.append(max_results)
         
         
-        db = self.get_db()
-        
-        cur = db.cursor()
-        cur.row_factory = sqlite3.Row
-        
-        print(sql)
-        print(sql_params)
-        
-        results = list(map(dict, cur.execute(sql, sql_params).fetchall()))
+        results = self.search_tweets_sql(sql, sql_params)
         
         return results
     
@@ -87,22 +79,24 @@ class ArchiveTweetSource:
         
         sql = "select * from tweet where {}".format(where_sql)
         
-        db = self.get_db()
-        
-        cur = db.cursor()
-        cur.row_factory = sqlite3.Row
-        
-        results = list(map(dict, cur.execute(sql, sql_params).fetchall()))
+        results = self.search_tweets_sql(sql, sql_params)
         
         results.sort(key=lambda t: ids.index(t['id']))
         
         return results
     
-    def search_tweets (self,
-                       query,
-                       since_id = None,
-                       max_results = 10,
-                       sort_order = None
+    def search_tweets_sql (self,
+                       sql,
+                       sql_params = []
                        ):
         
-        return
+        with self.get_db() as db:
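+            # NOTE: sqlite3's connection context manager commits or rolls back,
+            # but does not close the connection.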
+            cur = db.cursor()
+            cur.row_factory = sqlite3.Row
+            
+            
+            results = list(map(dict, cur.execute(sql, sql_params).fetchall()))
+        
+        print(f'search_tweets_sql {len(results)}')
+        
+        return results