Browse Source

snapshot 2023-07-16

Harlan Iverson 1 year ago
parent
commit
8da59d54fd
46 changed files with 6858 additions and 103 deletions
  1. +35 -0 example-data/bookmarks-ispoogedaily.json
  2. +0 -0 example-data/dm-response.json
  3. +1 -0 example-data/ig-embed-test.html
  4. +7 -0 example-data/instagram-examples/ig-embed-CnFcCdZO-b3.html
  5. +7 -0 example-data/instagram-examples/ig-embed-Cp4tONOOQqz.html
  6. +7 -0 example-data/instagram-examples/ig-embed-captioned-CoYH5GhuQMp-1080.html
  7. +8 -0 example-data/instagram-examples/ig-embed-captioned-CoYH5GhuQMp.html
  8. +6 -0 example-data/instagram-examples/ig-embed-captioned-Cos_6WWOluO.html
  9. +6 -0 example-data/instagram-examples/ig-embed-captioned-Cp4tONOOQqz.html
  10. +0 -0 example-data/instagram-examples/ig-official-embed.html
  11. +198 -0 example-data/instagram-examples/init-struct-CoYH5GhuQMp.json
  12. +0 -0 example-data/instagram-examples/profile-ispoogedaily-b.html
  13. +0 -0 example-data/instagram-examples/profile-ispoogedaily-posts-2-b.json
  14. +0 -0 example-data/instagram-examples/profile-ispoogedaily-posts-2.json
  15. +0 -0 example-data/instagram-examples/profile-ispoogedaily-posts-b.json
  16. +0 -0 example-data/instagram-examples/profile-ispoogedaily-posts.json
  17. +0 -0 example-data/instagram-examples/profile-ispoogedaily.html
  18. +1474 -0 example-data/instagram-examples/script-CoYH5GhuQMp-ast.json
  19. +1474 -0 example-data/instagram-examples/script-Cos_6WWOluO-ast.json
  20. +1474 -0 example-data/instagram-examples/script-Cp4tONOOQqz-ast.json
  21. +1474 -0 example-data/instagram-examples/script-CpOdc8vOQMT-ast.json
  22. +0 -0 example-data/instagram-ispoogedaily.html
  23. +0 -0 example-data/teespring-products.json
  24. +0 -0 example-data/tweets-counts-recent-ispoogedaily-hourly
  25. +186 -0 example-data/tweets-ispoogedaily.json
  26. +1 -0 example-data/tweets-multiple-with-attachments.json
  27. +0 -0 example-data/tweets-timeline-home_page-1
  28. +0 -0 example-data/tweets-timeline-home_page-1.cards.json
  29. +0 -0 example-data/tweets-timeline-ispoogedaily_full
  30. +0 -0 example-data/tweets-timeline-ispoogedaily_page-1
  31. +0 -0 example-data/tweets-timeline-ispoogedaily_page-2
  32. +36 -0 example-data/twitter-embed-iframe.html
  33. +1 -0 example-data/unofficial-oembed-tweet.json
  34. +226 -31 extensions/twitter_v2_facade/content_source.py
  35. +87 -0 extensions/twitter_v2_facade/facade.py
  36. +42 -0 extensions/twitter_v2_facade/templates/gaps.html
  37. +22 -6 extensions/twitter_v2_facade/view_model.py
  38. +27 -1 hogumathi_app/__init__.py
  39. +10 -0 hogumathi_app/__main__.py
  40. +2 -2 hogumathi_app/content_system.py
  41. +8 -0 hogumathi_app/static/theme/base.css
  42. +4 -0 hogumathi_app/templates/partial/timeline-tweet.html
  43. +1 -1 hogumathi_app/templates/partial/tweets-timeline.html
  44. +2 -0 hogumathi_app/view_model.py
  45. +10 -6 hogumathi_app/web.py
  46. +22 -56 lib/twitter_v2/api.py

+ 35 - 0
example-data/bookmarks-ispoogedaily.json

@@ -0,0 +1,35 @@
+{
+  "data": [
+    {
+      "created_at": "2021-10-24T20:11:44.000Z",
+      "id": "1452367223570436108",
+      "text": "Go forward.\n\nI miss my career of 20 years more than my dead family. I've spent years homeless, through a pandemic. Fuck. Could have came at a more convenient time.\n\nIf I'm not on drugs or even alcohol then you don't need anything for your stress and can forget your depression.",
+      "author_id": "14520320"
+    },
+    {
+      "created_at": "2018-07-24T04:07:21.000Z",
+      "id": "1021607744682565635",
+      "text": "Productivity is a philosophy, and few have thought on it more deeply than a worker who helped change Google Calendar https://t.co/dnizqHa44q",
+      "author_id": "571202103"
+    }
+  ],
+  "includes": {
+    "users": [
+      {
+        "username": "iSpoogeDaily",
+        "created_at": "2008-04-25T00:50:14.000Z",
+        "id": "14520320",
+        "name": "Harlan 🐝 Small Tech Sensei"
+      },
+      {
+        "username": "Medium",
+        "created_at": "2012-05-04T20:16:39.000Z",
+        "id": "571202103",
+        "name": "Medium"
+      }
+    ]
+  },
+  "meta": {
+    "result_count": 2
+  }
+}
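
The payload above mirrors the Twitter API v2 bookmarks response shape that this commit starts typing with dacite. A minimal sketch of parsing it, using simplified stand-in dataclasses rather than the real twitter_v2.types definitions:

import json
from dataclasses import dataclass
from typing import List

import dacite  # used by content_source.py in this commit

# Simplified stand-ins; the real definitions live in twitter_v2.types.
@dataclass
class Tweet:
    id: str
    text: str
    author_id: str
    created_at: str

@dataclass
class BookmarksResponse:
    data: List[Tweet]

with open('example-data/bookmarks-ispoogedaily.json') as f:
    raw = json.load(f)

# dacite ignores keys not declared on the dataclass (includes, meta).
resp = dacite.from_dict(data_class=BookmarksResponse, data=raw)
print([t.id for t in resp.data])  # ['1452367223570436108', '1021607744682565635']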

File diff suppressed because it is too large
+ 0 - 0
example-data/dm-response.json


File diff suppressed because it is too large
+ 1 - 0
example-data/ig-embed-test.html


File diff suppressed because it is too large
+ 7 - 0
example-data/instagram-examples/ig-embed-CnFcCdZO-b3.html


File diff suppressed because it is too large
+ 7 - 0
example-data/instagram-examples/ig-embed-Cp4tONOOQqz.html


File diff suppressed because it is too large
+ 7 - 0
example-data/instagram-examples/ig-embed-captioned-CoYH5GhuQMp-1080.html


File diff suppressed because it is too large
+ 8 - 0
example-data/instagram-examples/ig-embed-captioned-CoYH5GhuQMp.html


File diff suppressed because it is too large
+ 6 - 0
example-data/instagram-examples/ig-embed-captioned-Cos_6WWOluO.html


File diff suppressed because it is too large
+ 6 - 0
example-data/instagram-examples/ig-embed-captioned-Cp4tONOOQqz.html


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-examples/ig-official-embed.html


File diff suppressed because it is too large
+ 198 - 0
example-data/instagram-examples/init-struct-CoYH5GhuQMp.json


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-examples/profile-ispoogedaily-b.html


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-examples/profile-ispoogedaily-posts-2-b.json


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-examples/profile-ispoogedaily-posts-2.json


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-examples/profile-ispoogedaily-posts-b.json


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-examples/profile-ispoogedaily-posts.json


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-examples/profile-ispoogedaily.html


File diff suppressed because it is too large
+ 1474 - 0
example-data/instagram-examples/script-CoYH5GhuQMp-ast.json


File diff suppressed because it is too large
+ 1474 - 0
example-data/instagram-examples/script-Cos_6WWOluO-ast.json


File diff suppressed because it is too large
+ 1474 - 0
example-data/instagram-examples/script-Cp4tONOOQqz-ast.json


File diff suppressed because it is too large
+ 1474 - 0
example-data/instagram-examples/script-CpOdc8vOQMT-ast.json


File diff suppressed because it is too large
+ 0 - 0
example-data/instagram-ispoogedaily.html


File diff suppressed because it is too large
+ 0 - 0
example-data/teespring-products.json


File diff suppressed because it is too large
+ 0 - 0
example-data/tweets-counts-recent-ispoogedaily-hourly


+ 186 - 0
example-data/tweets-ispoogedaily.json

@@ -0,0 +1,186 @@
+{
+  "data": [
+    {
+      "text": "Why did I avoid Python?\n\nBecause I didn't like the whitespace sensitive syntax.\n\nIt's fine.\n\nPython is awesome.\n\nThe Virtual Env thing is a little wonky but NPM is just... the JS ecosystem is like Flash back in the day.\n\nPython development reminds me more of PHP or Clojure.",
+      "id": "1543997476767776772",
+      "author_id": "14520320",
+      "created_at": "2022-07-04T16:37:39.000Z"
+    },
+    {
+      "text": "RT @SaveYourSons: https://t.co/mJnQ3i3X4Q",
+      "id": "1543957467813535745",
+      "author_id": "14520320",
+      "created_at": "2022-07-04T13:58:40.000Z",
+      "entities": {
+        "mentions": [
+          {
+            "start": 3,
+            "end": 16,
+            "username": "SaveYourSons",
+            "id": "1291732240456650754"
+          }
+        ]
+      },
+      "attachments": {
+        "media_keys": [
+          "3_1543634251228250113"
+        ]
+      },
+      "referenced_tweets": [
+        {
+          "type": "retweeted",
+          "id": "1543634254743191554"
+        }
+      ]
+    },
+    {
+      "text": "RT @PsychePoetry: Creativity demands deep absorption of the mind in an activity without time constraint.\n\nProductivity demands a mechanisat…",
+      "id": "1543956355450560514",
+      "author_id": "14520320",
+      "created_at": "2022-07-04T13:54:15.000Z",
+      "entities": {
+        "mentions": [
+          {
+            "start": 3,
+            "end": 16,
+            "username": "PsychePoetry",
+            "id": "1478976688344080385"
+          }
+        ]
+      },
+      "referenced_tweets": [
+        {
+          "type": "retweeted",
+          "id": "1543933326670438401"
+        }
+      ]
+    },
+    {
+      "text": "I've never Tweeted on alcohol but I've Tweeted in closed rooms with black mold.\n\nWhat data to we need to find the Tweets and their change of behavior?",
+      "id": "1543463114372513795",
+      "author_id": "14520320",
+      "created_at": "2022-07-03T05:14:17.000Z"
+    },
+    {
+      "text": "https://t.co/KW6BVVDWZG",
+      "id": "1543459814780665856",
+      "author_id": "14520320",
+      "created_at": "2022-07-03T05:01:10.000Z"
+    },
+    {
+      "text": "A house divided will surely fall.\n\n-The bible or some shit.",
+      "id": "1543456738556497920",
+      "in_reply_to_user_id": "14520320",
+      "author_id": "14520320",
+      "created_at": "2022-07-03T04:48:57.000Z",
+      "referenced_tweets": [
+        {
+          "type": "replied_to",
+          "id": "1543456011398299650"
+        }
+      ]
+    },
+    {
+      "text": "My critics and I can't exist within the same circles.\n\nI don't make the rules.\n\nThey cut, I pick.\n\nI don't make the rules.",
+      "id": "1543456011398299650",
+      "author_id": "14520320",
+      "created_at": "2022-07-03T04:46:03.000Z"
+    },
+    {
+      "text": "I used to just accept people and try things they wanted to do for fun.\n\nDon't want to be closed minded, right?\n\nI'm a curious and agnostic fellow.\n\nTurns out that if you play with trash\n\nYour hands get all nasty.\n\nI wasn't taught to avoid shit people\n\nBy shit parents.",
+      "id": "1543452665514020864",
+      "in_reply_to_user_id": "14520320",
+      "author_id": "14520320",
+      "created_at": "2022-07-03T04:32:46.000Z",
+      "referenced_tweets": [
+        {
+          "type": "replied_to",
+          "id": "1543450176387530753"
+        }
+      ]
+    },
+    {
+      "text": "My life smelled really good before I became unhoused\n\nAnd after I left my parents' house.\n\nI still remember the feeling of having a life that smells good.",
+      "id": "1543451552526090241",
+      "author_id": "14520320",
+      "created_at": "2022-07-03T04:28:20.000Z"
+    },
+    {
+      "text": "PUAs hold back from Tweeting on the weekend\n\nBecause they need to create scarcity\n\nAnd appear like they're getting the bang.",
+      "id": "1543450176387530753",
+      "author_id": "14520320",
+      "created_at": "2022-07-03T04:22:52.000Z"
+    }
+  ],
+  "includes": {
+    "users": [
+      {
+        "created_at": "2008-04-25T00:50:14.000Z",
+        "username": "iSpoogeDaily",
+        "id": "14520320",
+        "name": "Harlan 🐝 Small Tech Sensei"
+      },
+      {
+        "created_at": "2020-08-07T13:45:48.000Z",
+        "username": "SaveYourSons",
+        "id": "1291732240456650754",
+        "name": "Save Your Sons"
+      },
+      {
+        "created_at": "2022-01-06T06:28:37.000Z",
+        "username": "PsychePoetry",
+        "id": "1478976688344080385",
+        "name": "Psyche Poetry"
+      }
+    ],
+    "media": [
+      {
+        "media_key": "3_1543634251228250113",
+        "type": "photo"
+      }
+    ],
+    "tweets": [
+      {
+        "text": "https://t.co/mJnQ3i3X4Q",
+        "id": "1543634254743191554",
+        "author_id": "1291732240456650754",
+        "created_at": "2022-07-03T16:34:20.000Z",
+        "attachments": {
+          "media_keys": [
+            "3_1543634251228250113"
+          ]
+        }
+      },
+      {
+        "text": "Creativity demands deep absorption of the mind in an activity without time constraint.\n\nProductivity demands a mechanisation of routine. https://t.co/w5FyCtmCxk",
+        "id": "1543933326670438401",
+        "author_id": "1478976688344080385",
+        "created_at": "2022-07-04T12:22:44.000Z",
+        "referenced_tweets": [
+          {
+            "type": "quoted",
+            "id": "1543887851217199105"
+          }
+        ]
+      },
+      {
+        "text": "My critics and I can't exist within the same circles.\n\nI don't make the rules.\n\nThey cut, I pick.\n\nI don't make the rules.",
+        "id": "1543456011398299650",
+        "author_id": "14520320",
+        "created_at": "2022-07-03T04:46:03.000Z"
+      },
+      {
+        "text": "PUAs hold back from Tweeting on the weekend\n\nBecause they need to create scarcity\n\nAnd appear like they're getting the bang.",
+        "id": "1543450176387530753",
+        "author_id": "14520320",
+        "created_at": "2022-07-03T04:22:52.000Z"
+      }
+    ]
+  },
+  "meta": {
+    "result_count": 10,
+    "newest_id": "1543997476767776772",
+    "oldest_id": "1543450176387530753",
+    "next_token": "7140dibdnow9c7btw4228cb6zm75xcdzm8w4m73xi5cmr"
+  }
+}

+ 1 - 0
example-data/tweets-multiple-with-attachments.json

@@ -0,0 +1 @@
+{"data": [{"conversation_id": "1565753737276039168", "text": "I don\u2019t like it when my editor likes my tweets when I\u2019m supposed to be working on the column it feels like a threat", "author_id": "17998052", "public_metrics": {"retweet_count": 15, "reply_count": 20, "like_count": 499, "quote_count": 11}, "id": "1565753737276039168", "created_at": "2022-09-02T17:29:15.000Z"}, {"conversation_id": "1565681377491689472", "attachments": {"media_keys": ["3_1565683665010556928"]}, "text": "What these trends look like, indexed to Feb 2020 employment levels. https://t.co/HHP5sFPcDg", "author_id": "14348157", "public_metrics": {"retweet_count": 8, "reply_count": 2, "like_count": 50, "quote_count": 2}, "id": "1565683801862246401", "referenced_tweets": [{"type": "replied_to", "id": "1565681377491689472"}], "created_at": "2022-09-02T12:51:22.000Z"}], "includes": {"users": [{"username": "Eve6", "created_at": "2008-12-09T18:31:51.000Z", "location": "Los Angeles, CA", "id": "17998052", "name": "eve6"}, {"username": "crampell", "created_at": "2008-04-10T03:37:39.000Z", "id": "14348157", "name": "Catherine Rampell"}], "media": [{"media_key": "3_1565683665010556928", "type": "photo"}], "tweets": [{"conversation_id": "1565681377491689472", "text": "Private-sector employment is now way above its pre-pandemic levels (+885k).\nPublic sector? Completely different story.\nState+local government combined are still down 647k jobs since Feb 2020\nLots of stories about teacher shortages - but the shortages are happening all across govt", "author_id": "14348157", "public_metrics": {"retweet_count": 150, "reply_count": 22, "like_count": 416, "quote_count": 34}, "id": "1565681377491689472", "created_at": "2022-09-02T12:41:44.000Z"}]}}
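
This single-line fixture shows the v2 expansion pattern: tweets reference media through attachments.media_keys, while the media objects themselves arrive in includes.media. A small sketch (not the app's code) of resolving that join:

import json

with open('example-data/tweets-multiple-with-attachments.json') as f:
    resp = json.load(f)

# Index the expanded media objects by their key.
media_by_key = {m['media_key']: m for m in resp['includes'].get('media', [])}

for tweet in resp['data']:
    keys = tweet.get('attachments', {}).get('media_keys', [])
    media = [media_by_key[k] for k in keys if k in media_by_key]
    print(tweet['id'], [m['type'] for m in media])
# 1565753737276039168 []
# 1565683801862246401 ['photo']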

File diff suppressed because it is too large
+ 0 - 0
example-data/tweets-timeline-home_page-1


File diff suppressed because it is too large
+ 0 - 0
example-data/tweets-timeline-home_page-1.cards.json


File diff suppressed because it is too large
+ 0 - 0
example-data/tweets-timeline-ispoogedaily_full


File diff suppressed because it is too large
+ 0 - 0
example-data/tweets-timeline-ispoogedaily_page-1


File diff suppressed because it is too large
+ 0 - 0
example-data/tweets-timeline-ispoogedaily_page-2


+ 36 - 0
example-data/twitter-embed-iframe.html

@@ -0,0 +1,36 @@
+<iframe 
+id="twitter-widget-0" scrolling="no" frameborder="0" allowtransparency="true" allowfullscreen="true" class="" style="position: static; visibility: visible; width: 550px; height: 755px; display: block; flex-grow: 1;" title="Twitter Tweet" src="
+
+https://platform.twitter.com/embed/Tweet.html?
+
+dnt=false
+&amp;
+embedId=twitter-widget-0
+&amp;
+features=eyJ0ZndfdGltZWxpbmVfbGlzdCI6eyJidWNrZXQiOltdLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X2ZvbGxvd2VyX2NvdW50X3N1bnNldCI6eyJidWNrZXQiOnRydWUsInZlcnNpb24iOm51bGx9LCJ0ZndfdHdlZXRfZWRpdF9iYWNrZW5kIjp7ImJ1Y2tldCI6Im9uIiwidmVyc2lvbiI6bnVsbH0sInRmd19yZWZzcmNfc2Vzc2lvbiI6eyJidWNrZXQiOiJvbiIsInZlcnNpb24iOm51bGx9LCJ0ZndfbWl4ZWRfbWVkaWFfMTU4OTciOnsiYnVja2V0IjoidHJlYXRtZW50IiwidmVyc2lvbiI6bnVsbH0sInRmd19leHBlcmltZW50c19jb29raWVfZXhwaXJhdGlvbiI6eyJidWNrZXQiOjEyMDk2MDAsInZlcnNpb24iOm51bGx9LCJ0ZndfZHVwbGljYXRlX3NjcmliZXNfdG9fc2V0dGluZ3MiOnsiYnVja2V0Ijoib24iLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X3ZpZGVvX2hsc19keW5hbWljX21hbmlmZXN0c18xNTA4MiI6eyJidWNrZXQiOiJ0cnVlX2JpdHJhdGUiLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X2xlZ2FjeV90aW1lbGluZV9zdW5zZXQiOnsiYnVja2V0Ijp0cnVlLCJ2ZXJzaW9uIjpudWxsfSwidGZ3X3R3ZWV0X2VkaXRfZnJvbnRlbmQiOnsiYnVja2V0Ijoib24iLCJ2ZXJzaW9uIjpudWxsfX0%3D
+&amp;
+frame=false
+&amp;
+hideCard=false
+&amp;
+hideThread=false
+&amp;
+id=1643883650629480448
+&amp;
+lang=en
+&amp;
+origin=https%3A%2F%2Fpublish.twitter.com%2F%3Fquery%3Dhttps%253A%252F%252Ftwitter.com%252FiSpoogeDaily%252Fstatus%252F1643883650629480448%26widget%3DTweet
+&amp;
+sessionId=2beb0e5b5d6e7ee4078045c5bcbbfe2947140f79
+&amp;
+theme=light
+&amp;
+widgetsVersion=aaf4084522e3a%3A1674595607486
+&amp;
+width=550px
+" data-tweet-id="1643883650629480448"></iframe>
+
+
+Features, base64-decoded:
+
+{"tfw_timeline_list":{"bucket":[],"version":null},"tfw_follower_count_sunset":{"bucket":true,"version":null},"tfw_tweet_edit_backend":{"bucket":"on","version":null},"tfw_refsrc_session":{"bucket":"on","version":null},"tfw_mixed_media_15897":{"bucket":"treatment","version":null},"tfw_experiments_cookie_expiration":{"bucket":1209600,"version":null},"tfw_duplicate_scribes_to_settings":{"bucket":"on","version":null},"tfw_video_hls_dynamic_manifests_15082":{"bucket":"true_bitrate","version":null},"tfw_legacy_timeline_sunset":{"bucket":true,"version":null},"tfw_tweet_edit_frontend":{"bucket":"on","version":null}}
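
The JSON above can be recovered from the iframe's features query parameter, which is URL-encoded base64. A quick sketch (the value is truncated here; use the full string from the iframe):

import base64
import json
from urllib.parse import unquote

# Truncated for brevity; paste the full `features` value from the iframe above.
features_param = "eyJ0ZndfdGltZWxpbmVfbGlzdCI6..."

decoded = json.loads(base64.b64decode(unquote(features_param)))
print(decoded['tfw_tweet_edit_backend'])  # {'bucket': 'on', 'version': None}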

+ 1 - 0
example-data/unofficial-oembed-tweet.json

@@ -0,0 +1 @@
+{"url":"https:\/\/twitter.com\/themarvlee\/status\/1519353215447355393","author_name":"MARVELOUS \uD83E\uDD4B","author_url":"https:\/\/twitter.com\/themarvlee","html":"\u003Cblockquote class=\"twitter-tweet\"\u003E\u003Cp lang=\"en\" dir=\"ltr\"\u003EThe foremost thing to understand is that the mind doesn’t exist as an an organ in the body, it’s just a term used to describe the thinking function of the brain - aka mental chattering with one’s self.\u003C\/p\u003E&mdash; MARVELOUS \uD83E\uDD4B (@themarvlee) \u003Ca href=\"https:\/\/twitter.com\/themarvlee\/status\/1519353215447355393?ref_src=twsrc%5Etfw\"\u003EApril 27, 2022\u003C\/a\u003E\u003C\/blockquote\u003E\n\u003Cscript async src=\"https:\/\/platform.twitter.com\/widgets.js\" charset=\"utf-8\"\u003E\u003C\/script\u003E\n","width":550,"height":null,"type":"rich","cache_age":"3153600000","provider_name":"Twitter","provider_url":"https:\/\/twitter.com","version":"1.0"}

+ 226 - 31
extensions/twitter_v2_facade/content_source.py

@@ -9,6 +9,8 @@ And the rest of the Taxonomy.
 
 from dataclasses import asdict
 from typing import List, Optional
+import dacite
+
 import os
 from flask import session, g, request
 import time
@@ -17,6 +19,8 @@ import json
 import sqlite3
 
 from twitter_v2.api import ApiV2TweetSource, TwitterApiV2SocialGraph, ApiV2ConversationSource
+import twitter_v2.types as tv2_types
+
 
 import hogumathi_app.view_model as h_vm
 
@@ -55,6 +59,7 @@ def init_cache_db ():
             accessed_at timestamp,
             query_id int,
             data text,
+            created_at timestamp,
             unique(id, query_id)
         )
         """)
@@ -94,7 +99,12 @@ def cache_tweets_response (response_tweets, query_type, auth_user_id, user_id =
     tweets = response_tweets.data or []
     users = includes and includes.users or []
     media = includes and includes.media or []
-    next_token = response_tweets.meta.next_token
+    ref_tweets = includes and includes.tweets or []
+    
+    if response_tweets.meta and 'next_token' in response_tweets.meta:
+        next_token = response_tweets.meta.next_token
+    else:
+        next_token = None
     
     db = sqlite3.connect(CACHE_PATH)
     cur = db.cursor()
@@ -151,15 +161,20 @@ def cache_tweets_response (response_tweets, query_type, auth_user_id, user_id =
                 id,
                 accessed_at,
                 query_id,
-                data
+                data,
+                created_at
                 )
             values (
-                ?,?,?,?
+                ?,?,?,?,?
                 )
             """,
-            [ tweet.id, now, query_id, tweet_json ]
+            # dateutil.parser.parse(tweet.created_at) if error
+            [ tweet.id, now, query_id, tweet_json, tweet.created_at ]
             )
     
+    # FIXME insert ref_tweets, mark in some way... is_ref = 1? sort_order = NULL?
+    #       sort_order begins with count having order prior to insert...
+    
     for user in users:
         user_json = json.dumps(cleandict(asdict(user)))
         
@@ -270,39 +285,178 @@ def cache_users_response (response_users, query_type, auth_user_id, user_id = No
     cur.close()
 
 
-def get_cached_query (query_type, auth_user_id, user_id=None):
+def get_cached_collection_all_latest (auth_user_id, query_type = 'bookmarks', user_id=None):
+    """
+    Across all queries of a type, return the latest distinct Tweet.
+    
+    This is good for bookmarks, likes or retweets, where we remove them upstream after a period
+    but still want to fetch anything we've ever added.
+    
+    Ideally we don't need this in the long term and instead auto sync new items to a local collection.
+    "But for now."
+    """
+    
     sql = """
-        select * from query 
+        select t.id, t.accessed_at, t.data
+        from query q, tweet t
         where 
-            (auth_user_id in ('14520320') or auth_user_id is null)
-            and query_type = 'bookmarks'
+            t.query_id = q.rowid
+            and (q.auth_user_id in (:auth_user_id) or q.auth_user_id is null)
+            and q.query_type = :query_type
+            
+            -- need to store author_id with tweets to get the user data out.
+            -- could also make a join table tweet_user, like tweet_media; they won't change.
+            
+            --and u.query_id = q.rowid
+            --and u.id == t.author_id
+        
+        group by t.id
+        having t.accessed_at = max(t.accessed_at)
+        
+        order by t.id desc, t.accessed_at desc
+        limit :limit
     """
-    results = []
-    next_token = None
+    params = {
+        'query_type': query_type,
+        'auth_user_id': auth_user_id,
+        'limit': 10
+    }
+    
+    db = sqlite3.connect(CACHE_PATH)
+    cur = db.cursor()
     
-    return results, next_token
     
-def get_object_over_time (obj_type, obj_id, auth_user_id):
-    cur = None
     
-    results = cur.execute(f"""
-        --select id, count(*) c from tweet group by id having c > 1
+    cached_tweets = cur.execute(sql, params).fetchall()
+    
+    tweets = list()
+    user_ids = set()
+    media_keys = set()
+    referenced_tweet_ids = set()
+    
+    for row in cached_tweets:
+        tweet_id, accessed_at, tweet_json = row
+        tweet = dacite.from_dict( data_class=tv2_types.Tweet, data=json.loads(tweet_json) )
+        
+        user_ids.add(tweet.author_id)
+        
+        # attachments is a single object with a media_keys list, per the API shape
+        if tweet.attachments and tweet.attachments.media_keys:
+            for mk in tweet.attachments.media_keys:
+                media_keys.add(mk)
+                
+        
+        #for tweet_ref in tweet.referenced_tweets:
+        #    referenced_tweet_ids.add(tweet_ref.id)
+        #    # FIXME we also need to reference these users.
+        
+        tweets.append(tweet)
+    
+    feed_items = []
+    
+    includes = {
+        'tweets': [],
+        'users': [],
+        'media': []
+    }
+    for tweet in tweets:
+        # FIXME return view models rather than raw tweets. need to join user and media, see query comment.
+        #feed_item = tweet_model_dc_vm(tweet, ...)
+        feed_item = tweet
         
-        select t.*
+        feed_items.append(feed_item)
+    
+    return feed_items
+    
+def get_object_over_time (obj_type, obj_id, auth_user_id, only_count = False):
+    """
+    Return all occurrences of an object over time,
+    or if only_count is true then return just the count.
+    """
+    db = sqlite3.connect(CACHE_PATH)
+    cur = db.cursor()
+    
+    if only_count:
+        fields = 'count(*)'
+    else:
+        fields = 't.*'
+    
+    results = cur.execute(f"""
+        select {fields}
         from {obj_type} t, query q
         where 
             t.id = :obj_id
             and q.rowid = t.query_id
             and (q.auth_user_id in (:auth_user_id) or q.auth_user_id is null)
+        
         """,
         {
             'obj_id': obj_id,
             'auth_user_id': auth_user_id
-        })
-    results = []
-    next_token = None
+        }).fetchall()
+    
+    if only_count:
+        return results[0][0]
+    else:
+        return list(results)
+
+
+def get_query_gaps (auth_user_id, query_type = 'home_feed', min_gap_hours = 1.0, max_age_days = 21.0):
+    
+    
+    sql = """
+    WITH ordered_tweets AS
+    (
+        SELECT 
+            t.*,
+            q.auth_user_id,
+            (julianday(current_timestamp) - julianday(t.created_at)) as row_age_days, 
+            ROW_NUMBER() OVER (ORDER BY t.created_at asc) rn
+        FROM tweet t
+        JOIN query q 
+            on q.rowid = t.query_id
+        WHERE 
+            q.query_type = :QUERY_TYPE
+            AND q.auth_user_id = :AUTH_USER_ID
+            AND row_age_days < :MAX_AGE_DAYS
+    )
+    SELECT
+        o1.id since_id,
+        o1.created_at start_time,
+        o2.id until_id,
+        o2.created_at end_time,
+        --CAST(strftime('%s', o2.created_at) as integer) - CAST(strftime('%s', o1.created_at) as integer) gap_seconds2,
+        --(julianday(o2.created_at) - julianday(o1.created_at)) * 86400 gap_seconds,
+        (julianday(o2.created_at) - julianday(o1.created_at)) * 24 gap_hours
+    FROM ordered_tweets o1
+    JOIN ordered_tweets o2
+        ON (
+        o1.rn + 1 = o2.rn
+        )
+    WHERE gap_hours >= :MIN_GAP_HOURS
+    order by start_time desc
+    """
+    
+    params = dict(
+        QUERY_TYPE = query_type,
+        AUTH_USER_ID = auth_user_id,
+        MAX_AGE_DAYS = max_age_days,
+        MIN_GAP_HOURS = min_gap_hours
+    )
+    
+    db = sqlite3.connect(CACHE_PATH)
+    
+    cur = db.cursor()
+    cur.row_factory = sqlite3.Row
+    
+    results = cur.execute(sql, params).fetchall()
+    
+    cur.close()
     
-    return results, next_token
+    rows = list(map(dict, results))
+    
+    return rows
+
+
 
 def get_tweet_item (tweet_id, me=None):
     
@@ -335,7 +489,9 @@ def get_tweet_item (tweet_id, me=None):
             
             print(json.dumps(err, indent=2))
     
-    
+    if not tweets_response.data:
+        return
+        
     includes = tweets_response.includes
     tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me), tweets_response.data))
     
@@ -418,9 +574,15 @@ def get_bookmarks_feed (user_id, pagination_token=None, max_results=10, me=None)
     
     cache_tweets_response(response_tweets, 'bookmarks', user_id, user_id=user_id, pagination_token=pagination_token)
     
-    includes = response_tweets.includes
-    tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me), response_tweets.data))
-    next_token = response_tweets.meta.next_token
+    if response_tweets.data:
+        includes = response_tweets.includes
+        tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me), response_tweets.data))
+        next_token = response_tweets.meta.next_token
+    else:
+        print('no tweet data:')
+        print(response_tweets)
+        tweets = []
+        next_token = None
     
     query = {}
     
@@ -463,6 +625,8 @@ def get_user_feed (user_id, me=None, **twitter_kwargs):
     
     
     tweet_source = ApiV2TweetSource(token)
+    
+    
     tweets_response = tweet_source.get_user_timeline(user_id,
                         return_dataclass=True,
                         **twitter_kwargs)
@@ -483,16 +647,29 @@ def get_user_feed (user_id, me=None, **twitter_kwargs):
         print('profile get_user_timeline errors:')
         print(tweets_response.errors)
     
+    tweets = tweets_response.data
+    
     pagination_token=twitter_kwargs.get('pagination_token')
     
+    
+    # NOTE we need to calculate this before we cache the response.
+    tweets_viewed = {}
+    
+    if auth_user_id and tweets:
+        for tweet in tweets:
+            tweet_viewed = get_object_over_time('tweet', tweet.id, auth_user_id, only_count=True)
+            #tweet_viewed = len(tweet_over_time)
+            
+            tweets_viewed[tweet.id] = tweet_viewed
+    
     cache_tweets_response(tweets_response, 'user_feed', auth_user_id, user_id=user_id, pagination_token=pagination_token)
     
     ts = int(time.time() * 1000)
     with open(f'{DATA_DIR}/cache/tl_{user_id}_{ts}_{pagination_token}.json', 'wt') as f:
         f.write(json.dumps(cleandict(asdict(tweets_response))))
     
-    if tweets_response.data:
-        tweets = list(map(lambda t: tweet_model_dc_vm(tweets_response.includes, t, me), tweets_response.data))
+    if tweets:
+        tweets = list(map(lambda t: tweet_model_dc_vm(tweets_response.includes, t, me, tweets_viewed=tweets_viewed), tweets))
     
     next_token = tweets_response.meta.next_token
     
@@ -534,7 +711,7 @@ def get_users (content_ids, me=None, pagination_token=None) -> Optional[List[h_v
     social_graph = TwitterApiV2SocialGraph(token)
     users_response = social_graph.get_users(content_ids, return_dataclass=True)
     
-    if not len(users_response.data):
+    if not users_response.data or not len(users_response.data):
         return
     
     cache_users_response(users_response, f'users', auth_user_id, pagination_token=pagination_token)
@@ -556,12 +733,30 @@ def get_home_feed (user_id, me, **query_kwargs):
     
     pagination_token = query_kwargs.get('pagination_token')
     
-    cache_tweets_response(response, 'home_feed', auth_user_id, user_id=user_id, pagination_token=pagination_token)
     
-    includes = response.includes
-    tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me), response.data))
-    next_token = response.meta.next_token
     
+    # NOTE we need to calculate this before we cache the response.
+    tweets_viewed = {}
+    
+    if response.data:
+        for tweet in response.data:
+            tweet_viewed = get_object_over_time('tweet', tweet.id, auth_user_id, only_count=True)
+            #tweet_viewed = len(tweet_over_time)
+            
+            tweets_viewed[tweet.id] = tweet_viewed
+        
+        
+        cache_tweets_response(response, 'home_feed', auth_user_id, user_id=user_id, pagination_token=pagination_token)
+        
+        includes = response.includes
+        tweets = list(map(lambda t: tweet_model_dc_vm(includes, t, me, tweets_viewed=tweets_viewed), response.data))
+        next_token = response.meta.next_token
+    else:
+        print('no tweet data:')
+        print(response)
+        tweets = []
+        next_token = None
+        
     collection_page = CollectionPage(
         id = user_id,
         items = tweets,

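The get_query_gaps query above pairs each cached tweet with its successor via ROW_NUMBER() and reports pairs spaced further apart than the threshold. The windowing logic in isolation, against an in-memory table (illustrative schema, not the app's cache):

import sqlite3

db = sqlite3.connect(':memory:')
db.execute("create table tweet (id text, created_at timestamp)")
db.executemany("insert into tweet values (?, ?)", [
    ('1', '2023-07-01 00:00:00'),
    ('2', '2023-07-01 00:30:00'),  # 0.5h after previous: below threshold
    ('3', '2023-07-01 05:30:00'),  # 5h after previous: reported as a gap
])

rows = db.execute("""
    WITH ordered AS (
        SELECT id, created_at,
               ROW_NUMBER() OVER (ORDER BY created_at ASC) rn
        FROM tweet
    )
    SELECT o1.id since_id, o2.id until_id,
           (julianday(o2.created_at) - julianday(o1.created_at)) * 24 gap_hours
    FROM ordered o1
    JOIN ordered o2 ON o1.rn + 1 = o2.rn
    WHERE gap_hours >= 1.0
    ORDER BY o1.created_at DESC
""").fetchall()

print(rows)  # one gap: ('2', '3', ~5.0 hours)
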
+ 87 - 0
extensions/twitter_v2_facade/facade.py

@@ -49,6 +49,7 @@ if find_spec('theme_bootstrap5'): # FIXME use g.
 DATA_DIR='.data'
 
 twitter_app = Blueprint('twitter_v2_facade', 'twitter_v2_facade',
+    template_folder='templates',
     static_folder='static',
     static_url_path='',
     url_prefix='/')
@@ -599,6 +600,46 @@ def get_data_tweets_media (user_id):
 
 
 
+@twitter_app.route('/gaps.html', methods=['GET'])
+def get_gaps ():
+    if not g.twitter_user:
+        return 'need to login. go to /login.html', 403
+        
+    gaps = content_source.get_query_gaps(auth_user_id=g.twitter_user['id'], query_type='home_feed')
+    
+    for gap in gaps:
+        gap['url'] = url_for('.get_timeline_home_html',
+            since_id = gap['since_id'],
+            until_id = gap['until_id'],
+            me = g.me
+            )
+    
+    view_model = dict(
+        gaps = gaps
+    )
+    
+    return render_template('gaps.html', view_model=view_model)
+    
+
+@twitter_app.route('/cached-bookmarks.html', methods=['GET'])
+def get_cached_bookmarks ():
+    if not g.twitter_user:
+        return 'need to login. go to /login.html', 403
+        
+    feed_items = content_source.get_cached_collection_all_latest(auth_user_id=g.twitter_user['id'], query_type='bookmarks')
+    
+    view_model = dict(
+        user = {},
+        tweets = feed_items[:10],
+        query = {},
+        show_thread_controls=True
+    )
+    
+    return render_template('tweet-collection-bs.html', view_model=view_model, **view_model)
+    
+
+
+
 @twitter_app.route('/latest.html', methods=['GET'])
 def get_timeline_home_html (variant = "reverse_chronological", pagination_token=None):
     
@@ -608,11 +649,27 @@ def get_timeline_home_html (variant = "reverse_chronological", pagination_token=
     user_id = g.twitter_user['id']
     token = g.twitter_user['access_token']
     
+    # exclude_newer is a proof of concept for consuming a timeline backwards.
+    # Passing it as -1 clears the marker from the session.
+    exclude_newer = int(request.args.get('exclude_newer', 0))
+    
+    
+    # exclude_viewed will not serve any viewed tweet, based on cache DB
+    exclude_viewed = int(request.args.get('exclude_viewed', 0))
+    
+    if exclude_newer < 0:
+        print('resetting oldest_viewed_tweet_id if set')
+        if 'oldest_viewed_tweet_id' in session:
+            del session['oldest_viewed_tweet_id']
+        exclude_newer = 0
+    
     if not pagination_token:
         pagination_token = request.args.get('pagination_token')
     
     output_format = request.args.get('format', 'html')
     
+    
+    
     tq = cleandict({
         'pagination_token': pagination_token,
         'since_id': request.args.get('since_id'),
@@ -621,11 +678,36 @@ def get_timeline_home_html (variant = "reverse_chronological", pagination_token=
         'start_time': request.args.get('start_time')
     })
     
+    
+    if exclude_newer and 'oldest_viewed_tweet_id' in session:
+        until_id = str(session.get('oldest_viewed_tweet_id'))
+        print(f'get_timeline_home_html: exclude_newer: {until_id}')
+        tq['until_id'] = until_id
+    
     timeline_page = get_content(f'twitter:feed:reverse_chronological:user:{user_id}', me=g.me, **tq)
     
     next_token = timeline_page.next_token
     tweets = timeline_page.items
     
+    if exclude_viewed:
+        tweets = list(filter(lambda t: not t.is_viewed, tweets))
+        tq['exclude_viewed'] = exclude_viewed
+    
+    if exclude_newer:
+        tq['exclude_newer'] = exclude_newer
+    
+    # oldest in collection should be last...
+    # might have an issue if it's an old RT.
+    
+    if tweets:
+        oldest_tweet_id = int(tweets[-1].id)
+        
+        if 'oldest_viewed_tweet_id' not in session:
+            session['oldest_viewed_tweet_id'] = oldest_tweet_id
+        else:
+            if oldest_tweet_id < session['oldest_viewed_tweet_id']:
+                session['oldest_viewed_tweet_id'] = oldest_tweet_id
+        
     tq['pagination_token'] = next_token
     
     query = {
@@ -1044,6 +1126,11 @@ def get_nav_items ():
             label = 'My Profile',
             order = 200
         ),
+        dict(
+            href = url_for('twitter_v2_facade.get_gaps'),
+            label = 'Gaps',
+            order = 300
+            ),
         dict (
             href = url_for('twitter_v2_facade.oauth2_login.get_logout_html'),
             label = f'Logout ({me})',

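The exclude_newer handling above boils down to remembering the oldest tweet id served in the session and constraining the next request with until_id. The same logic distilled (a plain dict stands in for Flask's session):

session = {}  # stands in for flask.session

def mark_viewed (tweet_ids):
    """Track the oldest tweet id served so far."""
    if not tweet_ids:
        return
    oldest = min(int(t) for t in tweet_ids)
    if ('oldest_viewed_tweet_id' not in session
            or oldest < session['oldest_viewed_tweet_id']):
        session['oldest_viewed_tweet_id'] = oldest

def timeline_query (exclude_newer):
    """Build query kwargs; with exclude_newer set, page strictly older than seen."""
    tq = {}
    if exclude_newer < 0:
        session.pop('oldest_viewed_tweet_id', None)  # reset, as the route does
    elif exclude_newer and 'oldest_viewed_tweet_id' in session:
        tq['until_id'] = str(session['oldest_viewed_tweet_id'])
    return tq

mark_viewed(['1543456011398299650', '1543450176387530753'])
print(timeline_query(exclude_newer=1))  # {'until_id': '1543450176387530753'}
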
+ 42 - 0
extensions/twitter_v2_facade/templates/gaps.html

@@ -0,0 +1,42 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+<h1>Gaps</h1>
+
+<p>These are gaps of at least an hour in your browsing history, from most recent to oldest.</p>
+
+<table class="table table-striped w-75">
+<thead>
+<tr>
+	<th>Start Time</th>
+	<th>End Time</th>
+	<th>Gap Length (Hours)</th>
+	<th>Link</th>
+</tr>
+</thead>
+<tbody>
+{% for gap in view_model.gaps %}
+<tr>
+	<td>
+		{{ gap.start_time }}
+	</td>
+	<td>
+		{{ gap.end_time }}
+	</td>
+	<td>
+		{{ gap.gap_hours }}
+	</td>
+	<td>
+		<a href="{{ gap.url }}">Browse</a>
+	</td>
+</tr>
+{% endfor %}
+</tbody>
+</table>
+
+<!--
+<code><pre>{{ view_model | tojson(indent = 2) }}</pre></code>
+-->
+
+{% endblock %}

+ 22 - 6
extensions/twitter_v2_facade/view_model.py

@@ -1,6 +1,6 @@
 from dataclasses import replace
 
-from flask import g, request
+from flask import g, request, session
 
 import sqlite3
 
@@ -10,6 +10,9 @@ from hogumathi_app.view_model import FeedServiceUser, FeedItem, FeedItemAction,
 
 from . import oauth2_login
 
+# FIXME we want this to mark tweets as viewed.
+# from .content_source import get_object_over_time
+
 
 url_for = oauth2_login.url_for_with_me
 
@@ -36,11 +39,25 @@ def user_model_dc (user, my_url_for=url_for):
     return fsu
 
 
-def tweet_model_dc_vm (includes: TweetExpansions, tweet: Tweet, me, my_url_for=url_for, my_g=g, reply_depth=0, expand_path=None) -> FeedItem:
+def tweet_model_dc_vm (includes: TweetExpansions, tweet: Tweet, me, my_url_for=url_for, my_g=g, reply_depth=0, expand_path=None, tweets_viewed={}) -> FeedItem:
     
     # retweeted_by, avi_icon_url, display_name, handle, created_at, text
     
     
+    # HACK we should not refer to the request directly...
+    is_marked = False
+    if request and request.args.get('marked_reply') == str(tweet.id):
+        is_marked = True
+        
+    # HACK we shouldn't go to session directly
+    #is_viewed = False
+    #if session and 'oldest_viewed_tweet_id' in session:
+    #    #print('--- tweet_model_dc_vm: checking oldest_viewed_tweet_id')
+    #    oldest_viewed_tweet_id = session.get('oldest_viewed_tweet_id')
+    #    if oldest_viewed_tweet_id < int(tweet.id):
+    #        is_viewed = True
+    is_viewed = tweets_viewed.get(tweet.id)
+    
     user = list(filter(lambda u: u.id == tweet.author_id, includes.users))[0]
     
     published_by = user_model_dc(user, my_url_for=my_url_for)
@@ -128,15 +145,14 @@ def tweet_model_dc_vm (includes: TweetExpansions, tweet: Tweet, me, my_url_for=u
         #'is_edited': len(tweet['edit_history_tweet_ids']) > 1
         
         actions = actions,
-        is_bookmarked = is_bookmarked
+        is_bookmarked = is_bookmarked,
+        is_marked = is_marked,
+        is_viewed = is_viewed
     )
     
     if reply_depth:
         t = replace(t, reply_depth = reply_depth)
     
-    # HACK we should not refer to the request directly...
-    if request and request.args.get('marked_reply') == str(t.id):
-        t = replace(t, is_marked = True)
     
     # This is where we should put "is_bookmark", "is_liked", "is_in_collection", etc...
     

+ 27 - 1
hogumathi_app/__init__.py

@@ -7,4 +7,30 @@ load_dotenv()
 sys.path.append('.data/lib')
 sys.path.append('./lib')
 sys.path.append('.data/extensions')
-sys.path.append('./extensions')
+sys.path.append('./extensions')
+
+
+import sqlite3
+
+class AccountsApp:
+    
+    def __init__ (self, db_path):
+        self.db_path = db_path
+    
+    def create_account (self, username, password):
+        pass
+    
+    def link_account (self, account_id, service, cred):
+        pass
+    
+    def oauth_redirect_url (self, account_id, service, cred):
+        pass
+    
+    def oauth_access_token_for_code (self):
+        pass
+    
+    def oauth_refresh_token (self):
+        pass
+    
+    def add_credentials_to_request (self, account_id, service, params = None, headers = None, cookies = None):
+        pass

+ 10 - 0
hogumathi_app/__main__.py

@@ -107,6 +107,13 @@ else:
     print('git module not found.')
     git_enabled = False
 
+if find_spec('bitchute_facade'):
+    import bitchute_facade
+    bitchute_enabled = True
+else:
+    print('bitchute module not found.')
+    bitchute_enabled = False
+
 if find_spec('videojs'):
     from videojs import videojs_bp
     videojs_enabled = True
@@ -326,6 +333,9 @@ if __name__ == '__main__':
     if git_enabled:
         git_facade.register_content_sources()
         
+    if bitchute_enabled:
+        bitchute_facade.register_content_sources()
+        
     #CORS(api)
     
     sched_app = h_sched.ScheduleApplication()

+ 2 - 2
hogumathi_app/content_system.py

@@ -116,7 +116,7 @@ class ContentSystem:
             yield content_source_fn, args, kwargs
             
     
-    @lru_cache(maxsize=64) 
+    #@lru_cache(maxsize=64) 
     def get_content (self, content_id, content_source_id=None, ttl_hash=get_ttl_hash(60), *extra_args, **extra_kwargs):
         """
         NOTE: mutating return value mutates cached value
@@ -137,7 +137,7 @@ class ContentSystem:
                 
                 return content
     
-    @lru_cache(maxsize=8) # NOTE: mutating return value mutates cached value
+    #@lru_cache(maxsize=8) # NOTE: mutating return value mutates cached value
     def get_all_content (self, content_ids, enable_bulk_fetch=False, ttl_hash=get_ttl_hash(60)):
         """
         Get content from all sources, using a grouping call if possible.

+ 8 - 0
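Disabling the decorators sidesteps the pitfall the old NOTE warned about: lru_cache returns the same object on every hit, so a caller that mutates the result silently poisons the cache. A minimal illustration (not the app's code):

from functools import lru_cache

@lru_cache(maxsize=2)
def get_items ():
    return ['a', 'b']

items = get_items()
items.append('c')   # mutates the object stored inside the cache
print(get_items())  # ['a', 'b', 'c'] -- every later caller sees the mutation
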
hogumathi_app/static/theme/base.css

@@ -11,6 +11,14 @@
 	text-align: right;
 }
 
+#tweets .tweet.tweet-viewed {
+	background-color: powderblue;
+}
+
+.theme-dark #tweets .tweet.tweet-viewed {
+	background-color: lightslategray;
+}
+
 #tweets .tweet.marked {
 	background-color: powderblue;
 }

+ 4 - 0
hogumathi_app/templates/partial/timeline-tweet.html

@@ -22,6 +22,10 @@
 	
 	<a href="{{ tweet.author_url }}" class="silver">@{{ tweet.handle }}</a>
 	<a href="{{ tweet.url }}">{{ tweet.created_at }}</a> [<a href="{{ tweet.source_url }}" target="tweet_{{ tweet.id }}">source</a>]
+	
+	{% if tweet.is_viewed %}
+	[viewed]
+	{% endif %}
 	</p>
 	<p class="w-100">
 	

+ 1 - 1
hogumathi_app/templates/partial/tweets-timeline.html

@@ -100,7 +100,7 @@ function feed_item_to_activity (fi) {
 
 {% for tweet in tweets %}
 
-<li class="tweet w-100 dt {% if tweet.is_marked %}marked{% endif %}">
+<li class="tweet w-100 dt {% if tweet.is_marked %}marked{% endif %} {% if tweet.is_viewed %}tweet-viewed{% endif %}">
 <script>
 	
 	

+ 2 - 0
hogumathi_app/view_model.py

@@ -176,6 +176,8 @@ class FeedItem:
     
     # This is a TBD concept to highlight parts of a message that are a reply.
     replying_to: Optional[List[ReplyingToSection]] = None
+    
+    is_viewed: Optional[bool] = None
 
 # tm = FeedItem(id="1", text="aa", created_at="fs", display_name="fda", handle="fdsafas")
 

+ 10 - 6
hogumathi_app/web.py

@@ -88,8 +88,8 @@ def get_abc123_html ():
 
 	return 'abc123'
 
-@api.get('/content/<content_id>.html')
-def get_content_html (content_id, content_kwargs=None):
+@api.get('/content/<content_id>.<response_format>')
+def get_content_html (content_id, response_format='json', content_kwargs=None):
 	
 	if not content_kwargs:
 		content_kwargs = filter(lambda e: e[0].startswith('content:'), request.args.items())
@@ -115,10 +115,10 @@ def get_content_html (content_id, content_kwargs=None):
 @api.get('/content/def456.html')
 def get_def456_html ():
 
-	return get_content_html('brand:ispoogedaily')
+	return get_content_html('brand:ispoogedaily', response_format='html')
     
-@api.get('/content/search.html')
-def get_content_search_html ():
+@api.get('/content/search.<response_format>')
+def get_content_search_html (response_format = 'html'):
     source_id = request.args.get('source')
     q = request.args.get('q')
     pagination_token = request.args.get('pagination_token')
@@ -178,4 +178,8 @@ def get_schedule_create_job_html ():
     
     }
     
-    return render_template_string(template, **view_model)
+    return render_template_string(template, **view_model)
+
+@api.get('/health')
+def get_health ():
+    return 'ok'
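
A quick smoke test of the new endpoint, assuming the app is serving on Flask's default port (an assumption, not stated in this diff):

import requests

resp = requests.get('http://localhost:5000/health')
print(resp.status_code, resp.text)  # 200 ok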

+ 22 - 56
lib/twitter_v2/api.py

@@ -63,14 +63,7 @@ class TwitterApiV2SocialGraph:
         response = requests.get(url, params=params, headers=headers)
         result = json.loads(response.text)
         
-        typed_result = from_dict(data_class=UserSearchResponse, data=result)
-        
-        if return_dataclass:
-            return typed_result
-        
-        result = cleandict(asdict(typed_result))
-        
-        return result
+        return self._parse_user_search_response(result, return_dataclass=return_dataclass)
         
     def get_following (self, user_id, 
                         max_results = 50, pagination_token = None, return_dataclass=False):
@@ -99,6 +92,9 @@ class TwitterApiV2SocialGraph:
         response = requests.get(url, params=params, headers=headers)
         result = json.loads(response.text)
         
+        return self._parse_user_search_response(result, return_dataclass=return_dataclass)
+    
+    def _parse_user_search_response (self, result, return_dataclass=True):
         typed_result = from_dict(data_class=UserSearchResponse, data=result)
         
         if return_dataclass:
@@ -107,8 +103,7 @@ class TwitterApiV2SocialGraph:
         result = cleandict(asdict(typed_result))
         
         return result
-        
-        
+    
     def get_followers (self, user_id,
                         max_results = 50, pagination_token = None, return_dataclass=False):
         # GET /2/users/:id/followers
@@ -136,15 +131,7 @@ class TwitterApiV2SocialGraph:
         response = requests.get(url, params=params, headers=headers)
         result = json.loads(response.text)
         
-        typed_result = from_dict(data_class=UserSearchResponse, data=result)
-        
-        if return_dataclass:
-            return typed_result
-        
-        result = cleandict(asdict(typed_result))
-        
-        return result
-        
+        return self._parse_user_search_response(result, return_dataclass=return_dataclass)
         
     def follow_user (self, user_id, target_user_id):
         # POST /2/users/:id/following
@@ -190,10 +177,13 @@ class ApiV2ConversationSource:
         
         #print(response_json)
         
+        return self._parse_dm_events_response(response_json)
+    
+    def _parse_dm_events_response (self, response_json):
         typed_resp = from_dict(data=response_json, data_class=DMEventsResponse)
         
         return typed_resp
-        
+    
     def get_conversation (self, dm_conversation_id,
         max_results = None, pagination_token = None):
         
@@ -422,6 +412,12 @@ class ApiV2TweetSource:
         response = requests.get(url, params=params, headers=headers)
         response_json = json.loads(response.text)
         
+        print(json.dumps(response_json, indent=2))
+        
+        return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)
+        
+    def _parse_tweet_search_response (self, response_json, return_dataclass=True):
+        
         try:
             #print(json.dumps(response_json, indent = 2))
             typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
@@ -519,21 +515,15 @@ class ApiV2TweetSource:
         
         print(json.dumps(response_json, indent=2))
         
-        typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
-        
-        if return_dataclass:
-            return typed_resp
-        
-        checked_resp = cleandict(asdict(typed_resp))
-
-        print('using checked response to search_tweets')
-        
-        return checked_resp
+        return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)
     
     def search_tweets (self,
                        query, 
                        pagination_token = None,
                        since_id = None,
+                       until_id = None,
+                       start_time = None,
+                       end_time = None,
                        max_results = 10,
                        sort_order = None,
                        non_public_metrics = False,
@@ -592,22 +582,7 @@ class ApiV2TweetSource:
         response = requests.get(url, params=params, headers=headers)
         response_json = json.loads(response.text)
         
-        try:
-            typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
-        except:
-            print('error converting tweet search response to TweetSearchResponse')
-            print(response_json)
-            
-            raise 'error converting tweet search response to TweetSearchResponse'
-            
-        if return_dataclass:
-            return typed_resp
-        
-        checked_resp = cleandict(asdict(typed_resp))
-
-        print('using checked response to search_tweets')
-        
-        return checked_resp
+        return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)
         
         
     
@@ -781,17 +756,8 @@ class ApiV2TweetSource:
         resp = requests.get(url, headers=headers, params=params)
         
         result = json.loads(resp.text)
-
-        typed_result = from_dict(data_class=UserSearchResponse, data=result)
         
-        #print(typed_result)
-        
-        if return_dataclass:
-            return typed_result
-        
-        result = cleandict(asdict(typed_result))
-        
-        return result
+        return self._parse_user_search_response(result, return_dataclass=return_dataclass)
         
     def like_tweet (self, tweet_id):
         #  POST /2/users/:user_id/likes

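With the new until_id / start_time / end_time parameters, a time-bounded search might look like the sketch below (the bearer token is assumed; parameter names come from the hunk above):

from twitter_v2.api import ApiV2TweetSource

source = ApiV2TweetSource(token)  # `token`: a valid bearer token (assumed)
resp = source.search_tweets(
    'from:iSpoogeDaily',
    start_time='2022-07-01T00:00:00Z',  # new in this commit
    end_time='2022-07-05T00:00:00Z',    # new in this commit
    max_results=10,
)
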
Some files were not shown because too many files changed in this diff