# tweet_source.py

from dataclasses import asdict
from typing import List
from dacite import from_dict
import json
import requests
import sqlite3

from twitter_v2_types import TweetSearchResponse, DMEventsResponse, UserSearchResponse


class ArchiveTweetSource:
    """
    id, created_at, retweeted, favorited, retweet_count, favorite_count, full_text,
    in_reply_to_status_id_str, in_reply_to_user_id, in_reply_to_screen_name
    """

    def __init__ (self, archive_path, db_path = ".data/tweet.db", archive_user_id = None):
        self.archive_path = archive_path
        self.user_id = archive_user_id
        self.db_path = db_path
        return

    def get_db (self):
        db = sqlite3.connect(self.db_path)
        return db

    def get_user_timeline (self,
                           author_id = None, max_results = 10, since_id = None):
        if max_results is None:
            max_results = -1

        sql_params = []
        where_sql = []

        # if the ID is not stored as a number (e.g. as a string) then this could be a problem
        if since_id:
            where_sql.append("cast(id as integer) > ?")
            sql_params.append(since_id)

        #if author_id:
        #    where_sql.append("author_id = ?")
        #    sql_params.append(author_id)

        where_sql = " and ".join(where_sql)

        sql_cols = "id, created_at, retweeted, favorited, retweet_count, favorite_count, full_text, in_reply_to_status_id_str, in_reply_to_user_id, in_reply_to_screen_name"

        if author_id:
            sql_cols += ", '{}' as author_id".format(author_id)

        if where_sql:
            where_sql = "where {}".format(where_sql)

        sql = "select {} from tweet {} order by cast(id as integer) asc limit ?".format(sql_cols, where_sql)
        sql_params.append(max_results)

        db = self.get_db()
        cur = db.cursor()
        cur.row_factory = sqlite3.Row

        print(sql)
        print(sql_params)

        results = list(map(dict, cur.execute(sql, sql_params).fetchall()))
        return results
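
    # For example (hypothetical values), calling get_user_timeline(author_id='200',
    # since_id=100, max_results=10) assembles roughly:
    #   select id, created_at, ..., '200' as author_id from tweet
    #   where cast(id as integer) > ? order by cast(id as integer) asc limit ?
    # and executes it with sql_params == [100, 10].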

    def get_tweet (self, id_):
        tweets = self.get_tweets([id_])
        if len(tweets):
            return tweets[0]

    def get_tweets (self,
                    ids):
        sql_params = []
        where_sql = []

        ids_in_list_sql = "id in ({})".format(','.join(['?'] * len(ids)))
        where_sql.append(ids_in_list_sql)
        sql_params += ids

        where_sql = " and ".join(where_sql)

        sql = "select * from tweet where {}".format(where_sql)

        db = self.get_db()
        cur = db.cursor()
        cur.row_factory = sqlite3.Row

        results = list(map(dict, cur.execute(sql, sql_params).fetchall()))

        # return rows in the same order as the requested ids
        results.sort(key=lambda t: ids.index(t['id']))
        return results

    def search_tweets (self,
                       query,
                       since_id = None,
                       max_results = 10,
                       sort_order = None
                       ):
        return


# https://developer.twitter.com/en/docs/twitter-api/v1/tweets/curate-a-collection/api-reference/get-collections-entries
# we can perhaps steal a token from the TweetDeck Console, otherwise we need to apply for Standard v1.1 / Elevated
class ApiV11TweetCollectionSource:
    def __init__ (self, token):
        self.token = token

    def create_collection (self, name):
        return

    def bulk_add_to_collection (self, collection_id, items):
        return

    def add_to_collection (self, collection_id, item):
        return

    def get_collection_tweets (self, collection_id):
        return


class TwitterApiV2SocialGraph:
    def __init__ (self, token):
        self.token = token

    def get_user (self, user_id, is_username=False):
        # GET /2/users/:id
        # GET /2/users/by/:username
        return self.get_users([user_id], is_username)

    def get_users (self, user_ids, are_usernames=False):
        # GET /2/users/by?usernames=
        # GET /2/users?ids=
        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]

        params = {
            'user.fields': ','.join(user_fields),
        }

        # the v2 endpoints expect a comma-separated list, not repeated query parameters
        if are_usernames:
            url = "https://api.twitter.com/2/users/by"
            params['usernames'] = ','.join(user_ids)
        else:
            url = "https://api.twitter.com/2/users"
            params['ids'] = ','.join(user_ids)

        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }

        response = requests.get(url, params=params, headers=headers)
        result = json.loads(response.text)
        return result

    def get_following (self, user_id,
                       max_results = 50, pagination_token = None, return_dataclass=False):
        # GET /2/users/:id/following
        url = "https://api.twitter.com/2/users/{}/following".format(user_id)
        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified"]

        params = {
            'user.fields': ','.join(user_fields),
            'max_results': max_results
        }

        if pagination_token:
            params['pagination_token'] = pagination_token

        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }

        response = requests.get(url, params=params, headers=headers)
        result = json.loads(response.text)

        typed_result = from_dict(data_class=UserSearchResponse, data=result)

        if return_dataclass:
            return typed_result

        result = cleandict(asdict(typed_result))
        return result

    def get_followers (self, user_id,
                       max_results = 50, pagination_token = None, return_dataclass=False):
        # GET /2/users/:id/followers
        url = "https://api.twitter.com/2/users/{}/followers".format(user_id)
        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]

        params = {
            'user.fields': ','.join(user_fields),
            'max_results': max_results
        }

        if pagination_token:
            params['pagination_token'] = pagination_token

        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }

        response = requests.get(url, params=params, headers=headers)
        result = json.loads(response.text)

        typed_result = from_dict(data_class=UserSearchResponse, data=result)

        if return_dataclass:
            return typed_result

        result = cleandict(asdict(typed_result))
        return result

    def follow_user (self, user_id, target_user_id):
        # POST /2/users/:id/following
        # {target_user_id}
        return

    def unfollow_user (self, user_id, target_user_id):
        # DELETE /2/users/:source_user_id/following/:target_user_id
        return


class ApiV2ConversationSource:
    def __init__ (self, token):
        self.token = token

    def get_recent_events (self, max_results = None, pagination_token = None):
        # https://developer.twitter.com/en/docs/twitter-api/direct-messages/lookup/api-reference/get-dm_events
        url = "https://api.twitter.com/2/dm_events"

        params = {
            "dm_event.fields": "id,event_type,text,created_at,dm_conversation_id,sender_id,participant_ids,referenced_tweets,attachments",
            "expansions": ",".join(["sender_id", "participant_ids"]),
            "user.fields": ",".join(["id", "created_at", "name", "username", "location", "profile_image_url", "url", "verified"])
        }

        if max_results:
            params['max_results'] = max_results

        if pagination_token:
            params['pagination_token'] = pagination_token

        headers = {"Authorization": "Bearer {}".format(self.token)}

        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)

        typed_resp = from_dict(data=response_json, data_class=DMEventsResponse)
        return typed_resp

    def get_conversation (self, dm_conversation_id,
                          max_results = None, pagination_token = None):
        return

    def get_conversation_with_user (self, user_id,
                                    max_results = None, pagination_token = None):
        return

    def send_message (self, dm_conversation_id, text, attachments = None):
        # POST /2/dm_conversations/:dm_conversation_id/messages
        url = f'https://api.twitter.com/2/dm_conversations/{dm_conversation_id}/messages'

        body = {
            'text': text
        }

        if attachments:
            body['attachments'] = attachments

        headers = {
            "Authorization": "Bearer {}".format(self.token),
            "Content-Type": "application/json"
        }

        resp = requests.post(url, data=json.dumps(body), headers=headers)
        result = json.loads(resp.text)

        example_resp_text = """
        {
          "dm_conversation_id": "1346889436626259968",
          "dm_event_id": "128341038123"
        }
        """

        return result


class ApiV2TweetSource:
    def __init__ (self, token):
        self.token = token

    def create_tweet (self, text,
                      reply_to_tweet_id = None, quote_tweet_id = None):
        url = "https://api.twitter.com/2/tweets"

        tweet = {
            'text': text
        }

        if reply_to_tweet_id:
            tweet['reply'] = {
                'in_reply_to_tweet_id': reply_to_tweet_id
            }

        if quote_tweet_id:
            tweet['quote_tweet_id'] = quote_tweet_id

        body = json.dumps(tweet)

        headers = {
            'Authorization': 'Bearer {}'.format(self.token),
            'Content-Type': 'application/json'
        }

        response = requests.post(url, data=body, headers=headers)
        result = json.loads(response.text)
        return result

    def retweet (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/retweets".format(user_id)

        retweet = {
            'tweet_id': tweet_id
        }

        body = json.dumps(retweet)

        headers = {
            'Authorization': 'Bearer {}'.format(self.token),
            'Content-Type': 'application/json'
        }

        response = requests.post(url, data=body, headers=headers)
        result = json.loads(response.text)
        return result

    def bookmark (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/bookmarks".format(user_id)

        bookmark = {
            'tweet_id': tweet_id
        }

        body = json.dumps(bookmark)

        headers = {
            'Authorization': 'Bearer {}'.format(self.token),
            'Content-Type': 'application/json'
        }

        response = requests.post(url, data=body, headers=headers)
        result = json.loads(response.text)
        return result

    def delete_bookmark (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/bookmarks/{}".format(user_id, tweet_id)

        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }

        response = requests.delete(url, headers=headers)
        result = json.loads(response.text)
        return result

    def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None, until_id = None, end_time = None) -> TweetSearchResponse:
        """
        Get a user's timeline as viewed by the user themselves.
        """
        path = 'users/{}/timelines/{}'.format(user_id, variant)

        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, until_id=until_id, end_time=end_time, return_dataclass=True)

    def get_timeline (self, path,
                      max_results = 10, pagination_token = None, since_id = None,
                      until_id = None,
                      end_time = None,
                      non_public_metrics = False,
                      exclude_replies=False,
                      exclude_retweets=False,
                      return_dataclass=False):
        """
        Get any timeline, including custom curated timelines built by TweetDeck / API v1.1.
        """
        token = self.token

        url = "https://api.twitter.com/2/{}".format(path)

        tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
        media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
        user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]

        expansions = ["entities.mentions.username",
                      "attachments.media_keys",
                      "author_id",
                      "referenced_tweets.id",
                      "referenced_tweets.id.author_id"]

        if non_public_metrics:
            tweet_fields.append("non_public_metrics")
            media_fields.append("non_public_metrics")

        params = {
            "expansions": ",".join(expansions),
            "media.fields": ",".join(media_fields),
            "tweet.fields": ",".join(tweet_fields),
            "user.fields": ",".join(user_fields),
            "max_results": max_results,
        }

        exclude = []

        if exclude_replies:
            exclude.append('replies')

        if exclude_retweets:
            exclude.append('retweets')

        if len(exclude):
            print(f'get_timeline exclude={exclude}')
            params['exclude'] = ','.join(exclude)

        if pagination_token:
            params['pagination_token'] = pagination_token

        if since_id:
            params['since_id'] = since_id

        if until_id:
            params['until_id'] = until_id

        if end_time:
            params['end_time'] = end_time

        headers = {"Authorization": "Bearer {}".format(token)}
        #headers = {"Authorization": "access_token {}".format(access_token)}

        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)

        try:
            #print(json.dumps(response_json, indent = 2))
            typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
        except Exception:
            print('error converting response to dataclass')
            print(json.dumps(response_json, indent = 2))

            if not return_dataclass:
                return response_json

            raise ValueError('error converting response to dataclass')

        if return_dataclass:
            return typed_resp

        checked_resp = cleandict(asdict(typed_resp))

        print('using checked response to get_timeline')
        #print(json.dumps(checked_resp, indent=2))
        #print('og=')
        #print(json.dumps(response_json, indent=2))

        return checked_resp
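
    # Paths passed to get_timeline elsewhere in this class include
    # "users/{id}/timelines/reverse_chronological", "users/{id}/tweets",
    # "users/{id}/mentions", "users/{id}/bookmarks" and "users/{id}/liked_tweets".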

    def get_mentions_timeline (self, user_id,
                               max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
        path = "users/{}/mentions".format(user_id)

        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)

    def get_user_timeline (self, user_id,
                           max_results = 10, pagination_token = None, since_id = None,
                           non_public_metrics=False,
                           exclude_replies=False,
                           exclude_retweets=False,
                           return_dataclass=False):
        """
        Get a user's Tweets as viewed by another.
        """
        path = "users/{}/tweets".format(user_id)

        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id,
            non_public_metrics = non_public_metrics,
            exclude_replies=exclude_replies, exclude_retweets=exclude_retweets, return_dataclass=return_dataclass)

    def get_tweet (self, id_, non_public_metrics = False, return_dataclass=False):
        return self.get_tweets([id_], non_public_metrics = non_public_metrics, return_dataclass=return_dataclass)

    def get_tweets (self,
                    ids,
                    non_public_metrics = False,
                    return_dataclass = False):
        token = self.token

        url = "https://api.twitter.com/2/tweets"

        tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
        media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
        user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]

        expansions = ["entities.mentions.username",
                      "attachments.media_keys",
                      "author_id",
                      "referenced_tweets.id",
                      "referenced_tweets.id.author_id"]

        if non_public_metrics:
            tweet_fields.append("non_public_metrics")
            media_fields.append("non_public_metrics")

        params = {
            "ids": ','.join(ids),
            "expansions": ",".join(expansions),
            "media.fields": ",".join(media_fields),
            "tweet.fields": ",".join(tweet_fields),
            "user.fields": ",".join(user_fields)
        }

        headers = {"Authorization": "Bearer {}".format(token)}

        #print(params)

        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)

        print(json.dumps(response_json, indent=2))

        typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)

        if return_dataclass:
            return typed_resp

        checked_resp = cleandict(asdict(typed_resp))

        print('using checked response to get_tweets')

        return checked_resp

    def search_tweets (self,
                       query,
                       pagination_token = None,
                       since_id = None,
                       max_results = 10,
                       sort_order = None,
                       non_public_metrics = False,
                       return_dataclass = False
                       ):
        token = self.token

        url = "https://api.twitter.com/2/tweets/search/recent"

        tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
        media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
        user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]

        expansions = ["entities.mentions.username",
                      "attachments.media_keys",
                      "author_id",
                      "referenced_tweets.id",
                      "referenced_tweets.id.author_id"]

        if non_public_metrics:
            tweet_fields.append("non_public_metrics")
            media_fields.append("non_public_metrics")

        params = {
            "expansions": ",".join(expansions),
            "media.fields": ",".join(media_fields),
            "tweet.fields": ",".join(tweet_fields),
            "user.fields": ",".join(user_fields),
            "query": query,
            "max_results": max_results,
        }

        if pagination_token:
            params['pagination_token'] = pagination_token

        if since_id:
            params['since_id'] = since_id

        if sort_order:
            params['sort_order'] = sort_order

        headers = {"Authorization": "Bearer {}".format(token)}

        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)

        try:
            typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
        except Exception:
            print('error converting tweet search response to TweetSearchResponse')
            print(response_json)
            raise ValueError('error converting tweet search response to TweetSearchResponse')

        if return_dataclass:
            return typed_resp

        checked_resp = cleandict(asdict(typed_resp))

        print('using checked response to search_tweets')

        return checked_resp

    def count_tweets (self,
                      query,
                      since_id = None,
                      granularity = 'hour'
                      ):
        """
        App rate limit (Application-only): 300 requests per 15-minute window,
        shared among all users of your app, i.e. roughly one request every 3 seconds.
        """
        token = self.token

        url = "https://api.twitter.com/2/tweets/counts/recent"

        params = {
            "query": query,
            "granularity": granularity
        }

        if since_id:
            params['since_id'] = since_id

        headers = {"Authorization": "Bearer {}".format(token)}

        response = requests.get(url, params=params, headers=headers)

        #print(response.status_code)
        #print(response.text)

        response_json = json.loads(response.text)
        return response_json
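
    # Pacing sketch for the app-only limit documented above: 900 s / 300 requests
    # works out to one request every 3 seconds, so a polling caller can simply
    # sleep between calls, e.g. (tweet_source and query are assumed names):
    #
    #   while True:
    #       counts = tweet_source.count_tweets(query)
    #       time.sleep(3)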

    #def get_conversation (self, tweet_id, pagination_token = None,
    # TODO

    def get_thread (self, tweet_id,
                    author_id = None,
                    only_replies = False,
                    pagination_token = None,
                    since_id = None,
                    max_results = 10,
                    sort_order = None,
                    return_dataclass=False
                    ):
        # FIXME author_id can be determined from a Tweet object
        query = ""

        if author_id:
            query += " from:{}".format(author_id)

        if only_replies:
            query += " in_reply_to_tweet_id:{}".format(tweet_id)
        else:
            query += " conversation_id:{}".format(tweet_id)

        print("get_thread query=" + query)

        return self.search_tweets(query,
            pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order,
            return_dataclass=return_dataclass)
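
    # For example (hypothetical ids), get_thread('1000', author_id='200') searches
    # with query " from:200 conversation_id:1000"; passing only_replies=True swaps
    # the conversation_id clause for "in_reply_to_tweet_id:1000".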

    def get_bookmarks (self, user_id,
                       max_results = 10, pagination_token = None, since_id = None,
                       return_dataclass=False):
        path = "users/{}/bookmarks".format(user_id)

        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)

    def get_media_tweets (self,
                          author_id = None,
                          has_media = True,
                          has_links = None,
                          has_images = None,
                          has_videos = None,
                          is_reply = None,
                          is_retweet = None,
                          pagination_token = None,
                          since_id = None,
                          max_results = 10,
                          sort_order = None,
                          return_dataclass=False
                          ):
        # FIXME author_id can be determined from a Tweet object
        query = ""

        if has_media is not None:
            if not has_media:
                query += "-"
            query += "has:media "

        if has_links is not None:
            if not has_links:
                query += " -"
            query += "has:links "

        if has_images is not None:
            if not has_images:
                query += " -"
            query += "has:images "

        if has_videos is not None:
            if not has_videos:
                query += " -"
            query += "has:videos "

        if is_reply is not None:
            if not is_reply:
                query += " -"
            query += "is:reply "

        if is_retweet is not None:
            if not is_retweet:
                query += " -"
            query += "is:retweet "

        if author_id:
            query += "from:{} ".format(author_id)

        return self.search_tweets(query,
            pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order, return_dataclass = return_dataclass)
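
    # For example (hypothetical id), get_media_tweets(author_id='200', is_retweet=False)
    # builds the query "has:media  -is:retweet from:200 " (spacing comes straight from
    # the concatenation above) before delegating to search_tweets.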

    def get_retweets (self, tweet_id):
        # GET /2/tweets/:id/retweeted_by
        return

    def get_quote_tweets (self, tweet_id):
        # GET /2/tweets/:id/quote_tweets
        return

    def get_liked_tweets (self, user_id,
                          max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
        # GET /2/users/:id/liked_tweets
        # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
        path = "users/{}/liked_tweets".format(user_id)

        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)

    def get_liking_users (self, tweet_id,
                          max_results = None, pagination_token = None):
        # GET /2/tweets/:id/liking_users
        # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
        url = f"https://api.twitter.com/2/tweets/{tweet_id}/liking_users"

        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
        expansions = ["author_id"]

        params = cleandict({
            "user.fields": ','.join(user_fields),
            "max_results": max_results,
            "pagination_token": pagination_token,
            "expansions": ','.join(expansions),
        })

        headers = {
            "Authorization": f"Bearer {self.token}"
        }

        resp = requests.get(url, headers=headers, params=params)
        response_json = json.loads(resp.text)
        return response_json

    def like_tweet (self, tweet_id):
        # POST /2/users/:user_id/likes
        # {id: tweet_id}
        return

    def get_list_tweets (self, list_id):
        # GET /2/lists/:id/tweets
        return


def cleandict(d):
    if isinstance(d, dict):
        return {k: cleandict(v) for k, v in d.items() if v is not None}
    elif isinstance(d, list):
        return [cleandict(v) for v in d]
    else:
        return d
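

# Example of cleandict's behaviour:
#   cleandict({'text': 'hi', 'entities': None, 'media': [{'alt_text': None}]})
#   returns {'text': 'hi', 'media': [{}]}


# A minimal usage sketch, not part of the module proper. Assumptions: a bearer
# token is available in the TWITTER_BEARER_TOKEN environment variable and
# '12345' stands in for a real numeric user id.
if __name__ == '__main__':
    import os

    token = os.environ.get('TWITTER_BEARER_TOKEN', '')
    tweet_source = ApiV2TweetSource(token)

    # Fetch a user's recent Tweets as a typed TweetSearchResponse.
    resp = tweet_source.get_user_timeline('12345', max_results=5, return_dataclass=True)
    print(resp)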