# api.py

from dataclasses import asdict
from typing import List
from dacite import from_dict
import json
import requests
import sqlite3
from twitter_v2.types import TweetSearchResponse, DMEventsResponse, UserSearchResponse

# https://developer.twitter.com/en/docs/twitter-api/v1/tweets/curate-a-collection/api-reference/get-collections-entries
# we can perhaps steal a token from the TweetDeck Console, otherwise we need to apply for Standard v1.1 / Elevated
class ApiV11TweetCollectionSource:
    def __init__ (self, token):
        self.token = token

    def create_collection (self, name):
        return

    def bulk_add_to_collection (self, collection_id, items):
        return

    def add_to_collection (self, collection_id, item):
        return

    def get_collection_tweets (self, collection_id):
        return
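
# A minimal sketch (not part of the original module) of how get_collection_tweets
# might call the v1.1 collections/entries endpoint linked above. The `id` and
# `count` query parameters come from that documentation page; the helper name,
# bearer-token auth, and returning the raw JSON are assumptions.
def _example_get_collection_entries (token, collection_id, count = None):
    url = "https://api.twitter.com/1.1/collections/entries.json"
    params = {
        'id': collection_id
    }
    if count:
        params['count'] = count
    headers = {
        'Authorization': 'Bearer {}'.format(token)
    }
    response = requests.get(url, params=params, headers=headers)
    return json.loads(response.text)
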

class TwitterApiV2SocialGraph:
    def __init__ (self, token):
        self.token = token

    def get_user (self, user_id, is_username=False, return_dataclass=False):
        # GET /2/users/:id
        # GET /2/users/by/:username
        return self.get_users([user_id], is_username, return_dataclass=return_dataclass)

    def get_users (self, user_ids, are_usernames=False, return_dataclass=False):
        # GET /2/users/by?usernames=
        # GET /2/users?ids=
        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
        params = {
            'user.fields' : ','.join(user_fields),
        }
        # These endpoints expect a comma-separated list, not a repeated query parameter.
        if are_usernames:
            url = "https://api.twitter.com/2/users/by"
            params['usernames'] = ','.join(map(str, user_ids))
        else:
            url = "https://api.twitter.com/2/users"
            params['ids'] = ','.join(map(str, user_ids))
        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }
        response = requests.get(url, params=params, headers=headers)
        result = json.loads(response.text)
        return self._parse_user_search_response(result, return_dataclass=return_dataclass)

    def get_following (self, user_id,
            max_results = 50, pagination_token = None, return_dataclass=False):
        # GET /2/users/:id/following
        url = "https://api.twitter.com/2/users/{}/following".format(user_id)
        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified"]
        params = {
            'user.fields' : ','.join(user_fields),
            'max_results': max_results
        }
        if pagination_token:
            params['pagination_token'] = pagination_token
        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }
        response = requests.get(url, params=params, headers=headers)
        result = json.loads(response.text)
        return self._parse_user_search_response(result, return_dataclass=return_dataclass)

    def _parse_user_search_response (self, result, return_dataclass=True):
        typed_result = from_dict(data_class=UserSearchResponse, data=result)
        if return_dataclass:
            return typed_result
        result = cleandict(asdict(typed_result))
        return result

    def get_followers (self, user_id,
            max_results = 50, pagination_token = None, return_dataclass=False):
        # GET /2/users/:id/followers
        url = "https://api.twitter.com/2/users/{}/followers".format(user_id)
        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
        params = {
            'user.fields' : ','.join(user_fields),
            'max_results': max_results
        }
        if pagination_token:
            params['pagination_token'] = pagination_token
        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }
        response = requests.get(url, params=params, headers=headers)
        result = json.loads(response.text)
        return self._parse_user_search_response(result, return_dataclass=return_dataclass)

    def follow_user (self, user_id, target_user_id):
        # POST /2/users/:id/following
        # {target_user_id}
        return

    def unfollow_user (self, user_id, target_user_id):
        # DELETE /2/users/:source_user_id/following/:target_user_id
        return

class ApiV2ConversationSource:
    def __init__ (self, token):
        self.token = token

    def get_recent_events (self, max_results = None, pagination_token = None):
        # https://developer.twitter.com/en/docs/twitter-api/direct-messages/lookup/api-reference/get-dm_events
        url = "https://api.twitter.com/2/dm_events"
        tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
        media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
        user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
        params = {
            "dm_event.fields": "id,event_type,text,created_at,dm_conversation_id,sender_id,participant_ids,referenced_tweets,attachments",
            "expansions": ",".join(["sender_id", "participant_ids", "referenced_tweets.id", "attachments.media_keys"]),
            "user.fields": ",".join(user_fields),
            "tweet.fields": ",".join(tweet_fields),
            "media.fields": ",".join(media_fields)
        }
        if max_results:
            params['max_results'] = max_results
        if pagination_token:
            params['pagination_token'] = pagination_token
        headers = {"Authorization": "Bearer {}".format(self.token)}
        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)
        #print(response_json)
        return self._parse_dm_events_response(response_json)

    def _parse_dm_events_response (self, response_json):
        typed_resp = from_dict(data=response_json, data_class=DMEventsResponse)
        return typed_resp

    def get_conversation (self, dm_conversation_id,
            max_results = None, pagination_token = None):
        return

    def get_conversation_with_user (self, user_id,
            max_results = None, pagination_token = None):
        return

    def send_message (self, dm_conversation_id, text, attachments = None):
        # POST /2/dm_conversations/:dm_conversation_id/messages
        url = f'https://api.twitter.com/2/dm_conversations/{dm_conversation_id}/messages'
        body = {
            'text': text
        }
        if attachments:
            body['attachments'] = attachments
        headers = {
            "Authorization": "Bearer {}".format(self.token),
            "Content-Type": "application/json"
        }
        resp = requests.post(url, data=json.dumps(body), headers=headers)
        result = json.loads(resp.text)
        # Example response:
        # {
        #     "dm_conversation_id": "1346889436626259968",
        #     "dm_event_id": "128341038123"
        # }
        return result

class ApiV2TweetSource:
    def __init__ (self, token):
        self.token = token

    def create_tweet (self, text,
            reply_to_tweet_id = None, quote_tweet_id = None):
        url = "https://api.twitter.com/2/tweets"
        tweet = {
            'text': text
        }
        if reply_to_tweet_id:
            tweet['reply'] = {
                'in_reply_to_tweet_id': reply_to_tweet_id
            }
        if quote_tweet_id:
            tweet['quote_tweet_id'] = quote_tweet_id
        body = json.dumps(tweet)
        headers = {
            'Authorization': 'Bearer {}'.format(self.token),
            'Content-Type': 'application/json'
        }
        response = requests.post(url, data=body, headers=headers)
        result = json.loads(response.text)
        return result

    def retweet (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/retweets".format(user_id)
        retweet = {
            'tweet_id': tweet_id
        }
        body = json.dumps(retweet)
        headers = {
            'Authorization': 'Bearer {}'.format(self.token),
            'Content-Type': 'application/json'
        }
        response = requests.post(url, data=body, headers=headers)
        result = json.loads(response.text)
        return result

    def delete_retweet (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/retweets/{}".format(user_id, tweet_id)
        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }
        response = requests.delete(url, headers=headers)
        result = json.loads(response.text)
        return result

    def bookmark (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/bookmarks".format(user_id)
        bookmark = {
            'tweet_id': tweet_id
        }
        body = json.dumps(bookmark)
        headers = {
            'Authorization': 'Bearer {}'.format(self.token),
            'Content-Type': 'application/json'
        }
        response = requests.post(url, data=body, headers=headers)
        result = json.loads(response.text)
        return result

    def delete_bookmark (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/bookmarks/{}".format(user_id, tweet_id)
        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }
        response = requests.delete(url, headers=headers)
        print(response.status_code)
        result = json.loads(response.text)
        return result

    def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None, until_id = None, end_time = None, start_time = None) -> TweetSearchResponse:
        """
        Get a user's timeline as viewed by the user themselves.
        """
        path = 'users/{}/timelines/{}'.format(user_id, variant)
        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, until_id=until_id, end_time=end_time, start_time=start_time, return_dataclass=True)

    def get_timeline (self, path,
            max_results = 10, pagination_token = None, since_id = None,
            until_id = None,
            end_time = None,
            start_time = None,
            non_public_metrics = False,
            exclude_replies=False,
            exclude_retweets=False,
            return_dataclass=False):
        """
        Get any timeline, including custom curated timelines built by TweetDeck / ApiV11.
        Max 3,200 for Essential access, and 800 if exclude_replies=True.
        """
        token = self.token
        url = "https://api.twitter.com/2/{}".format(path)
        tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
        media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
        user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
        expansions = ["entities.mentions.username",
            "attachments.media_keys",
            "author_id",
            "referenced_tweets.id",
            "referenced_tweets.id.author_id"]
        if non_public_metrics:
            tweet_fields.append("non_public_metrics")
            media_fields.append("non_public_metrics")
        params = {
            "expansions": ",".join(expansions),
            "media.fields": ",".join(media_fields),
            "tweet.fields": ",".join(tweet_fields),
            "user.fields": ",".join(user_fields),
            "max_results": max_results,
        }
        exclude = []
        if exclude_replies:
            exclude.append('replies')
        if exclude_retweets:
            exclude.append('retweets')
        if len(exclude):
            print(f'get_timeline exclude={exclude}')
            params['exclude'] = ','.join(exclude)
        if pagination_token:
            params['pagination_token'] = pagination_token
        if since_id:
            params['since_id'] = since_id
        if until_id:
            params['until_id'] = until_id
        if end_time:
            params['end_time'] = end_time
        if start_time:
            params['start_time'] = start_time
        headers = {"Authorization": "Bearer {}".format(token)}
        #headers = {"Authorization": "access_token {}".format(access_token)}
        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)
        print(json.dumps(response_json, indent=2))
        return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)

    def _parse_tweet_search_response (self, response_json, return_dataclass=True):
        try:
            #print(json.dumps(response_json, indent = 2))
            typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
        except Exception as e:
            print('error converting response to dataclass')
            print(json.dumps(response_json, indent = 2))
            if not return_dataclass:
                return response_json
            raise ValueError('error converting response to dataclass') from e
        if return_dataclass:
            return typed_resp
        checked_resp = cleandict(asdict(typed_resp))
        print('using checked response to get_timeline')
        #print(json.dumps(checked_resp, indent=2))
        #print('og=')
        #print(json.dumps(response_json, indent=2))
        return checked_resp

    def get_mentions_timeline (self, user_id,
            max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
        path = "users/{}/mentions".format(user_id)
        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)

    def get_user_timeline (self, user_id,
            max_results = 10, pagination_token = None,
            since_id = None,
            until_id = None,
            start_time = None,
            end_time = None,
            non_public_metrics=False,
            exclude_replies=False,
            exclude_retweets=False,
            return_dataclass=False):
        """
        Get a user's Tweets as viewed by another.
        """
        path = "users/{}/tweets".format(user_id)
        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id,
            until_id=until_id, start_time=start_time, end_time=end_time,
            non_public_metrics = non_public_metrics,
            exclude_replies=exclude_replies, exclude_retweets=exclude_retweets, return_dataclass=return_dataclass)

    def get_tweet (self, id_, non_public_metrics = False, return_dataclass=False):
        return self.get_tweets([id_], non_public_metrics = non_public_metrics, return_dataclass=return_dataclass)

    def get_tweets (self,
            ids,
            non_public_metrics = False,
            return_dataclass = False):
        token = self.token
        url = "https://api.twitter.com/2/tweets"
        tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
        media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
        user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
        expansions = ["entities.mentions.username",
            "attachments.media_keys",
            "author_id",
            "referenced_tweets.id",
            "referenced_tweets.id.author_id"]
        if non_public_metrics:
            tweet_fields.append("non_public_metrics")
            media_fields.append("non_public_metrics")
        params = {
            "ids": ','.join(ids),
            "expansions": ",".join(expansions),
            "media.fields": ",".join(media_fields),
            "tweet.fields": ",".join(tweet_fields),
            "user.fields": ",".join(user_fields)
        }
        headers = {"Authorization": "Bearer {}".format(token)}
        #print(params)
        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)
        print(json.dumps(response_json, indent=2))
        return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)

    def search_tweets (self,
            query,
            pagination_token = None,
            since_id = None,
            until_id = None,
            start_time = None,
            end_time = None,
            max_results = 10,
            sort_order = None,
            non_public_metrics = False,
            return_dataclass = False
            ):
        token = self.token
        url = "https://api.twitter.com/2/tweets/search/recent"
        tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
        media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
        user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
        expansions = ["entities.mentions.username",
            "attachments.media_keys",
            "author_id",
            "referenced_tweets.id",
            "referenced_tweets.id.author_id"]
        if non_public_metrics:
            tweet_fields.append("non_public_metrics")
            media_fields.append("non_public_metrics")
        params = {
            "expansions": ",".join(expansions),
            "media.fields": ",".join(media_fields),
            "tweet.fields": ",".join(tweet_fields),
            "user.fields": ",".join(user_fields),
            "query": query,
            "max_results": max_results,
        }
        if pagination_token:
            params['pagination_token'] = pagination_token
        if since_id:
            params['since_id'] = since_id
        if until_id:
            params['until_id'] = until_id
        if start_time:
            params['start_time'] = start_time
        if end_time:
            params['end_time'] = end_time
        if sort_order:
            params['sort_order'] = sort_order
        headers = {"Authorization": "Bearer {}".format(token)}
        response = requests.get(url, params=params, headers=headers)
        response_json = json.loads(response.text)
        return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)

    def count_tweets (self,
            query,
            since_id = None,
            granularity = 'hour'
            ):
        """
        App rate limit (Application-only): 300 requests per 15-minute window shared among all users of your app, i.e. roughly one request per 3 seconds.
        """
        token = self.token
        url = "https://api.twitter.com/2/tweets/counts/recent"
        params = {
            "query": query,
            "granularity": granularity
        }
        if since_id:
            params['since_id'] = since_id
        headers = {"Authorization": "Bearer {}".format(token)}
        response = requests.get(url, params=params, headers=headers)
        #print(response.status_code)
        #print(response.text)
        response_json = json.loads(response.text)
        return response_json

    #def get_conversation (self, tweet_id, pagination_token = None,
    # TODO

    def get_thread (self, tweet_id,
            author_id = None,
            only_replies = False,
            pagination_token = None,
            since_id = None,
            max_results = 10,
            sort_order = None,
            return_dataclass=False
            ):
        # FIXME author_id can be determined from a Tweet object
        query = ""
        if author_id:
            query += " from:{}".format(author_id)
        if only_replies:
            query += " in_reply_to_tweet_id:{}".format(tweet_id)
        else:
            query += " conversation_id:{}".format(tweet_id)
        print("get_thread query=" + query)
        return self.search_tweets(query,
            pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order,
            return_dataclass=return_dataclass)

    def get_bookmarks (self, user_id,
            max_results = 10, pagination_token = None, since_id = None,
            return_dataclass=False):
        path = "users/{}/bookmarks".format(user_id)
        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)

    def get_media_tweets (self,
            author_id = None,
            has_media = True,
            has_links = None,
            has_images = None,
            has_videos = None,
            is_reply = None,
            is_retweet = None,
            pagination_token = None,
            since_id = None,
            max_results = 10,
            sort_order = None,
            return_dataclass=False
            ):
        # FIXME author_id can be determined from a Tweet object
        query = ""
        if has_media != None:
            if not has_media:
                query += "-"
            query += "has:media "
        if has_links != None:
            if not has_links:
                query += " -"
            query += "has:links "
        if has_images != None:
            if not has_images:
                query += " -"
            query += "has:images "
        if has_videos != None:
            if not has_videos:
                query += " -"
            query += "has:videos "
        if is_reply != None:
            if not is_reply:
                query += " -"
            query += "is:reply "
        if is_retweet != None:
            if not is_retweet:
                query += " -"
            query += "is:retweet "
        if author_id:
            query += "from:{} ".format(author_id)
        return self.search_tweets(query,
            pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order, return_dataclass = return_dataclass)

    def get_retweets (self, tweet_id):
        # GET /2/tweets/:id/retweeted_by
        return

    def get_quote_tweets (self, tweet_id):
        # GET /2/tweets/:id/quote_tweets
        return

    def get_liked_tweets (self, user_id,
            max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
        # GET /2/users/:id/liked_tweets
        # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
        path = "users/{}/liked_tweets".format(user_id)
        print('get_liked_tweets')
        return self.get_timeline(path,
            max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)

    def get_liking_users (self, tweet_id,
            max_results = None, pagination_token = None,
            return_dataclass=False):
        # GET /2/tweets/:id/liking_users
        # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
        url = f"https://api.twitter.com/2/tweets/{tweet_id}/liking_users"
        user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
        expansions = []
        params = cleandict({
            "user.fields": ','.join(user_fields),
            "max_results": max_results,
            "pagination_token": pagination_token,
            "expansions": ','.join(expansions),
        })
        headers = {
            "Authorization": f"Bearer {self.token}"
        }
        resp = requests.get(url, headers=headers, params=params)
        result = json.loads(resp.text)
        return self._parse_user_search_response(result, return_dataclass=return_dataclass)

    def like_tweet (self, tweet_id):
        # POST /2/users/:user_id/likes
        # {id: tweet_id}
        return

    def delete_like (self, tweet_id, user_id):
        url = "https://api.twitter.com/2/users/{}/likes/{}".format(user_id, tweet_id)
        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }
        response = requests.delete(url, headers=headers)
        print(response.status_code)
        result = json.loads(response.text)
        return result

    def get_list_tweets (self, list_id):
        # GET /2/lists/:id/tweets
        return

def cleandict(d):
    if isinstance(d, dict):
        return {k: cleandict(v) for k, v in d.items() if v is not None}
    elif isinstance(d, list):
        return [cleandict(v) for v in d]
    else:
        return d
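

# A minimal usage sketch (not part of the original module). It assumes a valid
# bearer token in a TWITTER_BEARER_TOKEN environment variable and that the
# UserSearchResponse / TweetSearchResponse dataclasses expose a `data` list whose
# items carry `id` and `text` fields, mirroring the raw API payloads.
if __name__ == '__main__':
    import os

    token = os.environ.get('TWITTER_BEARER_TOKEN', '')
    social_graph = TwitterApiV2SocialGraph(token)
    tweet_source = ApiV2TweetSource(token)

    # Look up a user by username, then fetch a page of their recent Tweets.
    users = social_graph.get_user('TwitterDev', is_username=True, return_dataclass=True)
    if users.data:
        timeline = tweet_source.get_user_timeline(users.data[0].id,
            max_results=10, exclude_replies=True, return_dataclass=True)
        for tweet in timeline.data or []:
            print(tweet.id, tweet.text)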