api.py 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806
  1. from dataclasses import asdict
  2. from typing import List
  3. from dacite import from_dict
  4. import json
  5. import requests
  6. import sqlite3
  7. from twitter_v2.types import TweetSearchResponse, DMEventsResponse, UserSearchResponse
#BASE_URL = 'https://api.twitter.com'
# NOTE(review): base URL points at a local server rather than the real Twitter
# API (the production URL is commented out above) — presumably a dev proxy/mock;
# confirm this is intentional before deploying.
BASE_URL = 'http://localhost:5900'
  10. # https://developer.twitter.com/en/docs/twitter-api/v1/tweets/curate-a-collection/api-reference/get-collections-entries
  11. # we can perhaps steal a token from the TweetDeck Console, otherwise we need to apply for Standard v1.1 / Elevated
class ApiV11TweetCollectionSource:
    """Placeholder client for the Twitter v1.1 Collections (curated timelines) API.

    Every method below is an unimplemented stub that returns None. Per the
    comment above the class, this likely needs a token with Standard v1.1 /
    Elevated access before it can be filled in.
    """

    def __init__ (self, token):
        # Token retained for use once the endpoint calls are implemented.
        self.token = token

    def create_collection (self, name):
        # TODO: implement via the v1.1 Collections API.
        return

    def bulk_add_to_collection (self, collection_id, items):
        # TODO: implement via the v1.1 Collections API.
        return

    def add_to_collection (self, collection_id, item):
        # TODO: implement via the v1.1 Collections API.
        return

    def get_collection_tweets (self, collection_id):
        # TODO: implement via the v1.1 Collections API.
        return
  23. class TwitterApiV2SocialGraph:
  24. def __init__ (self, token, base_url = BASE_URL):
  25. self.token = token
  26. self.base_url = base_url
  27. def get_user (self, user_id, is_username=False, return_dataclass=False):
  28. # GET /2/users/:id
  29. # GET /2/users/by/:username
  30. return self.get_users([user_id], is_username, return_dataclass=return_dataclass)
  31. def get_users (self, user_ids, are_usernames=False, return_dataclass=False):
  32. # GET /2/users/by?usernames=
  33. # GET /2/users?ids=
  34. print('get_users: ' + json.dumps(user_ids) )
  35. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
  36. params = {
  37. 'user.fields' : ','.join(user_fields),
  38. }
  39. if are_usernames:
  40. url = f"{self.base_url}/2/users/by"
  41. params['usernames'] = user_ids
  42. else:
  43. url = f"{self.base_url}/2/users"
  44. params['ids'] = user_ids
  45. headers = {
  46. 'Authorization': 'Bearer {}'.format(self.token)
  47. }
  48. response = requests.get(url, params=params, headers=headers)
  49. result = json.loads(response.text)
  50. print("get_users, response=")
  51. print(json.dumps(result, indent=2))
  52. return self._parse_user_search_response(result, return_dataclass=return_dataclass)
  53. def get_following (self, user_id,
  54. max_results = 50, pagination_token = None, return_dataclass=False):
  55. # GET /2/users/:id/following
  56. url = f"{self.base_url}/2/users/{user_id}/following"
  57. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified"]
  58. params = {
  59. 'user.fields' : ','.join(user_fields),
  60. 'max_results': max_results
  61. }
  62. if pagination_token:
  63. params['pagination_token'] = pagination_token
  64. headers = {
  65. 'Authorization': 'Bearer {}'.format(self.token)
  66. }
  67. response = requests.get(url, params=params, headers=headers)
  68. result = json.loads(response.text)
  69. return self._parse_user_search_response(result, return_dataclass=return_dataclass)
  70. def _parse_user_search_response (self, result, return_dataclass=True):
  71. typed_result = from_dict(data_class=UserSearchResponse, data=result)
  72. if return_dataclass:
  73. return typed_result
  74. result = cleandict(asdict(typed_result))
  75. return result
  76. def get_followers (self, user_id,
  77. max_results = 50, pagination_token = None, return_dataclass=False):
  78. # GET /2/users/:id/followers
  79. url = f"{self.base_url}/2/users/{user_id}/followers"
  80. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
  81. params = {
  82. 'user.fields' : ','.join(user_fields),
  83. 'max_results': max_results
  84. }
  85. if pagination_token:
  86. params['pagination_token'] = pagination_token
  87. headers = {
  88. 'Authorization': 'Bearer {}'.format(self.token)
  89. }
  90. response = requests.get(url, params=params, headers=headers)
  91. result = json.loads(response.text)
  92. return self._parse_user_search_response(result, return_dataclass=return_dataclass)
  93. def follow_user (self, user_id, target_user_id):
  94. # POST /2/users/:id/following
  95. # {target_user_id}
  96. return
  97. def unfollow_user (self, user_id, target_user_id):
  98. # DELETE /2/users/:source_user_id/following/:target_user_id
  99. return
  100. class ApiV2ConversationSource:
  101. def __init__ (self, token, base_url = BASE_URL):
  102. self.token = token
  103. self.base_url = base_url
  104. def get_recent_events (self, max_results = None, pagination_token = None):
  105. # https://developer.twitter.com/en/docs/twitter-api/direct-messages/lookup/api-reference/get-dm_events
  106. url = f"{self.base_url}/2/dm_events"
  107. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  108. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  109. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  110. params = {
  111. "dm_event.fields": "id,event_type,text,created_at,dm_conversation_id,sender_id,participant_ids,referenced_tweets,attachments",
  112. "expansions": ",".join(["sender_id", "participant_ids", "referenced_tweets.id", "attachments.media_keys"]),
  113. "user.fields": ",".join(user_fields),
  114. "tweet.fields": ",".join(tweet_fields),
  115. "media.fields": ",".join(media_fields)
  116. }
  117. if max_results:
  118. params['max_results'] = max_results
  119. if pagination_token:
  120. params['pagination_token'] = pagination_token
  121. headers = {"Authorization": "Bearer {}".format(self.token)}
  122. response = requests.get(url, params=params, headers=headers)
  123. response_json = json.loads(response.text)
  124. #print(response_json)
  125. return self._parse_dm_events_response(response_json)
  126. def _parse_dm_events_response (self, response_json):
  127. typed_resp = from_dict(data=response_json, data_class=DMEventsResponse)
  128. return typed_resp
  129. def get_conversation (self, dm_conversation_id,
  130. max_results = None, pagination_token = None):
  131. return
  132. def get_conversation_with_user (self, user_id,
  133. max_results = None, pagination_token = None):
  134. return
  135. def send_message (self, dm_conversation_id, text, attachments = None):
  136. url = f'{self.base_url}/2/dm_conversations/{dm_conversation_id}/messages'
  137. body = {
  138. 'text': text
  139. }
  140. if attachments:
  141. body['attachments'] = attachments
  142. headers = {"Authorization": "Bearer {}".format(self.token)}
  143. resp = requests.post(url, data=json.dumps(body), headers=headers)
  144. result = json.loads(resp.text)
  145. example_resp_text = """
  146. {
  147. "dm_conversation_id": "1346889436626259968",
  148. "dm_event_id": "128341038123"
  149. }
  150. """
  151. return result
  152. class ApiV2TweetSource:
  153. def __init__ (self, token, base_url = BASE_URL):
  154. self.token = token
  155. self.base_url = base_url
  156. def create_tweet (self, text,
  157. reply_to_tweet_id = None, quote_tweet_id = None):
  158. url = f"{self.base_url}/2/tweets"
  159. tweet = {
  160. 'text': text
  161. }
  162. if reply_to_tweet_id:
  163. tweet['reply'] = {
  164. 'in_reply_to_tweet_id': reply_to_tweet_id
  165. }
  166. if quote_tweet_id:
  167. tweet['quote_tweet_id'] = quote_tweet_id
  168. body = json.dumps(tweet)
  169. headers = {
  170. 'Authorization': 'Bearer {}'.format(self.token),
  171. 'Content-Type': 'application/json'
  172. }
  173. response = requests.post(url, data=body, headers=headers)
  174. result = json.loads(response.text)
  175. return result
  176. def retweet (self, tweet_id, user_id):
  177. url = f"{self.base_url}/2/users/{user_id}/retweets"
  178. retweet = {
  179. 'tweet_id': tweet_id
  180. }
  181. body = json.dumps(retweet)
  182. headers = {
  183. 'Authorization': 'Bearer {}'.format(self.token),
  184. 'Content-Type': 'application/json'
  185. }
  186. response = requests.post(url, data=body, headers=headers)
  187. result = json.loads(response.text)
  188. return result
  189. def delete_retweet (self, tweet_id, user_id):
  190. url = f"{self.base_url}/2/users/{user_id}/retweets/{tweet_id}"
  191. headers = {
  192. 'Authorization': 'Bearer {}'.format(self.token)
  193. }
  194. response = requests.delete(url, headers=headers)
  195. result = json.loads(response.text)
  196. return result
  197. def bookmark (self, tweet_id, user_id):
  198. url = f"{self.base_url}/2/users/{user_id}/bookmarks"
  199. bookmark = {
  200. 'tweet_id': tweet_id
  201. }
  202. body = json.dumps(bookmark)
  203. headers = {
  204. 'Authorization': 'Bearer {}'.format(self.token),
  205. 'Content-Type': 'application/json'
  206. }
  207. response = requests.post(url, data=body, headers=headers)
  208. result = json.loads(response.text)
  209. return result
  210. def delete_bookmark (self, tweet_id, user_id):
  211. url = f"{self.base_url}/2/users/{user_id}/bookmarks/{tweet_id}"
  212. headers = {
  213. 'Authorization': 'Bearer {}'.format(self.token)
  214. }
  215. response = requests.delete(url, headers=headers)
  216. print(response.status_code)
  217. result = json.loads(response.text)
  218. return result
  219. def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None, until_id = None, end_time = None, start_time=None) -> TweetSearchResponse:
  220. """
  221. Get a user's timeline as viewed by the user themselves.
  222. """
  223. path = f'users/{user_id}/timelines/{variant}'
  224. return self.get_timeline(path,
  225. max_results=max_results, pagination_token=pagination_token, since_id=since_id, until_id=until_id, end_time=end_time, start_time=start_time, return_dataclass=True)
  226. def get_timeline (self, path,
  227. max_results = 10, pagination_token = None, since_id = None,
  228. until_id = None,
  229. end_time = None,
  230. start_time = None,
  231. non_public_metrics = False,
  232. exclude_replies=False,
  233. exclude_retweets=False,
  234. return_dataclass=False):
  235. """
  236. Get any timeline, including custom curated timelines built by Tweet Deck / ApiV11.
  237. Max 3,200 for Essential access, and 800 if exclude_replies=True
  238. """
  239. token = self.token
  240. url = f"{self.base_url}/2/{path}"
  241. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  242. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  243. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  244. expansions = ["entities.mentions.username",
  245. "attachments.media_keys",
  246. "author_id",
  247. "referenced_tweets.id",
  248. "referenced_tweets.id.author_id"]
  249. if non_public_metrics:
  250. tweet_fields.append("non_public_metrics")
  251. media_fields.append("non_public_metrics")
  252. params = {
  253. "expansions": ",".join(expansions),
  254. "media.fields": ",".join(media_fields),
  255. "tweet.fields": ",".join(tweet_fields),
  256. "user.fields": ",".join(user_fields),
  257. "max_results": max_results,
  258. }
  259. exclude = []
  260. if exclude_replies:
  261. exclude.append('replies')
  262. if exclude_retweets:
  263. exclude.append('retweets')
  264. if len(exclude):
  265. print(f'get_timeline exclude={exclude}')
  266. params['exclude'] = ','.join(exclude)
  267. if pagination_token:
  268. params['pagination_token'] = pagination_token
  269. if since_id:
  270. params['since_id'] = since_id
  271. if until_id:
  272. params['until_id'] = until_id
  273. if end_time:
  274. params['end_time'] = end_time
  275. if start_time:
  276. params['start_time'] = start_time
  277. headers = {"Authorization": "Bearer {}".format(token)}
  278. #headers = {"Authorization": "access_token {}".format(access_token)}
  279. response = requests.get(url, params=params, headers=headers)
  280. response_json = json.loads(response.text)
  281. print(json.dumps(response_json, indent=2))
  282. return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)
  283. def _parse_tweet_search_response (self, response_json, return_dataclass=True):
  284. try:
  285. #print(json.dumps(response_json, indent = 2))
  286. typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
  287. except:
  288. print('error converting response to dataclass')
  289. print(json.dumps(response_json, indent = 2))
  290. if not return_dataclass:
  291. return response_json
  292. raise Exception('error converting response to dataclass')
  293. if return_dataclass:
  294. return typed_resp
  295. checked_resp = cleandict(asdict(typed_resp))
  296. print('using checked response to get_timeline')
  297. #print(json.dumps(checked_resp, indent=2))
  298. #print('og=')
  299. #print(json.dumps(response_json, indent=2))
  300. return checked_resp
  301. def get_mentions_timeline (self, user_id,
  302. max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
  303. path = "users/{}/mentions".format(user_id)
  304. return self.get_timeline(path,
  305. max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
  306. def get_user_timeline (self, user_id,
  307. max_results = 10, pagination_token = None,
  308. since_id = None,
  309. until_id = None,
  310. start_time = None,
  311. end_time = None,
  312. non_public_metrics=False,
  313. exclude_replies=False,
  314. exclude_retweets=False,
  315. return_dataclass=False):
  316. """
  317. Get a user's Tweets as viewed by another.
  318. """
  319. path = "users/{}/tweets".format(user_id)
  320. return self.get_timeline(path,
  321. max_results=max_results, pagination_token=pagination_token, since_id=since_id,
  322. until_id=until_id,start_time=start_time,
  323. non_public_metrics = non_public_metrics,
  324. exclude_replies=exclude_replies, exclude_retweets=exclude_retweets, return_dataclass=return_dataclass)
  325. def get_tweet (self, id_, non_public_metrics = False, return_dataclass=False):
  326. return self.get_tweets([id_], non_public_metrics = non_public_metrics, return_dataclass=return_dataclass)
  327. def get_tweets (self,
  328. ids,
  329. non_public_metrics = False,
  330. return_dataclass = False):
  331. token = self.token
  332. url = f"{self.base_url}/2/tweets"
  333. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  334. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  335. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  336. expansions = ["entities.mentions.username",
  337. "attachments.media_keys",
  338. "author_id",
  339. "referenced_tweets.id",
  340. "referenced_tweets.id.author_id"]
  341. if non_public_metrics:
  342. tweet_fields.append("non_public_metrics")
  343. media_fields.append("non_public_metrics")
  344. params = {
  345. "ids": ','.join(ids),
  346. "expansions": ",".join(expansions),
  347. "media.fields": ",".join(media_fields),
  348. "tweet.fields": ",".join(tweet_fields),
  349. "user.fields": ",".join(user_fields)
  350. }
  351. headers = {"Authorization": "Bearer {}".format(token)}
  352. #print(params)
  353. response = requests.get(url, params=params, headers=headers)
  354. response_json = json.loads(response.text)
  355. print(json.dumps(response_json, indent=2))
  356. return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)
  357. def search_tweets (self,
  358. query,
  359. pagination_token = None,
  360. since_id = None,
  361. until_id = None,
  362. start_time = None,
  363. end_time = None,
  364. max_results = 10,
  365. sort_order = None,
  366. non_public_metrics = False,
  367. return_dataclass = False
  368. ):
  369. token = self.token
  370. url = f"{self.base_url}/2/tweets/search/recent"
  371. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  372. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  373. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  374. expansions = ["entities.mentions.username",
  375. "attachments.media_keys",
  376. "author_id",
  377. "referenced_tweets.id",
  378. "referenced_tweets.id.author_id"]
  379. if non_public_metrics:
  380. tweet_fields.append("non_public_metrics")
  381. media_fields.append("non_public_metrics")
  382. params = {
  383. "expansions": ",".join(expansions),
  384. "media.fields": ",".join(media_fields),
  385. "tweet.fields": ",".join(tweet_fields),
  386. "user.fields": ",".join(user_fields),
  387. "query": query,
  388. "max_results": max_results,
  389. }
  390. if pagination_token:
  391. params['pagination_token'] = pagination_token
  392. if since_id:
  393. params['since_id'] = since_id
  394. if until_id:
  395. params['until_id'] = until_id
  396. if start_time:
  397. params['start_time'] = start_time
  398. if end_time:
  399. params['end_time'] = end_time
  400. if sort_order:
  401. params['sort_order'] = sort_order
  402. headers = {"Authorization": "Bearer {}".format(token)}
  403. response = requests.get(url, params=params, headers=headers)
  404. response_json = json.loads(response.text)
  405. return self._parse_tweet_search_response(response_json, return_dataclass=return_dataclass)
  406. def count_tweets (self,
  407. query,
  408. since_id = None,
  409. granularity = 'hour'
  410. ):
  411. """
  412. App rate limit (Application-only): 300 requests per 15-minute window shared among all users of your app = once per 3 seconds.
  413. """
  414. token = self.token
  415. url = f"{self.base_url}/2/tweets/counts/recent"
  416. params = {
  417. "query": query
  418. }
  419. if since_id:
  420. params['since_id'] = since_id
  421. headers = {"Authorization": "Bearer {}".format(token)}
  422. response = requests.get(url, params=params, headers=headers)
  423. #print(response.status_code)
  424. #print(response.text)
  425. response_json = json.loads(response.text)
  426. return response_json
  427. #def get_conversation (self, tweet_id, pagination_token = None,
  428. # TODO
  429. def get_thread (self, tweet_id,
  430. author_id = None,
  431. only_replies = False,
  432. pagination_token = None,
  433. since_id = None,
  434. max_results = 10,
  435. sort_order = None,
  436. return_dataclass=False
  437. ):
  438. # FIXME author_id can be determined from a Tweet object
  439. query = ""
  440. if author_id:
  441. query += " from:{}".format(author_id)
  442. if only_replies:
  443. query += " in_reply_to_tweet_id:{}".format(tweet_id)
  444. else:
  445. query += " conversation_id:{}".format(tweet_id)
  446. print("get_thread query=" + query)
  447. return self.search_tweets(query,
  448. pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order,
  449. return_dataclass=return_dataclass)
  450. def get_bookmarks (self, user_id,
  451. max_results = 10, pagination_token = None, since_id = None,
  452. return_dataclass=False):
  453. path = "users/{}/bookmarks".format(user_id)
  454. return self.get_timeline(path,
  455. max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
  456. def get_media_tweets (self,
  457. author_id = None,
  458. has_media = True,
  459. has_links = None,
  460. has_images = None,
  461. has_videos = None,
  462. is_reply = None,
  463. is_retweet = None,
  464. pagination_token = None,
  465. since_id = None,
  466. max_results = 10,
  467. sort_order = None,
  468. return_dataclass=False
  469. ):
  470. # FIXME author_id can be determined from a Tweet object
  471. query = ""
  472. if has_media != None:
  473. if not has_media:
  474. query += "-"
  475. query += "has:media "
  476. if has_links != None:
  477. if not has_links:
  478. query += " -"
  479. query += "has:links "
  480. if has_images != None:
  481. if not has_images:
  482. query += " -"
  483. query += "has:images "
  484. if has_videos != None:
  485. if not has_videos:
  486. query += " -"
  487. query += "has:videos "
  488. if is_reply != None:
  489. if not is_reply:
  490. query += " -"
  491. query += "is:reply "
  492. if is_retweet != None:
  493. if not is_retweet:
  494. query += " -"
  495. query += "is:retweet "
  496. if author_id:
  497. query += "from:{} ".format(author_id)
  498. return self.search_tweets(query,
  499. pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order, return_dataclass = return_dataclass)
  500. def get_retweets (self, tweet_id):
  501. # GET /2/tweets/:id/retweeted_by
  502. return
  503. def get_quote_tweets( self, tweet_id):
  504. # GET /2/tweets/:id/quote_tweets
  505. return
  506. def get_liked_tweets (self, user_id,
  507. max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
  508. # GET /2/users/:id/liked_tweets
  509. # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
  510. path = "users/{}/liked_tweets".format(user_id)
  511. print('get_liked_tweets')
  512. return self.get_timeline(path,
  513. max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
  514. def get_liking_users (self, tweet_id,
  515. max_results = None, pagination_token = None,
  516. return_dataclass=False):
  517. # GET /2/tweets/:id/liking_users
  518. # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
  519. url = f"{self.base_url}/2/tweets/{tweet_id}/liking_users"
  520. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
  521. expansions = []
  522. params = cleandict({
  523. "user.fields": ','.join(user_fields),
  524. "max_results": max_results,
  525. "pagination_token": pagination_token,
  526. "expansions": ','.join(expansions),
  527. })
  528. headers = {
  529. "Authorization": f"Bearer {self.token}"
  530. }
  531. resp = requests.get(url, headers=headers, params=params)
  532. result = json.loads(resp.text)
  533. return self._parse_user_search_response(result, return_dataclass=return_dataclass)
  534. def like_tweet (self, tweet_id):
  535. # POST /2/users/:user_id/likes
  536. # {id: tweet_id}
  537. return
  538. def delete_like (self, tweet_id, user_id):
  539. url = f"{self.base_url}/2/users/{user_id}/likes/{tweet_id}"
  540. headers = {
  541. 'Authorization': 'Bearer {}'.format(self.token)
  542. }
  543. response = requests.delete(url, headers=headers)
  544. print(response.status_code)
  545. result = json.loads(response.text)
  546. return result
  547. def get_list_tweets (self, list_id):
  548. # GET /2/lists/:id/tweets
  549. return
  550. def cleandict(d):
  551. if isinstance(d, dict):
  552. return {k: cleandict(v) for k, v in d.items() if v is not None}
  553. elif isinstance(d, list):
  554. return [cleandict(v) for v in d]
  555. else:
  556. return d