# api.py
import json
import sqlite3
from dataclasses import asdict
from typing import List

import requests
from dacite import from_dict

from twitter_v2.types import DMEventsResponse, TweetSearchResponse, UserSearchResponse
# https://developer.twitter.com/en/docs/twitter-api/v1/tweets/curate-a-collection/api-reference/get-collections-entries
# We can perhaps steal a token from the TweetDeck console; otherwise we need to
# apply for Standard v1.1 / Elevated access.
  10. class ApiV11TweetCollectionSource:
  11. def __init__ (self, token):
  12. self.token = token
  13. def create_collection (self, name):
  14. return
  15. def bulk_add_to_collection (self, collection_id, items):
  16. return
  17. def add_to_collection (self, collection_id, item):
  18. return
  19. def get_collection_tweets (self, collection_id):
  20. return
  21. class TwitterApiV2SocialGraph:
  22. def __init__ (self, token):
  23. self.token = token
  24. def get_user (self, user_id, is_username=False, return_dataclass=False):
  25. # GET /2/users/:id
  26. # GET /2/users/by/:username
  27. return self.get_users([user_id], is_username, return_dataclass=return_dataclass)
  28. def get_users (self, user_ids, are_usernames=False, return_dataclass=False):
  29. # GET /2/users/by?usernames=
  30. # GET /2/users?ids=
  31. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
  32. params = {
  33. 'user.fields' : ','.join(user_fields),
  34. }
  35. if are_usernames:
  36. url = "https://api.twitter.com/2/users/by"
  37. params['usernames'] = user_ids
  38. else:
  39. url = "https://api.twitter.com/2/users"
  40. params['ids'] = user_ids
  41. headers = {
  42. 'Authorization': 'Bearer {}'.format(self.token)
  43. }
  44. response = requests.get(url, params=params, headers=headers)
  45. result = json.loads(response.text)
  46. typed_result = from_dict(data_class=UserSearchResponse, data=result)
  47. if return_dataclass:
  48. return typed_result
  49. result = cleandict(asdict(typed_result))
  50. return result
  51. def get_following (self, user_id,
  52. max_results = 50, pagination_token = None, return_dataclass=False):
  53. # GET /2/users/:id/following
  54. url = "https://api.twitter.com/2/users/{}/following".format(user_id)
  55. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified"]
  56. params = {
  57. 'user.fields' : ','.join(user_fields),
  58. 'max_results': max_results
  59. }
  60. if pagination_token:
  61. params['pagination_token'] = pagination_token
  62. headers = {
  63. 'Authorization': 'Bearer {}'.format(self.token)
  64. }
  65. response = requests.get(url, params=params, headers=headers)
  66. result = json.loads(response.text)
  67. typed_result = from_dict(data_class=UserSearchResponse, data=result)
  68. if return_dataclass:
  69. return typed_result
  70. result = cleandict(asdict(typed_result))
  71. return result
  72. def get_followers (self, user_id,
  73. max_results = 50, pagination_token = None, return_dataclass=False):
  74. # GET /2/users/:id/followers
  75. url = "https://api.twitter.com/2/users/{}/followers".format(user_id)
  76. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
  77. params = {
  78. 'user.fields' : ','.join(user_fields),
  79. 'max_results': max_results
  80. }
  81. if pagination_token:
  82. params['pagination_token'] = pagination_token
  83. headers = {
  84. 'Authorization': 'Bearer {}'.format(self.token)
  85. }
  86. response = requests.get(url, params=params, headers=headers)
  87. result = json.loads(response.text)
  88. typed_result = from_dict(data_class=UserSearchResponse, data=result)
  89. if return_dataclass:
  90. return typed_result
  91. result = cleandict(asdict(typed_result))
  92. return result
  93. def follow_user (self, user_id, target_user_id):
  94. # POST /2/users/:id/following
  95. # {target_user_id}
  96. return
  97. def unfollow_user (self, user_id, target_user_id):
  98. # DELETE /2/users/:source_user_id/following/:target_user_id
  99. return
  100. class ApiV2ConversationSource:
  101. def __init__ (self, token):
  102. self.token = token
  103. def get_recent_events (self, max_results = None, pagination_token = None):
  104. # https://developer.twitter.com/en/docs/twitter-api/direct-messages/lookup/api-reference/get-dm_events
  105. url = "https://api.twitter.com/2/dm_events"
  106. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  107. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  108. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  109. params = {
  110. "dm_event.fields": "id,event_type,text,created_at,dm_conversation_id,sender_id,participant_ids,referenced_tweets,attachments",
  111. "expansions": ",".join(["sender_id", "participant_ids", "referenced_tweets.id", "attachments.media_keys"]),
  112. "user.fields": ",".join(user_fields),
  113. "tweet.fields": ",".join(tweet_fields),
  114. "media.fields": ",".join(media_fields)
  115. }
  116. if max_results:
  117. params['max_results'] = max_results
  118. if pagination_token:
  119. params['pagination_token'] = pagination_token
  120. headers = {"Authorization": "Bearer {}".format(self.token)}
  121. response = requests.get(url, params=params, headers=headers)
  122. response_json = json.loads(response.text)
  123. #print(response_json)
  124. typed_resp = from_dict(data=response_json, data_class=DMEventsResponse)
  125. return typed_resp
  126. def get_conversation (self, dm_conversation_id,
  127. max_results = None, pagination_token = None):
  128. return
  129. def get_conversation_with_user (self, user_id,
  130. max_results = None, pagination_token = None):
  131. return
  132. def send_message (self, dm_conversation_id, text, attachments = None):
  133. url = f'/2/dm_conversations/{dm_conversation_id}/messages'
  134. body = {
  135. 'text': text
  136. }
  137. if attachments:
  138. body['attachments'] = attachments
  139. headers = {"Authorization": "Bearer {}".format(self.token)}
  140. resp = requests.post(url, data=json.dumps(body), headers=headers)
  141. result = json.loads(resp.text)
  142. example_resp_text = """
  143. {
  144. "dm_conversation_id": "1346889436626259968",
  145. "dm_event_id": "128341038123"
  146. }
  147. """
  148. return result
  149. class ApiV2TweetSource:
  150. def __init__ (self, token):
  151. self.token = token
  152. def create_tweet (self, text,
  153. reply_to_tweet_id = None, quote_tweet_id = None):
  154. url = "https://api.twitter.com/2/tweets"
  155. tweet = {
  156. 'text': text
  157. }
  158. if reply_to_tweet_id:
  159. tweet['reply'] = {
  160. 'in_reply_to_tweet_id': reply_to_tweet_id
  161. }
  162. if quote_tweet_id:
  163. tweet['quote_tweet_id'] = quote_tweet_id
  164. body = json.dumps(tweet)
  165. headers = {
  166. 'Authorization': 'Bearer {}'.format(self.token),
  167. 'Content-Type': 'application/json'
  168. }
  169. response = requests.post(url, data=body, headers=headers)
  170. result = json.loads(response.text)
  171. return result
  172. def retweet (self, tweet_id, user_id):
  173. url = "https://api.twitter.com/2/users/{}/retweets".format(user_id)
  174. retweet = {
  175. 'tweet_id': tweet_id
  176. }
  177. body = json.dumps(retweet)
  178. headers = {
  179. 'Authorization': 'Bearer {}'.format(self.token),
  180. 'Content-Type': 'application/json'
  181. }
  182. response = requests.post(url, data=body, headers=headers)
  183. result = json.loads(response.text)
  184. return result
  185. def delete_retweet (self, tweet_id, user_id):
  186. url = "https://api.twitter.com/2/users/{}/retweets/{}".format(user_id, tweet_id)
  187. headers = {
  188. 'Authorization': 'Bearer {}'.format(self.token)
  189. }
  190. response = requests.delete(url, headers=headers)
  191. result = json.loads(response.text)
  192. return result
  193. def bookmark (self, tweet_id, user_id):
  194. url = "https://api.twitter.com/2/users/{}/bookmarks".format(user_id)
  195. bookmark = {
  196. 'tweet_id': tweet_id
  197. }
  198. body = json.dumps(bookmark)
  199. headers = {
  200. 'Authorization': 'Bearer {}'.format(self.token),
  201. 'Content-Type': 'application/json'
  202. }
  203. response = requests.post(url, data=body, headers=headers)
  204. result = json.loads(response.text)
  205. return result
  206. def delete_bookmark (self, tweet_id, user_id):
  207. url = "https://api.twitter.com/2/users/{}/bookmarks/{}".format(user_id, tweet_id)
  208. headers = {
  209. 'Authorization': 'Bearer {}'.format(self.token)
  210. }
  211. response = requests.delete(url, headers=headers)
  212. print(response.status_code)
  213. result = json.loads(response.text)
  214. return result
  215. def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None, until_id = None, end_time = None, start_time=None) -> TweetSearchResponse:
  216. """
  217. Get a user's timeline as viewed by the user themselves.
  218. """
  219. path = 'users/{}/timelines/{}'.format(user_id, variant)
  220. return self.get_timeline(path,
  221. max_results=max_results, pagination_token=pagination_token, since_id=since_id, until_id=until_id, end_time=end_time, start_time=start_time, return_dataclass=True)
  222. def get_timeline (self, path,
  223. max_results = 10, pagination_token = None, since_id = None,
  224. until_id = None,
  225. end_time = None,
  226. start_time = None,
  227. non_public_metrics = False,
  228. exclude_replies=False,
  229. exclude_retweets=False,
  230. return_dataclass=False):
  231. """
  232. Get any timeline, including custom curated timelines built by Tweet Deck / ApiV11.
  233. Max 3,200 for Essential access, and 800 if exclude_replies=True
  234. """
  235. token = self.token
  236. url = "https://api.twitter.com/2/{}".format(path)
  237. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  238. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  239. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  240. expansions = ["entities.mentions.username",
  241. "attachments.media_keys",
  242. "author_id",
  243. "referenced_tweets.id",
  244. "referenced_tweets.id.author_id"]
  245. if non_public_metrics:
  246. tweet_fields.append("non_public_metrics")
  247. media_fields.append("non_public_metrics")
  248. params = {
  249. "expansions": ",".join(expansions),
  250. "media.fields": ",".join(media_fields),
  251. "tweet.fields": ",".join(tweet_fields),
  252. "user.fields": ",".join(user_fields),
  253. "max_results": max_results,
  254. }
  255. exclude = []
  256. if exclude_replies:
  257. exclude.append('replies')
  258. if exclude_retweets:
  259. exclude.append('retweets')
  260. if len(exclude):
  261. print(f'get_timeline exclude={exclude}')
  262. params['exclude'] = ','.join(exclude)
  263. if pagination_token:
  264. params['pagination_token'] = pagination_token
  265. if since_id:
  266. params['since_id'] = since_id
  267. if until_id:
  268. params['until_id'] = until_id
  269. if end_time:
  270. params['end_time'] = end_time
  271. if start_time:
  272. params['start_time'] = start_time
  273. headers = {"Authorization": "Bearer {}".format(token)}
  274. #headers = {"Authorization": "access_token {}".format(access_token)}
  275. response = requests.get(url, params=params, headers=headers)
  276. response_json = json.loads(response.text)
  277. try:
  278. #print(json.dumps(response_json, indent = 2))
  279. typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
  280. except:
  281. print('error converting response to dataclass')
  282. print(json.dumps(response_json, indent = 2))
  283. if not return_dataclass:
  284. return response_json
  285. raise 'error converting response to dataclass'
  286. if return_dataclass:
  287. return typed_resp
  288. checked_resp = cleandict(asdict(typed_resp))
  289. print('using checked response to get_timeline')
  290. #print(json.dumps(checked_resp, indent=2))
  291. #print('og=')
  292. #print(json.dumps(response_json, indent=2))
  293. return checked_resp
  294. def get_mentions_timeline (self, user_id,
  295. max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
  296. path = "users/{}/mentions".format(user_id)
  297. return self.get_timeline(path,
  298. max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
  299. def get_user_timeline (self, user_id,
  300. max_results = 10, pagination_token = None,
  301. since_id = None,
  302. until_id = None,
  303. start_time = None,
  304. end_time = None,
  305. non_public_metrics=False,
  306. exclude_replies=False,
  307. exclude_retweets=False,
  308. return_dataclass=False):
  309. """
  310. Get a user's Tweets as viewed by another.
  311. """
  312. path = "users/{}/tweets".format(user_id)
  313. return self.get_timeline(path,
  314. max_results=max_results, pagination_token=pagination_token, since_id=since_id,
  315. until_id=until_id,start_time=start_time,
  316. non_public_metrics = non_public_metrics,
  317. exclude_replies=exclude_replies, exclude_retweets=exclude_retweets, return_dataclass=return_dataclass)
  318. def get_tweet (self, id_, non_public_metrics = False, return_dataclass=False):
  319. return self.get_tweets([id_], non_public_metrics = non_public_metrics, return_dataclass=return_dataclass)
  320. def get_tweets (self,
  321. ids,
  322. non_public_metrics = False,
  323. return_dataclass = False):
  324. token = self.token
  325. url = "https://api.twitter.com/2/tweets"
  326. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  327. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  328. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  329. expansions = ["entities.mentions.username",
  330. "attachments.media_keys",
  331. "author_id",
  332. "referenced_tweets.id",
  333. "referenced_tweets.id.author_id"]
  334. if non_public_metrics:
  335. tweet_fields.append("non_public_metrics")
  336. media_fields.append("non_public_metrics")
  337. params = {
  338. "ids": ','.join(ids),
  339. "expansions": ",".join(expansions),
  340. "media.fields": ",".join(media_fields),
  341. "tweet.fields": ",".join(tweet_fields),
  342. "user.fields": ",".join(user_fields)
  343. }
  344. headers = {"Authorization": "Bearer {}".format(token)}
  345. #print(params)
  346. response = requests.get(url, params=params, headers=headers)
  347. response_json = json.loads(response.text)
  348. print(json.dumps(response_json, indent=2))
  349. typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
  350. if return_dataclass:
  351. return typed_resp
  352. checked_resp = cleandict(asdict(typed_resp))
  353. print('using checked response to search_tweets')
  354. return checked_resp
  355. def search_tweets (self,
  356. query,
  357. pagination_token = None,
  358. since_id = None,
  359. max_results = 10,
  360. sort_order = None,
  361. non_public_metrics = False,
  362. return_dataclass = False
  363. ):
  364. token = self.token
  365. url = "https://api.twitter.com/2/tweets/search/recent"
  366. tweet_fields = ["created_at", "conversation_id", "referenced_tweets", "text", "public_metrics", "entities", "attachments"]
  367. media_fields = ["alt_text", "type", "preview_image_url", "public_metrics", "url", "media_key", "duration_ms", "width", "height", "variants"]
  368. user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
  369. expansions = ["entities.mentions.username",
  370. "attachments.media_keys",
  371. "author_id",
  372. "referenced_tweets.id",
  373. "referenced_tweets.id.author_id"]
  374. if non_public_metrics:
  375. tweet_fields.append("non_public_metrics")
  376. media_fields.append("non_public_metrics")
  377. params = {
  378. "expansions": ",".join(expansions),
  379. "media.fields": ",".join(media_fields),
  380. "tweet.fields": ",".join(tweet_fields),
  381. "user.fields": ",".join(user_fields),
  382. "query": query,
  383. "max_results": max_results,
  384. }
  385. if pagination_token:
  386. params['pagination_token'] = pagination_token
  387. if since_id:
  388. params['since_id'] = since_id
  389. if until_id:
  390. params['until_id'] = until_id
  391. if start_time:
  392. params['start_time'] = start_time
  393. if end_time:
  394. params['end_time'] = end_time
  395. if sort_order:
  396. params['sort_order'] = sort_order
  397. headers = {"Authorization": "Bearer {}".format(token)}
  398. response = requests.get(url, params=params, headers=headers)
  399. response_json = json.loads(response.text)
  400. try:
  401. typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
  402. except:
  403. print('error converting tweet search response to TweetSearchResponse')
  404. print(response_json)
  405. raise 'error converting tweet search response to TweetSearchResponse'
  406. if return_dataclass:
  407. return typed_resp
  408. checked_resp = cleandict(asdict(typed_resp))
  409. print('using checked response to search_tweets')
  410. return checked_resp
  411. def count_tweets (self,
  412. query,
  413. since_id = None,
  414. granularity = 'hour'
  415. ):
  416. """
  417. App rate limit (Application-only): 300 requests per 15-minute window shared among all users of your app = once per 3 seconds.
  418. """
  419. token = self.token
  420. url = "https://api.twitter.com/2/tweets/counts/recent"
  421. params = {
  422. "query": query
  423. }
  424. if since_id:
  425. params['since_id'] = since_id
  426. headers = {"Authorization": "Bearer {}".format(token)}
  427. response = requests.get(url, params=params, headers=headers)
  428. #print(response.status_code)
  429. #print(response.text)
  430. response_json = json.loads(response.text)
  431. return response_json
  432. #def get_conversation (self, tweet_id, pagination_token = None,
  433. # TODO
  434. def get_thread (self, tweet_id,
  435. author_id = None,
  436. only_replies = False,
  437. pagination_token = None,
  438. since_id = None,
  439. max_results = 10,
  440. sort_order = None,
  441. return_dataclass=False
  442. ):
  443. # FIXME author_id can be determined from a Tweet object
  444. query = ""
  445. if author_id:
  446. query += " from:{}".format(author_id)
  447. if only_replies:
  448. query += " in_reply_to_tweet_id:{}".format(tweet_id)
  449. else:
  450. query += " conversation_id:{}".format(tweet_id)
  451. print("get_thread query=" + query)
  452. return self.search_tweets(query,
  453. pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order,
  454. return_dataclass=return_dataclass)
  455. def get_bookmarks (self, user_id,
  456. max_results = 10, pagination_token = None, since_id = None,
  457. return_dataclass=False):
  458. path = "users/{}/bookmarks".format(user_id)
  459. return self.get_timeline(path,
  460. max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
  461. def get_media_tweets (self,
  462. author_id = None,
  463. has_media = True,
  464. has_links = None,
  465. has_images = None,
  466. has_videos = None,
  467. is_reply = None,
  468. is_retweet = None,
  469. pagination_token = None,
  470. since_id = None,
  471. max_results = 10,
  472. sort_order = None,
  473. return_dataclass=False
  474. ):
  475. # FIXME author_id can be determined from a Tweet object
  476. query = ""
  477. if has_media != None:
  478. if not has_media:
  479. query += "-"
  480. query += "has:media "
  481. if has_links != None:
  482. if not has_links:
  483. query += " -"
  484. query += "has:links "
  485. if has_images != None:
  486. if not has_images:
  487. query += " -"
  488. query += "has:images "
  489. if has_videos != None:
  490. if not has_videos:
  491. query += " -"
  492. query += "has:videos "
  493. if is_reply != None:
  494. if not is_reply:
  495. query += " -"
  496. query += "is:reply "
  497. if is_retweet != None:
  498. if not is_retweet:
  499. query += " -"
  500. query += "is:retweet "
  501. if author_id:
  502. query += "from:{} ".format(author_id)
  503. return self.search_tweets(query,
  504. pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order, return_dataclass = return_dataclass)
  505. def get_retweets (self, tweet_id):
  506. # GET /2/tweets/:id/retweeted_by
  507. return
  508. def get_quote_tweets( self, tweet_id):
  509. # GET /2/tweets/:id/quote_tweets
  510. return
  511. def get_liked_tweets (self, user_id,
  512. max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
  513. # GET /2/users/:id/liked_tweets
  514. # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
  515. path = "users/{}/liked_tweets".format(user_id)
  516. print('get_liked_tweets')
  517. return self.get_timeline(path,
  518. max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
  519. def get_liking_users (self, tweet_id,
  520. max_results = None, pagination_token = None,
  521. return_dataclass=False):
  522. # GET /2/tweets/:id/liking_users
  523. # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
  524. url = f"https://api.twitter.com/2/tweets/{tweet_id}/liking_users"
  525. user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
  526. expansions = []
  527. params = cleandict({
  528. "user.fields": ','.join(user_fields),
  529. "max_results": max_results,
  530. "pagination_token": pagination_token,
  531. "expansions": ','.join(expansions),
  532. })
  533. headers = {
  534. "Authorization": f"Bearer {self.token}"
  535. }
  536. resp = requests.get(url, headers=headers, params=params)
  537. result = json.loads(resp.text)
  538. typed_result = from_dict(data_class=UserSearchResponse, data=result)
  539. #print(typed_result)
  540. if return_dataclass:
  541. return typed_result
  542. result = cleandict(asdict(typed_result))
  543. return result
  544. def like_tweet (self, tweet_id):
  545. # POST /2/users/:user_id/likes
  546. # {id: tweet_id}
  547. return
  548. def delete_like (self, tweet_id, user_id):
  549. url = "https://api.twitter.com/2/users/{}/likes/{}".format(user_id, tweet_id)
  550. headers = {
  551. 'Authorization': 'Bearer {}'.format(self.token)
  552. }
  553. response = requests.delete(url, headers=headers)
  554. print(response.status_code)
  555. result = json.loads(response.text)
  556. return result
  557. def get_list_tweets (self, list_id):
  558. # GET /2/lists/:id/tweets
  559. return
  560. def cleandict(d):
  561. if isinstance(d, dict):
  562. return {k: cleandict(v) for k, v in d.items() if v is not None}
  563. elif isinstance(d, list):
  564. return [cleandict(v) for v in d]
  565. else:
  566. return d