|
@@ -1,7 +1,13 @@
|
|
|
+from dataclasses import asdict
|
|
|
+from typing import List
|
|
|
+from dacite import from_dict
|
|
|
+
|
|
|
import json
|
|
|
import requests
|
|
|
import sqlite3
|
|
|
|
|
|
+from twitter_v2_types import TweetSearchResponse, DMEventsResponse, UserSearchResponse
|
|
|
+
|
|
|
class ArchiveTweetSource:
|
|
|
"""
|
|
|
id, created_at, retweeted, favorited, retweet_count, favorite_count, full_text, in_reply_to_status_id_str, in_reply_to_user_id, in_reply_to_screen_nam
|
|
@@ -29,7 +35,7 @@ class ArchiveTweetSource:
|
|
|
|
|
|
# if the ID is not stored as a number (eg. string) then this could be a problem
|
|
|
if since_id:
|
|
|
- where_sql.append("id > ?")
|
|
|
+ where_sql.append("cast(id as integer) > ?")
|
|
|
sql_params.append(since_id)
|
|
|
|
|
|
#if author_id:
|
|
@@ -46,7 +52,7 @@ class ArchiveTweetSource:
|
|
|
if where_sql:
|
|
|
where_sql = "where {}".format(where_sql)
|
|
|
|
|
|
- sql = "select {} from tweet {} order by created_at asc limit ?".format(sql_cols, where_sql)
|
|
|
+ sql = "select {} from tweet {} order by cast(id as integer) asc limit ?".format(sql_cols, where_sql)
|
|
|
sql_params.append(max_results)
|
|
|
|
|
|
|
|
@@ -63,21 +69,23 @@ class ArchiveTweetSource:
|
|
|
return results
|
|
|
|
|
|
def get_tweet (self, id_):
|
|
|
- return self.get_tweets([id_])
|
|
|
+ tweets = self.get_tweets([id_])
|
|
|
+ if len(tweets):
|
|
|
+ return tweets[0]
|
|
|
|
|
|
def get_tweets (self,
|
|
|
ids):
|
|
|
|
|
|
sql_params = []
|
|
|
where_sql = []
|
|
|
- if since_id:
|
|
|
- ids_in_list_sql = "id in ({})".format( ','.join(['?'] * len(ids)))
|
|
|
- where_sql.append(ids_in_list_sql)
|
|
|
- sql_params += ids
|
|
|
|
|
|
+ ids_in_list_sql = "id in ({})".format( ','.join(['?'] * len(ids)))
|
|
|
+ where_sql.append(ids_in_list_sql)
|
|
|
+ sql_params += ids
|
|
|
+
|
|
|
where_sql = " and ".join(where_sql)
|
|
|
|
|
|
- sql = "select * from tweet where {} limit ?".format(where_sql)
|
|
|
+ sql = "select * from tweet where {}".format(where_sql)
|
|
|
|
|
|
db = self.get_db()
|
|
|
|
|
@@ -86,6 +94,8 @@ class ArchiveTweetSource:
|
|
|
|
|
|
results = list(map(dict, cur.execute(sql, sql_params).fetchall()))
|
|
|
|
|
|
+ results.sort(key=lambda t: ids.index(t['id']))
|
|
|
+
|
|
|
return results
|
|
|
|
|
|
def search_tweets (self,
|
|
@@ -119,24 +129,49 @@ class TwitterApiV2SocialGraph:
|
|
|
def __init__ (self, token):
|
|
|
self.token = token
|
|
|
|
|
|
- def get_user (user_id, is_username=False):
|
|
|
+ def get_user (self, user_id, is_username=False):
|
|
|
# GET /2/users/:id
|
|
|
# GET /2/users/by/:username
|
|
|
- return
|
|
|
+ return self.get_users([user_id], is_username)
|
|
|
|
|
|
- def get_users (user_ids, are_usernames=False):
|
|
|
+ def get_users (self, user_ids, are_usernames=False):
|
|
|
# GET /2/users/by?usernames=
|
|
|
# GET /2/users?ids=
|
|
|
- return
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+ user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
|
|
|
+
|
|
|
+ params = {
|
|
|
+
|
|
|
+ 'user.fields' : ','.join(user_fields),
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ if are_usernames:
|
|
|
+ url = "https://api.twitter.com/2/users/by"
|
|
|
+ params['usernames'] = user_ids
|
|
|
+ else:
|
|
|
+ url = "https://api.twitter.com/2/users"
|
|
|
+ params['ids'] = user_ids
|
|
|
+
|
|
|
+ headers = {
|
|
|
+ 'Authorization': 'Bearer {}'.format(self.token)
|
|
|
+ }
|
|
|
+
|
|
|
+ response = requests.get(url, params=params, headers=headers)
|
|
|
+ result = json.loads(response.text)
|
|
|
+
|
|
|
+ return result
|
|
|
|
|
|
def get_following (self, user_id,
|
|
|
- max_results = 1000, pagination_token = None):
|
|
|
+ max_results = 50, pagination_token = None, return_dataclass=False):
|
|
|
# GET /2/users/:id/following
|
|
|
|
|
|
url = "https://api.twitter.com/2/users/{}/following".format(user_id)
|
|
|
|
|
|
|
|
|
- user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
|
|
|
+ user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified"]
|
|
|
|
|
|
params = {
|
|
|
'user.fields' : ','.join(user_fields),
|
|
@@ -150,25 +185,30 @@ class TwitterApiV2SocialGraph:
|
|
|
params['pagination_token'] = pagination_token
|
|
|
|
|
|
headers = {
|
|
|
- 'Authorization': 'Bearer {}'.format(self.token),
|
|
|
- 'Content-Type': 'application/json'
|
|
|
+ 'Authorization': 'Bearer {}'.format(self.token)
|
|
|
}
|
|
|
|
|
|
response = requests.get(url, params=params, headers=headers)
|
|
|
result = json.loads(response.text)
|
|
|
|
|
|
+ typed_result = from_dict(data_class=UserSearchResponse, data=result)
|
|
|
+
|
|
|
+ if return_dataclass:
|
|
|
+ return typed_result
|
|
|
+
|
|
|
+ result = cleandict(asdict(typed_result))
|
|
|
+
|
|
|
return result
|
|
|
|
|
|
- return
|
|
|
|
|
|
def get_followers (self, user_id,
|
|
|
- max_results = 1000, pagination_token = None):
|
|
|
+ max_results = 50, pagination_token = None, return_dataclass=False):
|
|
|
# GET /2/users/:id/followers
|
|
|
|
|
|
url = "https://api.twitter.com/2/users/{}/followers".format(user_id)
|
|
|
|
|
|
|
|
|
- user_fields = ["created_at", "name", "username", "location", "profile_image_url", "verified"]
|
|
|
+ user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
|
|
|
|
|
|
params = {
|
|
|
'user.fields' : ','.join(user_fields),
|
|
@@ -182,25 +222,97 @@ class TwitterApiV2SocialGraph:
|
|
|
params['pagination_token'] = pagination_token
|
|
|
|
|
|
headers = {
|
|
|
- 'Authorization': 'Bearer {}'.format(self.token),
|
|
|
- 'Content-Type': 'application/json'
|
|
|
+ 'Authorization': 'Bearer {}'.format(self.token)
|
|
|
}
|
|
|
|
|
|
response = requests.get(url, params=params, headers=headers)
|
|
|
result = json.loads(response.text)
|
|
|
|
|
|
+ typed_result = from_dict(data_class=UserSearchResponse, data=result)
|
|
|
+
|
|
|
+ if return_dataclass:
|
|
|
+ return typed_result
|
|
|
+
|
|
|
+ result = cleandict(asdict(typed_result))
|
|
|
+
|
|
|
return result
|
|
|
|
|
|
|
|
|
- def follow_user (user_id, target_user_id):
|
|
|
+ def follow_user (self, user_id, target_user_id):
|
|
|
# POST /2/users/:id/following
|
|
|
# {target_user_id}
|
|
|
return
|
|
|
|
|
|
- def unfollow_user (user_id, target_user_id):
|
|
|
+ def unfollow_user (self, user_id, target_user_id):
|
|
|
# DELETE /2/users/:source_user_id/following/:target_user_id
|
|
|
return
|
|
|
|
|
|
+class ApiV2ConversationSource:
|
|
|
+ def __init__ (self, token):
|
|
|
+ self.token = token
|
|
|
+
|
|
|
+ def get_recent_events (self, max_results = None, pagination_token = None):
|
|
|
+
|
|
|
+ # https://developer.twitter.com/en/docs/twitter-api/direct-messages/lookup/api-reference/get-dm_events
|
|
|
+ url = "https://api.twitter.com/2/dm_events"
|
|
|
+
|
|
|
+ params = {
|
|
|
+ "dm_event.fields": "id,event_type,text,created_at,dm_conversation_id,sender_id,participant_ids,referenced_tweets,attachments",
|
|
|
+ "expansions": ",".join(["sender_id", "participant_ids"]),
|
|
|
+
|
|
|
+ "user.fields": ",".join(["id", "created_at", "name", "username", "location", "profile_image_url", "url", "verified"])
|
|
|
+ }
|
|
|
+
|
|
|
+ if max_results:
|
|
|
+ params['max_results'] = max_results
|
|
|
+
|
|
|
+ if pagination_token:
|
|
|
+ params['pagination_token'] = pagination_token
|
|
|
+
|
|
|
+ headers = {"Authorization": "Bearer {}".format(self.token)}
|
|
|
+
|
|
|
+ response = requests.get(url, params=params, headers=headers)
|
|
|
+ response_json = json.loads(response.text)
|
|
|
+
|
|
|
+ typed_resp = from_dict(data=response_json, data_class=DMEventsResponse)
|
|
|
+
|
|
|
+ return typed_resp
|
|
|
+
|
|
|
+ def get_conversation (self, dm_conversation_id,
|
|
|
+ max_results = None, pagination_token = None):
|
|
|
+
|
|
|
+ return
|
|
|
+
|
|
|
+ def get_conversation_with_user (self, user_id,
|
|
|
+ max_results = None, pagination_token = None):
|
|
|
+
|
|
|
+ return
|
|
|
+
|
|
|
+ def send_message (self, dm_conversation_id, text, attachments = None):
|
|
|
+        url = f'https://api.twitter.com/2/dm_conversations/{dm_conversation_id}/messages'
|
|
|
+
|
|
|
+ body = {
|
|
|
+ 'text': text
|
|
|
+ }
|
|
|
+
|
|
|
+ if attachments:
|
|
|
+ body['attachments'] = attachments
|
|
|
+
|
|
|
+ headers = {"Authorization": "Bearer {}".format(self.token)}
|
|
|
+
|
|
|
+ resp = requests.post(url, data=json.dumps(body), headers=headers)
|
|
|
+
|
|
|
+ result = json.loads(resp.text)
|
|
|
+
|
|
|
+ example_resp_text = """
|
|
|
+ {
|
|
|
+ "dm_conversation_id": "1346889436626259968",
|
|
|
+ "dm_event_id": "128341038123"
|
|
|
+ }
|
|
|
+ """
|
|
|
+
|
|
|
+ return result
|
|
|
+
|
|
|
class ApiV2TweetSource:
|
|
|
def __init__ (self, token):
|
|
|
self.token = token
|
|
@@ -289,7 +401,7 @@ class ApiV2TweetSource:
|
|
|
return result
|
|
|
|
|
|
|
|
|
- def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None):
|
|
|
+ def get_home_timeline (self, user_id, variant = 'reverse_chronological', max_results = 10, pagination_token = None, since_id = None, until_id = None, end_time = None) -> TweetSearchResponse:
|
|
|
"""
|
|
|
Get a user's timeline as viewed by the user themselves.
|
|
|
"""
|
|
@@ -297,13 +409,16 @@ class ApiV2TweetSource:
|
|
|
path = 'users/{}/timelines/{}'.format(user_id, variant)
|
|
|
|
|
|
return self.get_timeline(path,
|
|
|
- max_results=max_results, pagination_token=pagination_token, since_id=since_id)
|
|
|
+ max_results=max_results, pagination_token=pagination_token, since_id=since_id, until_id=until_id, end_time=end_time, return_dataclass=True)
|
|
|
|
|
|
def get_timeline (self, path,
|
|
|
max_results = 10, pagination_token = None, since_id = None,
|
|
|
+ until_id = None,
|
|
|
+ end_time = None,
|
|
|
non_public_metrics = False,
|
|
|
exclude_replies=False,
|
|
|
- exclude_retweets=False):
|
|
|
+ exclude_retweets=False,
|
|
|
+ return_dataclass=False):
|
|
|
"""
|
|
|
Get any timeline, including custom curated timelines built by Tweet Deck / ApiV11.
|
|
|
"""
|
|
@@ -345,14 +460,22 @@ class ApiV2TweetSource:
|
|
|
exclude.append('retweets')
|
|
|
|
|
|
if len(exclude):
|
|
|
+ print(f'get_timeline exclude={exclude}')
|
|
|
params['exclude'] = ','.join(exclude)
|
|
|
|
|
|
|
|
|
+
|
|
|
if pagination_token:
|
|
|
params['pagination_token'] = pagination_token
|
|
|
|
|
|
if since_id:
|
|
|
params['since_id'] = since_id
|
|
|
+
|
|
|
+ if until_id:
|
|
|
+ params['until_id'] = until_id
|
|
|
+
|
|
|
+ if end_time:
|
|
|
+ params['end_time'] = end_time
|
|
|
|
|
|
headers = {"Authorization": "Bearer {}".format(token)}
|
|
|
|
|
@@ -361,21 +484,45 @@ class ApiV2TweetSource:
|
|
|
response = requests.get(url, params=params, headers=headers)
|
|
|
response_json = json.loads(response.text)
|
|
|
|
|
|
- return response_json
|
|
|
+ try:
|
|
|
+ #print(json.dumps(response_json, indent = 2))
|
|
|
+ typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
|
|
|
+        except Exception:
|
|
|
+ print('error converting response to dataclass')
|
|
|
+ print(json.dumps(response_json, indent = 2))
|
|
|
+
|
|
|
+ if not return_dataclass:
|
|
|
+ return response_json
|
|
|
+
|
|
|
+            raise ValueError('error converting response to dataclass')
|
|
|
+
|
|
|
+ if return_dataclass:
|
|
|
+ return typed_resp
|
|
|
|
|
|
+ checked_resp = cleandict(asdict(typed_resp))
|
|
|
+
|
|
|
+ print('using checked response to get_timeline')
|
|
|
+
|
|
|
+ #print(json.dumps(checked_resp, indent=2))
|
|
|
+ #print('og=')
|
|
|
+ #print(json.dumps(response_json, indent=2))
|
|
|
+
|
|
|
+ return checked_resp
|
|
|
+
|
|
|
def get_mentions_timeline (self, user_id,
|
|
|
- max_results = 10, pagination_token = None, since_id = None):
|
|
|
+ max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
|
|
|
|
|
|
path = "users/{}/mentions".format(user_id)
|
|
|
|
|
|
return self.get_timeline(path,
|
|
|
- max_results=max_results, pagination_token=pagination_token, since_id=since_id)
|
|
|
+ max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
|
|
|
|
|
|
def get_user_timeline (self, user_id,
|
|
|
max_results = 10, pagination_token = None, since_id = None,
|
|
|
non_public_metrics=False,
|
|
|
exclude_replies=False,
|
|
|
- exclude_retweets=False):
|
|
|
+ exclude_retweets=False,
|
|
|
+ return_dataclass=False):
|
|
|
"""
|
|
|
Get a user's Tweets as viewed by another.
|
|
|
"""
|
|
@@ -384,15 +531,16 @@ class ApiV2TweetSource:
|
|
|
return self.get_timeline(path,
|
|
|
max_results=max_results, pagination_token=pagination_token, since_id=since_id,
|
|
|
non_public_metrics = non_public_metrics,
|
|
|
- exclude_replies=exclude_replies, exclude_retweets=exclude_retweets)
|
|
|
+ exclude_replies=exclude_replies, exclude_retweets=exclude_retweets, return_dataclass=return_dataclass)
|
|
|
|
|
|
|
|
|
- def get_tweet (self, id_, non_public_metrics = False):
|
|
|
- return self.get_tweets([id_], non_public_metrics = non_public_metrics)
|
|
|
+ def get_tweet (self, id_, non_public_metrics = False, return_dataclass=False):
|
|
|
+ return self.get_tweets([id_], non_public_metrics = non_public_metrics, return_dataclass=return_dataclass)
|
|
|
|
|
|
def get_tweets (self,
|
|
|
ids,
|
|
|
- non_public_metrics = False):
|
|
|
+ non_public_metrics = False,
|
|
|
+ return_dataclass = False):
|
|
|
|
|
|
token = self.token
|
|
|
|
|
@@ -426,7 +574,18 @@ class ApiV2TweetSource:
|
|
|
response = requests.get(url, params=params, headers=headers)
|
|
|
response_json = json.loads(response.text)
|
|
|
|
|
|
- return response_json
|
|
|
+ print(json.dumps(response_json, indent=2))
|
|
|
+
|
|
|
+ typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
|
|
|
+
|
|
|
+ if return_dataclass:
|
|
|
+ return typed_resp
|
|
|
+
|
|
|
+ checked_resp = cleandict(asdict(typed_resp))
|
|
|
+
|
|
|
+ print('using checked response to search_tweets')
|
|
|
+
|
|
|
+ return checked_resp
|
|
|
|
|
|
def search_tweets (self,
|
|
|
query,
|
|
@@ -434,7 +593,8 @@ class ApiV2TweetSource:
|
|
|
since_id = None,
|
|
|
max_results = 10,
|
|
|
sort_order = None,
|
|
|
- non_public_metrics = False
|
|
|
+ non_public_metrics = False,
|
|
|
+ return_dataclass = False
|
|
|
):
|
|
|
|
|
|
token = self.token
|
|
@@ -480,7 +640,22 @@ class ApiV2TweetSource:
|
|
|
response = requests.get(url, params=params, headers=headers)
|
|
|
response_json = json.loads(response.text)
|
|
|
|
|
|
- return response_json
|
|
|
+ try:
|
|
|
+ typed_resp = from_dict(data=response_json, data_class=TweetSearchResponse)
|
|
|
+        except Exception:
|
|
|
+ print('error converting tweet search response to TweetSearchResponse')
|
|
|
+ print(response_json)
|
|
|
+
|
|
|
+            raise ValueError('error converting tweet search response to TweetSearchResponse')
|
|
|
+
|
|
|
+ if return_dataclass:
|
|
|
+ return typed_resp
|
|
|
+
|
|
|
+ checked_resp = cleandict(asdict(typed_resp))
|
|
|
+
|
|
|
+ print('using checked response to search_tweets')
|
|
|
+
|
|
|
+ return checked_resp
|
|
|
|
|
|
|
|
|
|
|
@@ -522,7 +697,8 @@ class ApiV2TweetSource:
|
|
|
pagination_token = None,
|
|
|
since_id = None,
|
|
|
max_results = 10,
|
|
|
- sort_order = None
|
|
|
+ sort_order = None,
|
|
|
+ return_dataclass=False
|
|
|
):
|
|
|
|
|
|
# FIXME author_id can be determined from a Tweet object
|
|
@@ -538,14 +714,16 @@ class ApiV2TweetSource:
|
|
|
print("get_thread query=" + query)
|
|
|
|
|
|
return self.search_tweets(query,
|
|
|
- pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order)
|
|
|
+ pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order,
|
|
|
+ return_dataclass=return_dataclass)
|
|
|
|
|
|
def get_bookmarks (self, user_id,
|
|
|
- max_results = 10, pagination_token = None, since_id = None):
|
|
|
+ max_results = 10, pagination_token = None, since_id = None,
|
|
|
+ return_dataclass=False):
|
|
|
path = "users/{}/bookmarks".format(user_id)
|
|
|
|
|
|
return self.get_timeline(path,
|
|
|
- max_results=max_results, pagination_token=pagination_token, since_id=since_id)
|
|
|
+ max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
|
|
|
|
|
|
def get_media_tweets (self,
|
|
|
author_id = None,
|
|
@@ -558,7 +736,8 @@ class ApiV2TweetSource:
|
|
|
pagination_token = None,
|
|
|
since_id = None,
|
|
|
max_results = 10,
|
|
|
- sort_order = None
|
|
|
+ sort_order = None,
|
|
|
+ return_dataclass=False
|
|
|
):
|
|
|
|
|
|
# FIXME author_id can be determined from a Tweet object
|
|
@@ -599,7 +778,7 @@ class ApiV2TweetSource:
|
|
|
query += "from:{} ".format(author_id)
|
|
|
|
|
|
return self.search_tweets(query,
|
|
|
- pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order)
|
|
|
+ pagination_token = pagination_token, since_id = since_id, max_results = max_results, sort_order = sort_order, return_dataclass = return_dataclass)
|
|
|
|
|
|
|
|
|
def get_retweets (self, tweet_id):
|
|
@@ -610,19 +789,58 @@ class ApiV2TweetSource:
|
|
|
# GET /2/tweets/:id/quote_tweets
|
|
|
return
|
|
|
|
|
|
- def get_likes (self, user_id,
|
|
|
- max_results = 10, pagination_token = None, since_id = None):
|
|
|
+ def get_liked_tweets (self, user_id,
|
|
|
+ max_results = 10, pagination_token = None, since_id = None, return_dataclass=False):
|
|
|
# GET /2/users/:id/liked_tweets
|
|
|
+ # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
|
|
|
+
|
|
|
path = "users/{}/liked_tweets".format(user_id)
|
|
|
|
|
|
return self.get_timeline(path,
|
|
|
- max_results=max_results, pagination_token=pagination_token, since_id=since_id)
|
|
|
+ max_results=max_results, pagination_token=pagination_token, since_id=since_id, return_dataclass=return_dataclass)
|
|
|
|
|
|
- def get_liked_by (self, tweet_id):
|
|
|
+ def get_liking_users (self, tweet_id,
|
|
|
+ max_results = None, pagination_token = None):
|
|
|
# GET /2/tweets/:id/liking_users
|
|
|
+ # User rate limit (User context): 75 requests per 15-minute window per each authenticated user
|
|
|
+
|
|
|
+ url = f"https://api.twitter.com/2/tweets/{tweet_id}/liking_users"
|
|
|
+
|
|
|
+ user_fields = ["id", "created_at", "name", "username", "location", "profile_image_url", "verified", "description", "public_metrics", "protected", "pinned_tweet_id", "url"]
|
|
|
+
|
|
|
+ expansions = None
|
|
|
+
|
|
|
+ params = cleandict({
|
|
|
+ "user.fields": ','.join(user_fields),
|
|
|
+ "max_results": max_results,
|
|
|
+ "pagination_token": pagination_token,
|
|
|
+ "expansions": expansions,
|
|
|
+ })
|
|
|
+
|
|
|
+ headers = {
|
|
|
+ "Authorization": f"Bearer {self.token}"
|
|
|
+ }
|
|
|
+
|
|
|
+ resp = requests.get(url, headers=headers, params=params)
|
|
|
+
|
|
|
+ response_json = json.loads(resp.text)
|
|
|
+
|
|
|
+ return response_json
|
|
|
+
|
|
|
+ def like_tweet (self, tweet_id):
|
|
|
+ # POST /2/users/:user_id/likes
|
|
|
+ # {id: tweet_id}
|
|
|
return
|
|
|
|
|
|
def get_list_tweets (self, list_id):
|
|
|
# GET /2/lists/:id/tweets
|
|
|
return
|
|
|
-
|
|
|
+
|
|
|
+
|
|
|
+def cleandict(d):
|
|
|
+ if isinstance(d, dict):
|
|
|
+ return {k: cleandict(v) for k, v in d.items() if v is not None}
|
|
|
+ elif isinstance(d, list):
|
|
|
+ return [cleandict(v) for v in d]
|
|
|
+ else:
|
|
|
+ return d
|