repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tweet-analysis-2020 | tweet-analysis-2020-main/app/twitter_service.py |
import os
from pprint import pprint
#from app.timelines.status_parser import parse_urls, parse_full_text, parse_timeline_status
from dotenv import load_dotenv
from tweepy import OAuthHandler, API, Cursor
from tweepy.error import TweepError
load_dotenv()
CONSUMER_KEY = os.getenv("TWITTER_API_KEY", default="OOPS")
CONSUMER_SECRET = os.getenv("TWITTER_API_KEY_SECRET", default="OOPS")
ACCESS_KEY = os.getenv("TWITTER_ACCESS_TOKEN", default="OOPS")
ACCESS_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET", default="OOPS")
#ENVIRONMENT_NAME = os.getenv("TWITTER_ENVIRONMENT_NAME", default="OOPS") # see: https://developer.twitter.com/en/account/environments
class TwitterService:
    """Wraps an authenticated tweepy client for user, friend, and timeline lookups."""

    def __init__(self, consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET, access_key=ACCESS_KEY, access_secret=ACCESS_SECRET):
        """
        Constructs an authenticated API client. wait_on_rate_limit tells tweepy to
        sleep through rate-limit windows instead of raising.

        See:
            https://developer.twitter.com/en/docs/basics/rate-limiting
            http://docs.tweepy.org/en/v3.8.0/auth_tutorial.html
            https://bhaskarvk.github.io/2015/01/how-to-use-twitters-search-rest-api-most-effectively./
        """
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_key, access_secret)
        self.api = API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

    def get_user_id(self, screen_name):
        """Returns the numeric user id for the given screen name."""
        user = self.api.get_user(screen_name)
        return user.id

    def get_friends(self, screen_name=None, user_id=None, max_friends=2000):
        """
        Params:
            screen_name like "barackobama" or "s2t2", or
            user_id the numeric account id (one of the two is required)
            max_friends for now, for performance, because we can always go back later and re-scrape those who hit this max

        Returns a list of the user's friend_ids (or empty list if the account was private),
        or None if neither identifier was provided.

        See: http://docs.tweepy.org/en/v3.8.0/api.html#API.friends_ids
             http://docs.tweepy.org/en/v3.8.0/cursor_tutorial.html
             https://developer.twitter.com/en/docs/basics/cursoring
        """
        if screen_name is not None:
            cursor = Cursor(self.api.friends_ids, screen_name=screen_name, cursor=-1)
        elif user_id is not None:
            cursor = Cursor(self.api.friends_ids, user_id=user_id, cursor=-1)
        else:
            print("OOPS PLEASE PASS SCREEN NAME OR USER ID")
            return None

        friend_ids = []
        try:
            for friend_id in cursor.items(max_friends):
                friend_ids.append(friend_id)
        except TweepError as err:
            # "Not authorized." if user is private / protected (e.g. 1003322728890462209)
            print("OOPS", err)
        return friend_ids

    def fetch_user_timeline(self, request_params=None, limit=2_000):
        """
        See:
            https://docs.tweepy.org/en/latest/api.html#timeline-methods
            https://docs.tweepy.org/en/v3.10.0/cursor_tutorial.html
        Params:
            request_params (dict) needs either "user_id" or "screen_name" attr
                ... or overwrite any of the default params
            limit (int) the number of total tweets to fetch per user
        Example: fetch_user_timeline({"user_id": 10101, "count": 100}, limit=300)
        """
        # BUGFIX: a mutable default ({}) is shared across calls, so one caller's
        # params would leak into the next. Use None and build a fresh dict instead.
        default_params = {
            "exclude_replies": False,
            "include_rts": True,
            "tweet_mode": "extended", # access the full text
            "count": 200 # number of tweets per request
        }
        # use the defaults, and override with user-specified params (including the required user_id or screen_name)
        request_params = {**default_params, **(request_params or {})}
        request_params["cursor"] = -1 # use a cursor approach!
        cursor = Cursor(self.api.user_timeline, **request_params)
        return cursor.items(limit)
if __name__ == "__main__":

    SCREEN_NAME = os.getenv("TWITTER_SCREEN_NAME", default="barackobama") # just one to use for testing purposes
    # BUGFIX: the limit was previously read from TWITTER_SCREEN_NAME (copy-paste error),
    # so setting that env var to an actual screen name would crash int(). Use a dedicated var.
    STATUS_LIMIT = int(os.getenv("TWITTER_STATUS_LIMIT", default="5"))

    service = TwitterService()

    print("-------------")
    print("SCREEN NAME:", SCREEN_NAME)

    print("-------------")
    print("USER ID:")
    user_id = service.get_user_id(SCREEN_NAME)
    print(user_id)

    print("-------------")
    print("USER TIMELINE:")
    # BUGFIX: limit must be passed as its own keyword arg; it was previously placed
    # inside request_params, which forwarded it to the Twitter API as an unsupported
    # query param (and left the intended limit unused).
    timeline = service.fetch_user_timeline({"screen_name": SCREEN_NAME}, limit=STATUS_LIMIT)
    for status in timeline:
        pprint(status._json)
        print("---")

    # see git history for exploratory snippets: full-archive search, statuses_lookup,
    # and friend-id scraping examples previously kept here as commented-out code
| 6,032 | 34.280702 | 169 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/file_storage.py |
import os
from pprint import pprint
from dotenv import load_dotenv
from app import DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.gcs_service import GoogleCloudStorageService
load_dotenv()
DIRPATH = os.getenv("DIRPATH", default="example/file_storage")
WIFI = (os.getenv("WIFI", default="true") == "true")
class FileStorage:
    """Persists files to a local data subdirectory and mirrors them to Google Cloud Storage."""

    def __init__(self, dirpath=None, gcs_service=None, wifi=WIFI):
        """
        Saves and loads files, using local storage and/or Google Cloud Storage.

        Params:
            dirpath (str) a subpath of the data dir
            gcs_service a GoogleCloudStorageService (constructed on demand if omitted)
            wifi (bool) whether or not to attempt uploads
        """
        self.wifi = wifi
        self.gcs_service = gcs_service if gcs_service else GoogleCloudStorageService()
        self.dirpath = dirpath if dirpath else DIRPATH
        self.gcs_dirpath = self.compile_gcs_dirpath(self.dirpath)
        self.local_dirpath = self.compile_local_dirpath(self.dirpath)

        print("FILE STORAGE...")
        print(" DIRPATH:", self.dirpath)
        print(" GCS DIRPATH:", self.gcs_dirpath)
        print(" LOCAL DIRPATH:", os.path.abspath(self.local_dirpath))
        print(" WIFI ENABLED:", self.wifi)

        seek_confirmation()

        # make sure the local destination directory exists before any writes
        if not os.path.exists(self.local_dirpath):
            os.makedirs(self.local_dirpath)

    @staticmethod
    def compile_local_dirpath(dirpath):
        """Maps a storage subpath onto the local data directory."""
        return os.path.join(DATA_DIR, dirpath)

    @staticmethod
    def compile_gcs_dirpath(dirpath):
        """Maps a storage subpath onto the bucket's "storage/data" prefix."""
        return os.path.join("storage", "data", dirpath)

    #
    # REMOTE STORAGE
    #

    def upload_file(self, local_filepath, remote_filepath):
        """Pushes a local file up to GCS and logs the resulting blob."""
        print(logstamp(), "UPLOADING FILE...", os.path.abspath(local_filepath))
        blob = self.gcs_service.upload(local_filepath, remote_filepath)
        print(logstamp(), blob) #> <Blob: impeachment-analysis-2020, storage/data/2020-05-26-0002/metadata.json, 1590465770194318>

    def download_file(self, remote_filepath, local_filepath):
        """Pulls a file down from GCS to the given local path."""
        print(logstamp(), "DOWNLOADING FILE...", remote_filepath)
        self.gcs_service.download(remote_filepath, local_filepath)
| 2,158 | 31.712121 | 130 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/__init__.py |
import os
import time
from dotenv import load_dotenv
load_dotenv()
APP_ENV = os.getenv("APP_ENV", "development")
SERVER_NAME = os.getenv("SERVER_NAME", "mjr-local") # the name of your Heroku app (e.g. "impeachment-tweet-analysis-9")
SERVER_DASHBOARD_URL = f"https://dashboard.heroku.com/apps/{SERVER_NAME}"
DATA_DIR = os.path.join(os.path.dirname(__file__), "..", "data")
def seek_confirmation():
    """Interactive guard: in development, prompt before continuing; any answer other than "Y" exits the process."""
    if APP_ENV != "development":
        return
    answer = input("CONTINUE? (Y/N): ")
    if answer.upper() != "Y":
        print("EXITING...")
        exit()
def server_sleep(seconds=None):
    """Puts a production server to sleep for the given duration (default six hours); no-op elsewhere."""
    seconds = seconds or (6 * 60 * 60)  # any falsy value (None, 0) falls back to six hours
    if APP_ENV != "production":
        return
    print(f"SERVER '{SERVER_NAME.upper()}' SLEEPING...")
    time.sleep(seconds)
| 745 | 26.62963 | 119 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/job.py |
import time
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
class Job:
    """Tracks a processing run: start/end timestamps, item counter, and total duration."""

    def __init__(self):
        # all measurements are unset until start() / end() are called
        self.start_at = self.end_at = self.duration_seconds = None
        self.counter = None # represents the number of items processed

    def start(self):
        """Announces the job and resets the counter/clock."""
        print("-----------------")
        print("JOB STARTING!")
        self.counter = 0
        self.start_at = time.perf_counter()

    def progress_report(self):
        """Logs a timestamped count of items processed so far."""
        print(logstamp(), fmt_n(self.counter))

    def end(self):
        """Announces completion and records the elapsed duration."""
        print("-----------------")
        print("JOB COMPLETE!")
        finished = time.perf_counter()
        self.end_at = finished
        self.duration_seconds = round(finished - self.start_at, 2)
        print(f"PROCESSED {fmt_n(self.counter)} ITEMS IN {fmt_n(self.duration_seconds)} SECONDS")
if __name__ == "__main__":

    # smoke test: a tiny job that "processes" 100 items over ~3 seconds
    job = Job()
    job.start()
    time.sleep(3)
    job.counter += 100
    job.end()
| 962 | 23.692308 | 97 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_follower_graphs/pg_grapher.py | #from memory_profiler import profile
#
#from networkx import DiGraph
#
#from app import seek_confirmation
#from app.decorators.datetime_decorators import logstamp
#from app.decorators.number_decorators import fmt_n
#from app.pg_pipeline.pg_service import PgService
#from app.retweet_graphs_v2.graph_storage import GraphStorage
#from app.retweet_graphs_v2.job import Job
#
#BOT_MIN = 0.8
#BATCH_SIZE = 100
#
#
#class BotFollowerGrapher(GraphStorage, Job):
# def __init__(self, pg_service=None, bot_min=BOT_MIN, batch_size=BATCH_SIZE, storage_dirpath=None):
# self.pg_service = pg_service or PgService()
# self.bot_min = bot_min
# self.batch_size = batch_size
#
# Job.__init__(self)
#
# storage_dirpath = storage_dirpath or f"bot_follower_graphs/bot_min/{self.bot_min}"
# GraphStorage.__init__(self, dirpath=storage_dirpath)
#
# print("-------------------------")
# print("BOT FOLLOWER GRAPHER...")
# print(" BOT MIN:", self.bot_min)
# print(" BATCH SIZE:", self.batch_size)
# print("-------------------------")
#
# seek_confirmation()
#
# @property
# def metadata(self):
# return {**super().metadata, **{"bot_min": self.bot_min, "batch_size": self.batch_size}}
#
# @profile
# def perform(self):
# self.graph = DiGraph()
#
# print("FETCHING BOT FOLLOWERS...")
# self.pg_service.get_bot_follower_lists(bot_min=self.bot_min)
# while True:
# batch = self.pg_service.cursor.fetchmany(size=self.batch_size) # auto-pagination FTW
# if not batch: break # stop the loop when there's nothing left
#
# for row in batch:
# bot_id = row["bot_id"]
# self.graph.add_edges_from([(follower_id, bot_id) for follower_id in row["follower_ids"]])
#
# self.counter += len(batch)
# print(" ", logstamp(), "| BOTS:", fmt_n(self.counter))
#
# self.pg_service.close()
# print("COMPLETE!")
#
#
#if __name__ == "__main__":
#
# grapher = BotFollowerGrapher()
#
# grapher.save_metadata()
#
# grapher.start()
# grapher.perform()
# grapher.end()
# grapher.report()
#
# grapher.write_graph_to_file()
#
| 2,225 | 29.493151 | 106 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_follower_graphs/bq_grapher.py | from memory_profiler import profile
from networkx import DiGraph
from app import seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService
from app.retweet_graphs_v2.graph_storage import GraphStorage
from app.retweet_graphs_v2.job import Job
BOT_MIN = 0.8
BATCH_SIZE = 100
class BotFollowerGrapher(GraphStorage, Job):
    """
    Streams bot/follower rows from BigQuery and assembles a directed graph with
    edges pointing follower -> bot, for accounts scoring at least `bot_min`.
    """

    def __init__(self, bq_service=None, bot_min=BOT_MIN, batch_size=BATCH_SIZE, storage_dirpath=None):
        self.bq_service = bq_service or BigQueryService()
        self.bot_min = bot_min
        self.batch_size = batch_size

        # initialize both mixins explicitly (storage path is derived from bot_min by default)
        Job.__init__(self)
        GraphStorage.__init__(self, dirpath=storage_dirpath or f"bot_follower_graphs/bot_min/{self.bot_min}")

        print("-------------------------")
        print("BOT FOLLOWER GRAPHER...")
        print(" BOT MIN:", self.bot_min)
        print(" BATCH SIZE:", self.batch_size)
        print("-------------------------")

        seek_confirmation()

    @property
    def metadata(self):
        """Parent metadata extended with this grapher's parameters."""
        info = {**super().metadata}
        info.update({"bot_min": self.bot_min, "batch_size": self.batch_size})
        return info

    @profile
    def perform(self):
        """Builds the follower->bot DiGraph, logging progress every `batch_size` bots."""
        self.graph = DiGraph()
        print("FETCHING BOT FOLLOWERS...")
        for row in self.bq_service.fetch_bot_follower_lists(bot_min=self.bot_min):
            bot = row["bot_id"]
            self.graph.add_edges_from((follower, bot) for follower in row["follower_ids"])
            self.counter += 1
            if self.counter % self.batch_size == 0:
                print(" ", logstamp(), "| BOTS:", fmt_n(self.counter))
if __name__ == "__main__":

    # prompts for confirmation inside the constructor, then builds and persists the graph
    grapher = BotFollowerGrapher()
    grapher.save_metadata()

    grapher.start()
    grapher.perform()
    grapher.end()
    grapher.report()

    grapher.write_graph_to_file()
| 1,924 | 28.166667 | 102 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_follower_graphs/bq_prep.py |
from app.bq_service import BigQueryService
if __name__ == "__main__":

    bq_service = BigQueryService(verbose=True)

    # each migration below drops and rebuilds its destination table ("destructively"),
    # so re-running this script is safe but discards previous results
    print("FLATTENING USER FRIENDS TABLE...")
    bq_service.destructively_migrate_user_friends_flat()

    print("BOTS ABOVE 80...")
    bq_service.destructively_migrate_bots_table()

    print("BOT FOLLOWERS ABOVE 80...")
    bq_service.destructively_migrate_bot_followers_table()
| 404 | 24.3125 | 58 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_analysis/top_status_tags.py |
import os
from collections import Counter
from pandas import DataFrame, read_csv
import re
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
from app.job import Job
from app.decorators.number_decorators import fmt_n
LIMIT = os.getenv("LIMIT") # 1000 # None # os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="25_000")) # 100
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # True
TAGS_DESTRUCTIVE = (os.getenv("TAGS_DESTRUCTIVE", default="false") == "true") # True
TWITTER_PATTERN = r'[^a-zA-Z ^0-9 # @]' # alphanumeric, plus hashtag and handle symbols (twitter-specific)
def download_tweets():
    """Streams tagged statuses from BigQuery (capped by LIMIT) into a DataFrame,
    reporting progress every BATCH_SIZE rows."""
    job = Job()
    bq_service = BigQueryService()

    job.start()
    rows = []
    for count, row in enumerate(bq_service.fetch_statuses_with_tags(limit=LIMIT), start=1):
        rows.append(dict(row))
        job.counter = count
        if count % BATCH_SIZE == 0:
            job.progress_report()
    job.end()

    return DataFrame(rows)
def parse_hashtags(status_text):
    """
    Extracts hashtags from a tweet's text.

    Params: status_text (str) the raw tweet text.
    Returns a list of uppercased hashtag tokens (e.g. ["#MAGA"]), with
    non-alphanumeric punctuation stripped.
    """
    # BUGFIX: the cleaned string was computed but never used — tokens were taken
    # from the raw text, so trailing punctuation stayed attached to tags.
    # Tokenize the cleaned (already uppercased) text instead.
    txt = re.sub(TWITTER_PATTERN, "", status_text.upper())
    tags = [token for token in txt.split() if token.startswith("#") and not token.endswith("#")]
    return tags
def summarize_token_frequencies(tokens_series):
    """
    Param tokens_series : a list of token lists, one per document in a collection.

    Returns a DataFrame sorted by rank, with one row per distinct token and columns:
        token, rank (1 = most frequent, ties broken by first occurrence),
        count / pct (token occurrences and their share of all tokens),
        doc_count / doc_pct (number and share of documents containing the token).
    """
    print("COMPUTING TOKEN AND DOCUMENT FREQUENCIES...")
    token_counter = Counter()
    doc_counter = Counter()
    # (removed an unused Counter that was allocated but never updated or read)
    for tokens in tokens_series:
        token_counter.update(tokens)
        doc_counter.update(set(tokens)) # removes duplicate tokens so they only get counted once per doc!

    token_df = DataFrame(list(token_counter.items()), columns=["token", "count"])
    doc_df = DataFrame(list(doc_counter.items()), columns=["token", "doc_count"])
    df = doc_df.merge(token_df, on="token")

    df["rank"] = df["count"].rank(method="first", ascending=False)
    df["pct"] = df["count"] / df["count"].sum()
    df["doc_pct"] = df["doc_count"] / len(tokens_series)
    return df.reindex(columns=["token", "rank", "count", "pct", "doc_count", "doc_pct"]).sort_values(by="rank")
if __name__ == "__main__":

    storage = FileStorage(dirpath="bot_analysis")
    tweets_csv_filepath = os.path.join(storage.local_dirpath, "statuses_with_tags.csv")

    # reuse the cached tweets CSV unless DESTRUCTIVE forces a re-download
    if os.path.isfile(tweets_csv_filepath) and not DESTRUCTIVE:
        print("LOADING TWEETS...")
        tweets_df = read_csv(tweets_csv_filepath)
    else:
        print("DOWNLOADING TWEETS...")
        tweets_df = download_tweets()
        tweets_df.to_csv(tweets_csv_filepath, index=False)
    print(fmt_n(len(tweets_df)))
    print(tweets_df.head())

    print("TOKENIZING TWEETS...")
    tweets_df["status_tags"] = tweets_df["status_text"].map(parse_hashtags)

    # overall tag frequencies, cached unless TAGS_DESTRUCTIVE forces a re-summarize
    tags_csv_filepath = os.path.join(storage.local_dirpath, "top_status_tags.csv")
    top_tags_csv_filepath = os.path.join(storage.local_dirpath, "top_1000_status_tags.csv")
    if os.path.isfile(tags_csv_filepath) and not TAGS_DESTRUCTIVE:
        print("LOADING TOP TAGS...")
        tags_df = read_csv(tags_csv_filepath)
    else:
        print("SUMMARIZING...")
        tags_df = summarize_token_frequencies(tweets_df["status_tags"])
        tags_df.to_csv(tags_csv_filepath, index=False)
        tags_df.head(1000).to_csv(top_tags_csv_filepath, index=False)
    print(tags_df.head())

    # BUGFIX: group by the column name (str), not a single-element list. With a list
    # key, newer pandas yields tuple group keys, which would break the
    # bot_or_human dict lookup below with a KeyError.
    for is_bot, filtered_df in tweets_df.groupby("is_bot"):
        bot_or_human = {True: "bot", False: "human"}[is_bot]
        print(bot_or_human.upper())
        bh_tags_df = summarize_token_frequencies(filtered_df["status_tags"])
        print(bh_tags_df.head())
        bh_tags_csv_filepath = os.path.join(storage.local_dirpath, f"top_{bot_or_human}_status_tags.csv")
        bh_top_tags_csv_filepath = os.path.join(storage.local_dirpath, f"top_1000_{bot_or_human}_status_tags.csv")
        bh_tags_df.to_csv(bh_tags_csv_filepath, index=False)
        bh_tags_df.head(1000).to_csv(bh_top_tags_csv_filepath, index=False)
| 4,422 | 34.384 | 114 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_analysis/download_tweet_details_v6.py |
import os
from pandas import read_csv, DataFrame
from app.file_storage import FileStorage
from app.bq_service import BigQueryService
from app.job import Job
from app.decorators.number_decorators import fmt_n
LIMIT = os.getenv("LIMIT") # for development purposes
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="10_000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
if __name__ == "__main__":

    bq_service = BigQueryService()
    job = Job()

    # each dataset version gets its own local storage subdirectory
    storage = FileStorage(dirpath=f"user_details_v6")
    tweets_csv_filepath = os.path.join(storage.local_dirpath, "tweets.csv")

    # reuse a previously downloaded CSV unless DESTRUCTIVE forces a re-download
    if os.path.exists(tweets_csv_filepath) and not DESTRUCTIVE:
        print("LOADING TWEETS...")
        tweets_df = read_csv(tweets_csv_filepath)
    else:
        job.start()
        print("DOWNLOADING TWEETS...")
        records = []
        for row in bq_service.fetch_tweet_details_v6(limit=LIMIT):
            records.append(dict(row)) # convert the BQ row to a plain dict for DataFrame construction
            job.counter += 1
            if job.counter % BATCH_SIZE == 0:
                job.progress_report()
        job.end()

        tweets_df = DataFrame(records)
        print("WRITING RECORDS TO CSV...")
        tweets_df.to_csv(tweets_csv_filepath, index=False)

    print("TWEETS:", fmt_n(len(tweets_df)))
| 1,331 | 30.714286 | 130 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_analysis/user_details_vq.py |
import os
from pprint import pprint
import json
from pandas import DataFrame, read_csv
from scipy.stats import ks_2samp #, ttest_ind
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
from app.job import Job
from app.decorators.number_decorators import fmt_n
from app.decorators.datetime_decorators import date_to_ts
from app.ks_test.interpreter import interpret as interpret_ks, PVAL_MAX
LIMIT = os.getenv("LIMIT") # 1000 # None # os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="25_000")) # 100
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # True
def load_data(csv_filepath):
    """Loads the dataset from a local CSV cache when present (and DESTRUCTIVE is off);
    otherwise downloads it and writes the cache. Prints the row count and head either way."""
    use_cache = os.path.isfile(csv_filepath) and not DESTRUCTIVE
    if use_cache:
        print("LOADING CSV...")
        df = read_csv(csv_filepath)
    else:
        print("DOWNLOADING CSV...")
        df = download_data()
        df.to_csv(csv_filepath, index=False)

    print(fmt_n(len(df)))
    print(df.head())
    return df
def download_data():
    """Streams user-detail rows from BigQuery (capped by LIMIT) into a DataFrame,
    reporting progress every BATCH_SIZE rows."""
    job = Job()
    bq_service = BigQueryService()

    job.start()
    rows = []
    for count, row in enumerate(bq_service.fetch_user_details_vq(limit=LIMIT), start=1):
        rows.append(dict(row))
        job.counter = count
        if count % BATCH_SIZE == 0:
            job.progress_report()
    job.end()

    return DataFrame(rows)
def ks_test_response(x, y):
    """
    Runs a two-sample Kolmogorov-Smirnov test and packages the result.

    Params x and y : two arrays of sample observations assumed to be drawn from a
    continuous distribution; sample sizes can be different.

    Returns a dict with the test statistic, p-value, the p-value threshold used
    for interpretation, and a human-readable interpretation.

    See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html
    """
    result = ks_2samp(x, y)
    return {
        "stat": result.statistic,
        "pval": result.pvalue,
        "interpret_pval_max": PVAL_MAX,
        "interpret": interpret_ks(result),
    }
if __name__ == "__main__":

    storage = FileStorage(dirpath="disinformation")
    csv_filepath = os.path.join(storage.local_dirpath, "user_details_vq.csv")
    df = load_data(csv_filepath)

    if input("PERFORM K/S TESTS? (Y/N): ").upper() == "Y":

        # Test 1/2: do creation dates differ between groups?
        print("-----------------------")
        print("TESTING USER CREATION DATES...")
        df["creation_ts"] = df["creation_date"].map(date_to_ts)  # K/S needs numeric samples
        metric = "creation_ts"

        # bots vs humans
        json_filepath = os.path.join(storage.local_dirpath, "ks_test_creation_bots_humans.json")
        x = sorted(df[df["is_bot"] == True][metric].tolist())
        y = sorted(df[df["is_bot"] == False][metric].tolist())
        print(f"BOTS ({fmt_n(len(x))}) VS HUMANS ({fmt_n(len(y))}) ...")
        response = ks_test_response(x, y)
        response["name"] = "User Creation Dates (Bot vs Human)"
        pprint(response)
        with open(json_filepath, "w") as json_file:
            json.dump(response, json_file)

        # disinformation spreaders (any q statuses) vs everyone else
        json_filepath = os.path.join(storage.local_dirpath, "ks_test_creation_disinformation.json")
        x = sorted(df[df["q_status_count"] > 0][metric].tolist())
        y = sorted(df[df["q_status_count"] == 0][metric].tolist())
        print(f"DISINFORMATION SPREADER ({fmt_n(len(x))}) VS NOT ({fmt_n(len(y))}) ...")
        response = ks_test_response(x, y)
        response["name"] = "User Creation Dates (Disinformation Spreader vs Others)"
        pprint(response)
        with open(json_filepath, "w") as json_file:
            json.dump(response, json_file)

        # Test 2/2: do screen-name change counts differ between groups?
        print("-----------------------")
        print("TESTING SCREEN NAME COUNTS...")
        metric = "screen_name_count"

        json_filepath = os.path.join(storage.local_dirpath, "ks_test_sncounts_bots_humans.json")
        x = df[df["is_bot"] == True][metric]
        y = df[df["is_bot"] == False][metric]
        print(f"BOTS ({fmt_n(len(x))}) VS HUMANS ({fmt_n(len(y))}) ...")
        print(x.value_counts())
        print(y.value_counts())
        response = ks_test_response(sorted(x.tolist()), sorted(y.tolist()))
        response["name"] = "Screen Name Changes (Bot vs Human)"
        pprint(response)
        with open(json_filepath, "w") as json_file:
            json.dump(response, json_file)
        #t_result = ttest_ind(sorted(x.tolist()), sorted(y.tolist()))

        json_filepath = os.path.join(storage.local_dirpath, "ks_test_sncounts_disinformation.json")
        x = df[df["q_status_count"] > 0][metric]
        y = df[df["q_status_count"] == 0][metric]
        print(f"DISINFORMATION SPREADER ({fmt_n(len(x))}) VS NOT ({fmt_n(len(y))}) ...")
        print(x.value_counts())
        print(y.value_counts())
        response = ks_test_response(sorted(x.tolist()), sorted(y.tolist()))
        response["name"] = "Screen Name Changes (Disinformation Spreader vs Others)"
        pprint(response)
        with open(json_filepath, "w") as json_file:
            json.dump(response, json_file)
        #t_result = ttest_ind(sorted(x.tolist()), sorted(y.tolist()))
| 4,852 | 37.515873 | 137 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/tweets.py | import os
from dotenv import load_dotenv
load_dotenv()
START_AT = os.getenv("START_AT", default="2019-12-10")
END_AT = os.getenv("END_AT", default="2020-02-10")
from app.pg_pipeline import Pipeline
if __name__ == "__main__":

    pipeline = Pipeline()

    # date range is configurable via the START_AT / END_AT env vars (see top of file)
    print("START_AT:", START_AT)
    print("END AT:", END_AT)

    pipeline.download_tweets(start_at=START_AT, end_at=END_AT)
| 383 | 19.210526 | 62 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/bot_followers.py |
import os
from app.pg_pipeline import Pipeline
BOT_MIN = float(os.getenv("BOT_MIN", default="0.8"))
if __name__ == "__main__":

    pipeline = Pipeline()

    # BOT_MIN is the bot-score threshold (configurable via env var above)
    pipeline.download_bot_followers(bot_min=BOT_MIN)
| 212 | 15.384615 | 52 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/pg_service.py |
from psycopg2 import connect
from psycopg2.extras import DictCursor
from app.decorators.number_decorators import fmt_n
from app.decorators.datetime_decorators import logstamp
from app.pg_pipeline.models import DATABASE_URL, USER_FRIENDS_TABLE_NAME
class PgService:
    """
    Thin wrapper around a psycopg2 connection that streams query results through a
    named (server-side) cursor, so large result sets can be paged with fetchmany()
    instead of being materialized client-side all at once.

    NOTE: a named cursor can only execute one query; construct a new PgService
    per query.
    """

    def __init__(self, database_url=DATABASE_URL):
        self.database_url = database_url
        self.connection = connect(self.database_url)
        # the cursor name makes it server-side, which prevents memory issues on big queries
        self.cursor = self.connection.cursor(name="pg_service_cursor", cursor_factory=DictCursor)

        print("-------------------------")
        print("PG SERVICE")
        print(f" DATABASE URL: '{self.database_url}'")
        print(" CONNECTION:", type(self.connection))
        print(" CURSOR:", type(self.cursor), self.cursor.name)

    def close(self):
        """Call this when done using the cursor."""
        print("CLOSING PG CONNECTION...")
        self.cursor.close()
        self.connection.close()

    #
    # QUERIES
    #

    def get_user_friends(self, limit=None):
        """Executes the user-friends query on the named cursor (caller pages results via fetchmany())."""
        sql = f"SELECT id, user_id, screen_name, friend_count, friend_names FROM {USER_FRIENDS_TABLE_NAME} "
        if limit:
            sql += f" LIMIT {int(limit)};"
        self.cursor.execute(sql)
if __name__ == "__main__":

    LIMIT = 100_000
    BATCH_SIZE = 10_000

    pg_service = PgService()
    counter = 0

    # execute the query, then page through the named cursor in batches
    pg_service.get_user_friends(limit=LIMIT)
    while True:
        batch = pg_service.cursor.fetchmany(size=BATCH_SIZE)
        if not batch: break  # nothing left to fetch
        counter += len(batch)
        print(logstamp(), fmt_n(counter))

    pg_service.close()
    print("COMPLETE!")
| 3,349 | 34.638298 | 164 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/community_predictions.py | #import os
#from dotenv import load_dotenv
#load_dotenv()
#START_AT = os.getenv("START_AT", default="2019-12-12")
#END_AT = os.getenv("END_AT", default="2020-02-10")
from app.pg_pipeline import Pipeline
if __name__ == "__main__":

    pipeline = Pipeline()

    # date-range filtering was dropped; download all predictions
    #print("START_AT:", START_AT)
    #print("END AT:", END_AT)
    #pipeline.download_community_predictions(start_at=START_AT, end_at=END_AT)
    pipeline.download_community_predictions()
| 451 | 22.789474 | 78 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/user_friends.py |
from app.pg_pipeline import Pipeline
if __name__ == "__main__":

    pipeline = Pipeline()

    # copies the user-friends dataset into the local PG database
    pipeline.download_user_friends()
| 132 | 12.3 | 36 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/models.py |
import os
from dotenv import load_dotenv
from sqlalchemy import create_engine, Column, Integer, BigInteger, String, Boolean, DateTime, ARRAY, TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
load_dotenv()
DATABASE_URL = os.getenv("DATABASE_URL", default="postgresql://username:password@localhost/dbname")
TWEETS_TABLE_NAME = os.getenv("TWEETS_TABLE_NAME", default="tweets")
USER_FRIENDS_TABLE_NAME = os.getenv("USER_FRIENDS_TABLE_NAME", default="user_friends") # can customize different sizes, like "user_friends_10k", for testing
USER_DETAILS_TABLE_NAME = os.getenv("USER_DETAILS_TABLE_NAME", default="user_details")
RETWEETER_DETAILS_TABLE_NAME = os.getenv("RETWEETER_DETAILS_TABLE_NAME", default="retweeter_details")
#BOT_MIN = float(os.getenv("BOT_MIN", default="0.8"))
#BOT_FOLLOWERS_TABLE_NAME = f"bot_followers_above_{int(BOT_MIN * 100)}"
COMMUNITY_PREDICTIONS_TABLE_NAME = os.getenv("COMMUNITY_PREDICTIONS_TABLE_NAME", default="2_community_predictions")
db = create_engine(DATABASE_URL)
Base = declarative_base()
Base.metadata.bind = db # fixes sqlalchemy.exc.UnboundExecutionError: Table object 'books' is not bound to an Engine or Connection. Execution can not proceed without a database to execute against.
BoundSession = sessionmaker(bind=db)
class Book(Base):
    """Toy model used to smoke-test the database connection and ORM round-trips."""
    __tablename__ = "books"
    id = Column(Integer, primary_key=True)
    title = Column(String(128))
    author = Column(String(128))
    readers = Column(ARRAY(String(128)))
class Tweet(Base):
    """A collected tweet status, with denormalized retweet/reply/user attributes."""
    __tablename__ = TWEETS_TABLE_NAME
    #status_id = Column(BigInteger, primary_key=True)
    row_id = Column(Integer, primary_key=True) # use auto-generated id
    status_id = Column(BigInteger) # , index=True # there are a few duplicate status ids, let's deal with them locally
    status_text = Column(String(500))
    truncated = Column(Boolean)
    # retweet linkage (null when the status is not a retweet)
    retweeted_status_id = Column(BigInteger) # , index=True
    retweeted_user_id = Column(BigInteger) # , index=True
    retweeted_user_screen_name = Column(String(250))
    # reply linkage (null when the status is not a reply)
    reply_status_id = Column(BigInteger)
    reply_user_id = Column(BigInteger)
    is_quote = Column(Boolean)
    geo = Column(String(250))
    created_at = Column(TIMESTAMP) # , index=True
    # denormalized author attributes
    user_id = Column(BigInteger) # , index=True
    user_name = Column(String(250))
    user_screen_name = Column(String(250)) # , index=True
    user_description = Column(String(250))
    user_location = Column(String(250))
    user_verified = Column(Boolean)
    user_created_at = Column(TIMESTAMP) # , index=True
class UserFriend(Base):
    """A user together with the list of screen names they follow ("friends")."""
    __tablename__ = USER_FRIENDS_TABLE_NAME
    id = Column(Integer, primary_key=True)
    user_id = Column(BigInteger) # , primary_key=True
    screen_name = Column(String(128))
    friend_count = Column(Integer)
    friend_names = Column(ARRAY(String(128)))
class UserDetail(Base):
    """Aggregated per-user profile details: latest values, change counts, and value histories."""
    __tablename__ = USER_DETAILS_TABLE_NAME
    user_id = Column(BigInteger, primary_key=True)
    # latest observed profile values
    screen_name = Column(String(50))
    name = Column(String(150))
    description = Column(String(250))
    location = Column(String(150))
    verified = Column(Boolean)
    created_at = Column(TIMESTAMP)
    # number of distinct values observed for each attribute
    screen_name_count = Column(Integer)
    name_count = Column(Integer)
    description_count = Column(Integer)
    location_count = Column(Integer)
    verified_count = Column(Integer)
    created_count = Column(Integer)
    # full histories of observed values, parallel to the counts above
    screen_names = Column(ARRAY(String(50)))
    names = Column(ARRAY(String(150)))
    descriptions = Column(ARRAY(String(250)))
    locations = Column(ARRAY(String(150)))
    verifieds = Column(ARRAY(Boolean))
    created_ats = Column(ARRAY(TIMESTAMP))
    # activity totals
    friend_count = Column(Integer)
    status_count = Column(Integer)
    retweet_count = Column(Integer)
class RetweeterDetail(Base):
    """Slimmed-down per-user details for accounts that retweeted (subset of UserDetail columns)."""
    __tablename__ = RETWEETER_DETAILS_TABLE_NAME
    user_id = Column(BigInteger, primary_key=True)
    created_at = Column(TIMESTAMP)
    screen_name_count = Column(Integer)
    name_count = Column(Integer)
    retweet_count = Column(Integer)
#class BotFollower(Base):
# __tablename__ = BOT_FOLLOWERS_TABLE_NAME
# id = Column(Integer, primary_key=True)
# bot_id = Column(BigInteger, index=True)
# follower_id = Column(BigInteger, index=True)
class CommunityPrediction(Base):
    """The community id predicted for a given status by the community-classification model."""
    __tablename__ = COMMUNITY_PREDICTIONS_TABLE_NAME
    status_id = Column(BigInteger, primary_key=True)
    predicted_community_id = Column(Integer, index=True)
if __name__ == "__main__":

    #breakpoint()
    #Book.__table__.drop(db)
    #Book.__table__.create(db)
    #if not Book.__table__.exists(): Book.__table__.create(db)
    #if not UserFriend.__table__.exists(): UserFriend.__table__.create(db)

    # create any tables that don't exist yet (non-destructive)
    Base.metadata.create_all(db)

    session = BoundSession()

    # get-or-create a sample book to verify ORM reads and writes work
    try:
        book = session.query(Book).filter(Book.title=="Harry Potter").one()
    except NoResultFound as err:
        book = Book(title="Harry Potter", author="JKR", readers=["John", "Jane", "Sally"])
        session.add(book)
        session.commit()

    print("-------")
    print("BOOKS:")
    books = session.query(Book)
    for book in books:
        print("...", book.id, "|", book.title, "|", book.author, "|", book.readers)

    # raw-SQL sanity checks on the scraped tables
    print("-------")
    print("USER FRIENDS:")
    results = db.execute(f"SELECT count(DISTINCT screen_name) as row_count FROM {USER_FRIENDS_TABLE_NAME};")
    #print(results.keys()) #> ["row_count"]
    #print(results.rowcount) #> 1
    print("...", results.fetchone()[0]) #TODO: row factory ... results.fetchone()["row_count"]

    print("-------")
    print("USER DETAILS:")
    results = db.execute(f"SELECT count(DISTINCT screen_name) as row_count FROM {USER_DETAILS_TABLE_NAME};")
    #print(results.keys()) #> ["row_count"]
    #print(results.rowcount) #> 1
    print("...", results.fetchone()[0]) #TODO: row factory ... results.fetchone()["row_count"]

    session.close()
| 6,147 | 37.425 | 197 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/__init__.py |
import time
import os
from dotenv import load_dotenv
from app import APP_ENV
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService
from app.pg_pipeline.models import BoundSession, db, Tweet, UserFriend, UserDetail, RetweeterDetail, CommunityPrediction #, BotFollower
# todo: inherit start and end from Job class
load_dotenv()
USERS_LIMIT = os.getenv("USERS_LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=100))
PG_DESTRUCTIVE = (os.getenv("PG_DESTRUCTIVE", default="false") == "true")
def clean_string(dirty):
    """Return a PG-safe copy of the given string.

    Replaces NUL characters with the Unicode replacement character, which
    fixes "ValueError: A string literal cannot contain NUL (0x00) characters."
    Falsy inputs (None or empty string) are normalized to None.

    Param: dirty (string or None) the string to be cleaned.
    """
    return dirty.replace("\x00", "\uFFFD") if dirty else None
class Pipeline():
    """Streams rows from BigQuery into Postgres tables, in batches.

    Each download_* method (optionally destructively) migrates its
    destination table, then fetches rows from BigQuery and bulk-inserts
    them in batches of self.batch_size.

    Fixes over the previous revision:
      + the final partial batch is now flushed after each fetch loop
        (previously up to batch_size - 1 trailing rows were silently dropped)
      + removed trailing spaces from the "retweeted_status_id" and
        "retweeted_user_id" mapping keys in download_tweets (the padded keys
        could not match the Tweet model's attribute names)
      + replaced a no-op try/finally around status-text truncation with an
        explicit None check (slicing None would raise TypeError)
    """

    def __init__(self, users_limit=USERS_LIMIT, batch_size=BATCH_SIZE, pg_destructive=PG_DESTRUCTIVE, bq_service=None):
        """
        Params:
            users_limit (int or None) max user rows to fetch (None fetches all).
            batch_size (int) number of rows per bulk insert.
            pg_destructive (bool) whether to drop and re-create destination tables.
            bq_service (BigQueryService) optionally inject a service (e.g. for tests).
        """
        self.bq_service = bq_service or BigQueryService()

        self.users_limit = int(users_limit) if users_limit else None
        # todo: combine with users_limit into a more generic rows_limit, since we usually run one script or another, so can reset the var between runs
        self.tweets_limit = self.users_limit

        self.batch_size = batch_size
        self.pg_destructive = pg_destructive
        self.pg_engine = db
        self.pg_session = BoundSession()

        print("-------------------------")
        print("PG PIPELINE...")
        print("  USERS LIMIT:", self.users_limit)
        print("  BATCH SIZE:", self.batch_size)
        #print("  BQ SERVICE:", type(self.bq_service))
        #print("  PG SESSION:", type(self.pg_session))
        print("  PG DESTRUCTIVE:", self.pg_destructive)

    def start_job(self):
        """Resets the per-job timer, batch buffer, and row counter."""
        self.start_at = time.perf_counter()
        self.batch = []
        self.counter = 0

    def end_job(self):
        """Prints a summary of the finished job and closes the PG session."""
        print("ETL COMPLETE!")
        self.end_at = time.perf_counter()
        self.duration_seconds = round(self.end_at - self.start_at, 2)
        print(f"PROCESSED {self.counter} ITEMS IN {self.duration_seconds} SECONDS")
        self.pg_session.close()

    def destructively_migrate(self, model_class):
        """Drops the model's table when pg_destructive is set, then (re-)creates it if missing."""
        if self.pg_destructive and model_class.__table__.exists():
            print(f"DROPPING THE {model_class.__table__.name.upper()} TABLE!")
            model_class.__table__.drop(self.pg_engine)
            self.pg_session.commit()

        if not model_class.__table__.exists():
            print(f"CREATING THE {model_class.__table__.name.upper()} TABLE!")
            model_class.__table__.create(self.pg_engine)
            self.pg_session.commit()

    def _save_batch_if_full(self, model_class):
        """Bulk-inserts the buffered batch once it reaches batch_size."""
        if len(self.batch) >= self.batch_size:
            self._flush_batch(model_class)

    def _flush_batch(self, model_class):
        """Bulk-inserts whatever rows are buffered (if any) and resets the buffer."""
        if self.batch:
            print(logstamp(), fmt_n(self.counter), "SAVING BATCH...")
            self.pg_session.bulk_insert_mappings(model_class, self.batch)
            self.pg_session.commit()
            self.batch = []

    def download_tweets(self, start_at=None, end_at=None):
        """ETL for the tweets table, optionally restricted to a date range."""
        self.start_job()
        self.destructively_migrate(Tweet)

        print(logstamp(), "DATA FLOWING...")
        for row in self.bq_service.fetch_tweets_in_batches(limit=self.tweets_limit, start_at=start_at, end_at=end_at):
            status_text = row["status_text"]
            if status_text:
                status_text = clean_string(status_text[0:500]) # truncate strings over 500 chars
            self.batch.append({
                "status_id": row["status_id"],
                "status_text": status_text,
                "truncated": row["truncated"],
                "retweeted_status_id": row["retweeted_status_id"], # bugfix: key previously had a trailing space
                "retweeted_user_id": row["retweeted_user_id"], # bugfix: key previously had a trailing space
                "retweeted_user_screen_name": row["retweeted_user_screen_name"],
                "reply_status_id": row["reply_status_id"],
                "reply_user_id": row["reply_user_id"],
                "is_quote": row["is_quote"],
                "geo": clean_string(row["geo"]),
                "created_at": row["created_at"],

                "user_id": row["user_id"],
                "user_name": clean_string(row["user_name"]),
                "user_screen_name": clean_string(row["user_screen_name"]),
                "user_description": clean_string(row["user_description"]),
                "user_location": clean_string(row["user_location"]),
                "user_verified": row["user_verified"],
                "user_created_at": row["user_created_at"]
            })
            self.counter += 1
            self._save_batch_if_full(Tweet)

        self._flush_batch(Tweet) # flush the final partial batch (previously dropped)
        self.end_job()

    def download_user_friends(self):
        """ETL for the user_friends table."""
        self.start_job()
        self.destructively_migrate(UserFriend)

        print(logstamp(), "DATA FLOWING...")
        for row in self.bq_service.fetch_user_friends_in_batches(limit=self.users_limit):
            self.batch.append({
                "user_id": row["user_id"],
                "screen_name": row["screen_name"],
                "friend_count": row["friend_count"],
                "friend_names": row["friend_names"]
            })
            self.counter += 1
            self._save_batch_if_full(UserFriend)

        self._flush_batch(UserFriend) # flush the final partial batch (previously dropped)
        self.end_job()

    def download_user_details(self):
        """ETL for the user_details table."""
        self.start_job()
        self.destructively_migrate(UserDetail)

        print(logstamp(), "DATA FLOWING...")
        for row in self.bq_service.fetch_user_details_in_batches(limit=self.users_limit):
            self.batch.append({
                "user_id": row["user_id"],
                "screen_name": clean_string(row["screen_name"]),
                "name": clean_string(row["name"]),
                "description": clean_string(row["description"]),
                "location": clean_string(row["location"]),
                "verified": row["verified"],
                "created_at": row["created_at"], #.strftime("%Y-%m-%d %H:%M:%S"),

                "screen_name_count": row["screen_name_count"],
                "name_count": row["name_count"],
                "description_count": row["description_count"],
                "location_count": row["location_count"],
                "verified_count": row["verified_count"],
                "created_count": row["created_at_count"],

                "screen_names": [clean_string(s) for s in row["screen_names"]],
                "names": [clean_string(s) for s in row["names"]],
                "descriptions": [clean_string(s) for s in row["descriptions"]],
                "locations": [clean_string(s) for s in row["locations"]],
                "verifieds": row["verifieds"],
                "created_ats": row["created_ats"], #[dt.strftime("%Y-%m-%d %H:%M:%S") for dt in row['_created_ats']]

                "friend_count": row["friend_count"],
                "status_count": row["status_count"],
                "retweet_count": row["retweet_count"],
            })
            self.counter += 1
            self._save_batch_if_full(UserDetail)

        self._flush_batch(UserDetail) # flush the final partial batch (previously dropped)
        self.end_job()

    def download_retweeter_details(self):
        """ETL for the retweeter_details table."""
        self.start_job()
        self.destructively_migrate(RetweeterDetail)

        print(logstamp(), "DATA FLOWING...")
        for row in self.bq_service.fetch_retweeter_details_in_batches(limit=self.users_limit):
            self.batch.append({
                "user_id": row["user_id"],
                # NOTE(review): the RetweeterDetail model defined in this repo has no
                # "verified" column — confirm the model/schema before running.
                "verified": row["verified"],
                "created_at": row["created_at"],
                "screen_name_count": row["screen_name_count"],
                "name_count": row["name_count"],
                "retweet_count": row["retweet_count"],
            })
            self.counter += 1
            self._save_batch_if_full(RetweeterDetail)

        self._flush_batch(RetweeterDetail) # flush the final partial batch (previously dropped)
        self.end_job()

    def download_community_predictions(self, start_at=None, end_at=None):
        """ETL for the community predictions table.

        Params start_at / end_at are currently unused (kept for signature
        compatibility with the other download methods).
        """
        self.start_job()
        self.destructively_migrate(CommunityPrediction)

        print(logstamp(), "DATA FLOWING...")
        for row in self.bq_service.fetch_predictions(limit=self.tweets_limit):
            self.batch.append(dict(row))
            self.counter += 1
            self._save_batch_if_full(CommunityPrediction)

        self._flush_batch(CommunityPrediction) # flush the final partial batch (previously dropped)
        self.end_job()
if __name__ == "__main__":
    # Smoke test: construct a pipeline and list its attributes.
    pl = Pipeline()
    print(dir(pl))
| 10,402 | 38.555133 | 186 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/user_details.py |
from app.pg_pipeline import Pipeline
if __name__ == "__main__":
    # Run the user-details ETL.
    # Takes about 50 minutes for 3.6M users in batches of 2500.
    etl = Pipeline()
    etl.download_user_details()
| 191 | 18.2 | 95 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/retweeter_details.py |
from app.pg_pipeline import Pipeline
if __name__ == "__main__":
    # Run the retweeter-details ETL.
    # Takes about 16 minutes for 2.7M users in batches of 2500.
    etl = Pipeline()
    etl.download_retweeter_details()
| 196 | 18.7 | 100 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/pg_pipeline/investigate.py |
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.pg_pipeline.pg_service import PgService
def fetch_in_batches():
    """Yields batches of up to 100 rows from the user_friends table (1,000 rows max)."""
    pg_service = PgService()
    pg_service.cursor.execute("SELECT user_id, screen_name, friend_count FROM user_friends LIMIT 1000;")
    while True:
        rows = pg_service.cursor.fetchmany(size=100)
        if not rows:
            break
        yield rows
    pg_service.close()
    print("COMPLETE!")
def batch_perform():
    """Consumes fetch_in_batches, logging a running total of rows seen."""
    total = 0
    for rows in fetch_in_batches():
        total += len(rows)
        print(logstamp(), fmt_n(total))
def execute_query(sql):
    """Executes the given SQL and yields each result row, one at a time."""
    pg_service = PgService()
    pg_service.cursor.execute(sql)
    # stream the rows out individually rather than returning the whole list
    for row in pg_service.cursor.fetchall():
        yield row
    pg_service.close()
    print("COMPLETE!")
def perform():
    """Eagerly fetches 100 rows via execute_query and logs how many came back."""
    rows = list(execute_query("SELECT user_id, screen_name, friend_count FROM user_friends LIMIT 100;"))
    print(logstamp(), fmt_n(len(rows)))
if __name__ == "__main__":
    # Compare the two fetch strategies; sample output from previous runs is
    # preserved in the comments below.
    batch_perform()
    #> 2020-08-29 09:44:16 100
    #> 2020-08-29 09:44:16 200
    #> 2020-08-29 09:44:16 300
    #> 2020-08-29 09:44:16 400
    #> 2020-08-29 09:44:16 500
    #> 2020-08-29 09:44:16 600
    #> 2020-08-29 09:44:16 700
    #> 2020-08-29 09:44:16 800
    #> 2020-08-29 09:44:16 900
    #> 2020-08-29 09:44:16 1,000
    #> CLOSING PG CONNECTION...
    #> COMPLETE!

    #perform()
    #> CLOSING PG CONNECTION...
    #> COMPLETE!
    #> 2020-08-29 09:51:55 100
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_impact_v4/daily_active_edge_downloader.py |
import os
from pandas import read_csv, DataFrame
from app.retweet_graphs_v2.k_days.generator import DateRangeGenerator
from app.file_storage import FileStorage
from app.bq_service import BigQueryService
from app.job import Job
from app.decorators.number_decorators import fmt_n
LIMIT = os.getenv("LIMIT") # for development purposes
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
NODES_LIMIT = os.getenv("NODES_LIMIT") # for development purposes
NODES_BATCH_SIZE = int(os.getenv("NODES_BATCH_SIZE", default="5000"))
NODES_DESTRUCTIVE = (os.getenv("NODES_DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
if __name__ == "__main__":
    # For each single-day date range, download (or load from a local cache)
    # the day's scored tweets and its nodes with active edges, as CSVs.
    gen = DateRangeGenerator(k_days=1)
    bq_service = BigQueryService()
    job = Job()

    for dr in gen.date_ranges:
        print(dr.start_date)
        storage = FileStorage(dirpath=f"daily_active_edge_friend_graphs_v5/{dr.start_date}")
        tweets_csv_filepath = os.path.join(storage.local_dirpath, "tweets.csv")
        nodes_csv_filepath = os.path.join(storage.local_dirpath, "nodes.csv")

        # use the cached CSV unless DESTRUCTIVE forces a re-download
        if os.path.exists(tweets_csv_filepath) and not DESTRUCTIVE:
            print("LOADING TWEETS...")
            tweets_df = read_csv(tweets_csv_filepath)
        else:
            job.start()
            print("DOWNLOADING TWEETS...")
            records = []
            for row in bq_service.fetch_daily_statuses_with_opinion_scores(date=dr.start_date, limit=LIMIT):
                records.append(dict(row))
                job.counter += 1
                if job.counter % BATCH_SIZE == 0:
                    job.progress_report()
            job.end()
            tweets_df = DataFrame(records)
            # NOTE(review): written without index=False, so the CSV gains an
            # unnamed index column — confirm downstream readers expect this.
            tweets_df.to_csv(tweets_csv_filepath)
            del records # free memory before the next download
        print("TWEETS:", fmt_n(len(tweets_df)))

        if os.path.exists(nodes_csv_filepath) and not NODES_DESTRUCTIVE:
            print("LOADING NODES...")
            nodes_df = read_csv(nodes_csv_filepath)
        else:
            job.start()
            print("DOWNLOADING NODES...")
            records = []
            for row in bq_service.fetch_daily_nodes_with_active_edges(date=dr.start_date, limit=NODES_LIMIT):
                records.append(dict(row))
                job.counter += 1
                if job.counter % NODES_BATCH_SIZE == 0:
                    job.progress_report()
            job.end()
            nodes_df = DataFrame(records)
            nodes_df.to_csv(nodes_csv_filepath)
            del records
        print("NODES:", fmt_n(len(nodes_df)))
| 2,704 | 38.779412 | 142 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_impact_v4/daily_active_edge_friend_grapher_v2.py |
import os
from pandas import DataFrame, read_csv
from networkx import DiGraph, write_gpickle, read_gpickle
from memory_profiler import profile
from app.decorators.number_decorators import fmt_n
from app.job import Job
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
DATE = os.getenv("DATE", default="2020-01-23")
TWEET_MIN = int(os.getenv("TWEET_MIN", default="1")) # CHANGED
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true")
#GRAPH_LIMIT = os.getenv("GRAPH_LIMIT")
GRAPH_BATCH_SIZE = int(os.getenv("GRAPH_BATCH_SIZE", default="10000"))
GRAPH_DESTRUCTIVE = (os.getenv("GRAPH_DESTRUCTIVE", default="false") == "true")
#@profile
def load_graph(local_graph_filepath):
    """Reads a pickled graph from disk, printing its type and node/edge counts."""
    print("LOADING GRAPH...")
    g = read_gpickle(local_graph_filepath)
    print(type(g), fmt_n(g.number_of_nodes()), fmt_n(g.number_of_edges()))
    return g
if __name__ == "__main__":
    # Download (or load cached) model-training tweets for DATE, then export
    # the active nodes and active-edge graph as CSVs.
    print("------------------------")
    print("GRAPHER...")
    print("  DATE:", DATE)
    print("  TWEET_MIN:", TWEET_MIN)
    print("  LIMIT:", LIMIT)
    print("  BATCH_SIZE:", BATCH_SIZE)
    print("  DESTRUCTIVE:", DESTRUCTIVE)
    #print("  GRAPH_LIMIT:", GRAPH_LIMIT)
    print("  GRAPH_BATCH_SIZE:", GRAPH_BATCH_SIZE)
    print("  GRAPH_DESTRUCTIVE:", GRAPH_DESTRUCTIVE)
    print("------------------------")

    storage = FileStorage(dirpath=f"daily_active_friend_graphs_v4/{DATE}/tweet_min/{TWEET_MIN}")
    tweets_csv_filepath = os.path.join(storage.local_dirpath, "tweets.csv")
    bq_service = BigQueryService()
    job = Job()

    #
    # LOAD TWEETS
    # tweet_id, text, screen_name, bot, created_at
    # TODO: de-dup RTs so the model will only train/test on a single RT status text (PREVENT OVERFITTING)

    if os.path.exists(tweets_csv_filepath) and not DESTRUCTIVE:
        print("LOADING TWEETS...")
        statuses_df = read_csv(tweets_csv_filepath)
    else:
        job.start()
        print("DOWNLOADING TWEETS...")
        statuses = []
        for row in bq_service.fetch_daily_active_tweeter_statuses_for_model_training(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT):
            statuses.append(dict(row))
            job.counter += 1
            if job.counter % BATCH_SIZE == 0:
                job.progress_report()
        job.end()
        statuses_df = DataFrame(statuses)
        del statuses # free memory once the frame is built
        statuses_df.to_csv(tweets_csv_filepath)
    print("STATUSES:", fmt_n(len(statuses_df)))

    #
    # MAKE GRAPH

    local_nodes_csv_filepath = os.path.join(storage.local_dirpath, "active_nodes.csv")
    local_graph_csv_filepath = os.path.join(storage.local_dirpath, "active_edge_graph.csv") #CHANGED

    if os.path.exists(local_nodes_csv_filepath) and os.path.exists(local_graph_csv_filepath) and not GRAPH_DESTRUCTIVE:
        nodes_df = read_csv(local_nodes_csv_filepath)
        graph_df = read_csv(local_graph_csv_filepath)
    else:
        # derive the unique node list from the statuses
        nodes_df = statuses_df.copy()
        nodes_df = nodes_df[["user_id", "screen_name","rate","bot"]]
        nodes_df.drop_duplicates(inplace=True)
        print("NODES:", fmt_n(len(nodes_df)))
        print(nodes_df.head())
        nodes_df.to_csv(local_nodes_csv_filepath)
        del statuses_df

        job.start()
        print("ACTIVE EDGES...")
        active_edges = []
        for row in bq_service.fetch_daily_active_edge_friends_for_csv(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT): # CHANGED
            active_edges.append(dict(row))
            job.counter += 1
            if job.counter % GRAPH_BATCH_SIZE == 0:
                job.progress_report()
        job.end()

        graph_df = DataFrame(active_edges)
        print(fmt_n(len(graph_df)))
        print(graph_df.head())
        print("SAVING GRAPH TO CSV...")
        graph_df.to_csv(local_graph_csv_filepath)
        # todo: upload straight to google drive
| 3,954 | 32.803419 | 130 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_impact_v4/active_edge_v6_downloader.py |
import os
from pandas import read_csv, DataFrame
from app.file_storage import FileStorage
from app.bq_service import BigQueryService
from app.job import Job
from app.decorators.number_decorators import fmt_n
LIMIT = os.getenv("LIMIT") # for development purposes
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="10_000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
if __name__ == "__main__":
    # Download (or load cached) the v6 nodes-with-active-edges table as a CSV.
    bq_service = BigQueryService()
    job = Job()

    storage = FileStorage(dirpath=f"nodes_with_active_edges_v6")
    nodes_csv_filepath = os.path.join(storage.local_dirpath, "nodes.csv")

    # use the cached CSV unless DESTRUCTIVE forces a re-download
    if os.path.exists(nodes_csv_filepath) and not DESTRUCTIVE:
        print("LOADING NODES...")
        nodes_df = read_csv(nodes_csv_filepath)
    else:
        job.start()
        print("DOWNLOADING NODES...")
        records = []
        for row in bq_service.fetch_nodes_with_active_edges_v6(limit=LIMIT):
            records.append(dict(row))
            job.counter += 1
            if job.counter % BATCH_SIZE == 0:
                job.progress_report()
        job.end()
        nodes_df = DataFrame(records)
        nodes_df.to_csv(nodes_csv_filepath, index=False)

    print("NODES:", fmt_n(len(nodes_df)))
| 1,296 | 31.425 | 130 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_impact_v4/daily_active_edge_friend_grapher.py |
import os
from pandas import DataFrame, read_csv
from networkx import DiGraph, write_gpickle, read_gpickle
from memory_profiler import profile
from app.decorators.number_decorators import fmt_n
from app.job import Job
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
DATE = os.getenv("DATE", default="2020-01-23")
TWEET_MIN = int(os.getenv("TWEET_MIN", default="1")) # CHANGED
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true")
#GRAPH_LIMIT = os.getenv("GRAPH_LIMIT")
GRAPH_BATCH_SIZE = int(os.getenv("GRAPH_BATCH_SIZE", default="10000"))
GRAPH_DESTRUCTIVE = (os.getenv("GRAPH_DESTRUCTIVE", default="false") == "true")
import json
from networkx.readwrite import json_graph
import numpy as np
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to native Python types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # anything else falls through to the base encoder (which raises TypeError)
        return super().default(obj)
#@profile
def save_graph_as_json(graph, local_json_graph_filepath):
    """Serializes a networkx graph to node-link JSON at the given path."""
    print("CONVERTING GRAPH TO JSON...")
    payload = json_graph.node_link_data(graph)
    print(type(payload))

    print("SAVING JSON GRAPH...", local_json_graph_filepath)
    with open(local_json_graph_filepath, "w") as f:
        # NpEncoder handles any numpy values stored as node/edge attributes
        json.dump(payload, f, indent=4, cls=NpEncoder)
#@profile
def load_graph(local_graph_filepath):
    """Loads a pickled graph from disk and reports its size before returning it."""
    print("LOADING GRAPH...")
    result = read_gpickle(local_graph_filepath)
    print(type(result), fmt_n(result.number_of_nodes()), fmt_n(result.number_of_edges()))
    return result
if __name__ == "__main__":
    # Download (or load cached) tweets for DATE, export node/edge CSVs, then
    # (in currently-dead code below the exit()) build gpickle/JSON graphs.
    print("------------------------")
    print("GRAPHER...")
    print("  DATE:", DATE)
    print("  TWEET_MIN:", TWEET_MIN)
    print("  LIMIT:", LIMIT)
    print("  BATCH_SIZE:", BATCH_SIZE)
    print("  DESTRUCTIVE:", DESTRUCTIVE)
    #print("  GRAPH_LIMIT:", GRAPH_LIMIT)
    print("  GRAPH_BATCH_SIZE:", GRAPH_BATCH_SIZE)
    print("  GRAPH_DESTRUCTIVE:", GRAPH_DESTRUCTIVE)
    print("------------------------")

    storage = FileStorage(dirpath=f"daily_active_friend_graphs_v4/{DATE}/tweet_min/{TWEET_MIN}")
    tweets_csv_filepath = os.path.join(storage.local_dirpath, "tweets.csv")
    bq_service = BigQueryService()
    job = Job()

    #
    # LOAD TWEETS
    # tweet_id, text, screen_name, bot, created_at
    # TODO: de-dup RTs so the model will only train/test on a single RT status text (PREVENT OVERFITTING)

    if os.path.exists(tweets_csv_filepath) and not DESTRUCTIVE:
        print("LOADING TWEETS...")
        statuses_df = read_csv(tweets_csv_filepath)
    else:
        job.start()
        print("DOWNLOADING TWEETS...")
        statuses = []
        for row in bq_service.fetch_daily_active_tweeter_statuses(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT):
            statuses.append(dict(row))
            job.counter += 1
            if job.counter % BATCH_SIZE == 0:
                job.progress_report()
        job.end()
        statuses_df = DataFrame(statuses)
        del statuses
        statuses_df.to_csv(tweets_csv_filepath)
    print("STATUSES:", fmt_n(len(statuses_df)))

    #
    # MAKE GRAPH
    # TODO: export graph as CSV format for TZ
    # and optionally also construct the gpickle and json graph objects

    local_nodes_csv_filepath = os.path.join(storage.local_dirpath, "active_nodes.csv")
    local_graph_csv_filepath = os.path.join(storage.local_dirpath, "active_edge_graph.csv") #CHANGED

    if os.path.exists(local_nodes_csv_filepath) and os.path.exists(local_graph_csv_filepath) and not GRAPH_DESTRUCTIVE:
        nodes_df = read_csv(local_nodes_csv_filepath)
        graph_df = read_csv(local_graph_csv_filepath)
    else:
        nodes_df = statuses_df.copy()
        nodes_df = nodes_df[["user_id", "screen_name","rate","bot"]]
        nodes_df.drop_duplicates(inplace=True)
        print("NODES:", fmt_n(len(nodes_df)))
        print(nodes_df.head())
        nodes_df.to_csv(local_nodes_csv_filepath)
        del statuses_df

        job.start()
        print("ACTIVE EDGES...")
        active_edges = []
        for row in bq_service.fetch_daily_active_edge_friends_for_csv(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT): # CHANGED
            active_edges.append(dict(row))
            job.counter += 1
            if job.counter % GRAPH_BATCH_SIZE == 0:
                job.progress_report()
        job.end()

        graph_df = DataFrame(active_edges)
        print(fmt_n(len(graph_df)))
        print(graph_df.head())
        print("SAVING GRAPH TO CSV...")
        graph_df.to_csv(local_graph_csv_filepath)
        # todo: upload straight to google drive

    # NOTE(review): everything below this exit() is dead code at runtime —
    # presumably kept for optional gpickle/JSON graph construction. Also note
    # that in the cached branch above, statuses_df is not deleted, so the
    # dead else-branch below would re-derive nodes from it if re-enabled.
    exit()

    local_graph_filepath = os.path.join(storage.local_dirpath, "active_edge_graph.gpickle") #CHANGED
    gcs_graph_filepath = os.path.join(storage.gcs_dirpath, "active_edge_graph.gpickle") #CHANGED

    if os.path.exists(local_graph_filepath) and not GRAPH_DESTRUCTIVE:
        graph = load_graph(local_graph_filepath)
    else:
        nodes_df = statuses_df.copy()
        nodes_df = nodes_df[["user_id", "screen_name","rate","bot"]]
        nodes_df.drop_duplicates(inplace=True)
        print("NODES:", fmt_n(len(nodes_df)))
        print(nodes_df.head())
        del statuses_df

        print("CREATING GRAPH...")
        graph = DiGraph()

        job.start()
        print("NODES...")
        # for each unique node in the list, add a node to the graph.
        for i, row in nodes_df.iterrows():
            graph.add_node(row["screen_name"], user_id=row["user_id"], rate=row["rate"], bot=row["bot"])
            job.counter += 1
            if job.counter % GRAPH_BATCH_SIZE == 0:
                job.progress_report()
        job.end()
        del nodes_df

        job.start()
        print("EDGES...")
        for row in bq_service.fetch_daily_active_edge_friends(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT): # CHANGED
            graph.add_edges_from([(row["screen_name"], friend) for friend in row["friend_names"]])
            job.counter += 1
            if job.counter % GRAPH_BATCH_SIZE == 0:
                job.progress_report()
        job.end()

        print(type(graph), fmt_n(graph.number_of_nodes()), fmt_n(graph.number_of_edges()))
        print("SAVING GRAPH...")
        write_gpickle(graph, local_graph_filepath)
        #del graph
    #storage.upload_file(local_graph_filepath, gcs_graph_filepath)

    local_json_graph_filepath = os.path.join(storage.local_dirpath, "active_edge_graph.json") #CHANGED
    gcs_json_graph_filepath = os.path.join(storage.gcs_dirpath, "active_edge_graph.json")
    save_graph_as_json(graph, local_json_graph_filepath)
    #storage.upload_file(local_json_graph_filepath, gcs_json_graph_filepath)
| 6,941 | 29.716814 | 125 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_impact_v4/active_edge_v7_downloader.py |
import os
from pandas import read_csv, DataFrame
from app.file_storage import FileStorage
from app.bq_service import BigQueryService
from app.job import Job
from app.decorators.number_decorators import fmt_n
LIMIT = os.getenv("LIMIT") # for development purposes
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="10_000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
if __name__ == "__main__":
    # Download (or load cached) the v7 screen-name nodes-with-active-edges
    # table as a CSV. The id-nodes download is currently commented out.
    bq_service = BigQueryService()
    job = Job()

    storage = FileStorage(dirpath=f"nodes_with_active_edges_v7")

    nodes_csv_filepath = os.path.join(storage.local_dirpath, "sn_nodes.csv")
    if os.path.exists(nodes_csv_filepath) and not DESTRUCTIVE:
        print("LOADING SN NODES...")
        nodes_df = read_csv(nodes_csv_filepath)
    else:
        job.start()
        print("DOWNLOADING SN NODES...")
        records = []
        for row in bq_service.fetch_sn_nodes_with_active_edges_v7(limit=LIMIT):
            records.append(dict(row))
            job.counter += 1
            if job.counter % BATCH_SIZE == 0:
                job.progress_report()
        job.end()
        nodes_df = DataFrame(records)
        nodes_df.to_csv(nodes_csv_filepath, index=False)
    print("SN NODES:", fmt_n(len(nodes_df)))

    #nodes_csv_filepath = os.path.join(storage.local_dirpath, "id_nodes.csv")
    #if os.path.exists(nodes_csv_filepath) and not DESTRUCTIVE:
    #    print("LOADING NODES...")
    #    nodes_df = read_csv(nodes_csv_filepath)
    #else:
    #    job.start()
    #    print("DOWNLOADING NODES...")
    #    records = []
    #    for row in bq_service.fetch_nodes_with_active_edges_v7_sn(limit=LIMIT):
    #        records.append(dict(row))
    #        job.counter += 1
    #        if job.counter % BATCH_SIZE == 0:
    #            job.progress_report()
    #    job.end()
    #    nodes_df = DataFrame(records)
    #    nodes_df.to_csv(nodes_csv_filepath, index=False)

    # NOTE(review): with the id-nodes section commented out, this still prints
    # the SN nodes frame under the "ID NODES" label — confirm intent.
    print("ID NODES:", fmt_n(len(nodes_df)))
| 2,028 | 33.982759 | 130 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_impact_v4/daily_active_user_friend_grapher.py |
import os
from pandas import DataFrame, read_csv
from networkx import DiGraph, write_gpickle, read_gpickle
from app.decorators.number_decorators import fmt_n
from app.job import Job
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
DATE = os.getenv("DATE", default="2020-01-23")
TWEET_MIN = os.getenv("TWEET_MIN")
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true")
#GRAPH_LIMIT = os.getenv("GRAPH_LIMIT")
GRAPH_BATCH_SIZE = int(os.getenv("GRAPH_BATCH_SIZE", default="10000"))
GRAPH_DESTRUCTIVE = (os.getenv("GRAPH_DESTRUCTIVE", default="false") == "true")
if __name__ == "__main__":
    # Download (or load cached) active-tweeter statuses for DATE, then build
    # a directed friend graph (nodes from statuses, edges from user friends),
    # pickle it, and upload it to GCS.
    print("------------------------")
    print("GRAPHER...")
    print("  DATE:", DATE)
    print("  TWEET_MIN:", TWEET_MIN)
    print("  LIMIT:", LIMIT)
    print("  BATCH_SIZE:", BATCH_SIZE)
    print("  DESTRUCTIVE:", DESTRUCTIVE)
    #print("  GRAPH_LIMIT:", GRAPH_LIMIT)
    print("  GRAPH_BATCH_SIZE:", GRAPH_BATCH_SIZE)
    print("  GRAPH_DESTRUCTIVE:", GRAPH_DESTRUCTIVE)
    print("------------------------")

    storage = FileStorage(dirpath=f"daily_active_friend_graphs_v4/{DATE}/tweet_min/{TWEET_MIN}")
    tweets_csv_filepath = os.path.join(storage.local_dirpath, "tweets.csv")
    bq_service = BigQueryService()
    job = Job()

    #
    # LOAD TWEETS
    # tweet_id, text, screen_name, bot, created_at

    if os.path.exists(tweets_csv_filepath) and not DESTRUCTIVE:
        print("LOADING TWEETS...")
        statuses_df = read_csv(tweets_csv_filepath)
    else:
        job.start()
        print("DOWNLOADING TWEETS...")
        statuses = []
        for row in bq_service.fetch_daily_active_tweeter_statuses(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT):
            statuses.append(dict(row))
            job.counter += 1
            if job.counter % BATCH_SIZE == 0:
                job.progress_report()
        job.end()
        statuses_df = DataFrame(statuses)
        del statuses
        statuses_df.to_csv(tweets_csv_filepath)
    print(fmt_n(len(statuses_df)))

    #
    # MAKE GRAPH

    local_graph_filepath = os.path.join(storage.local_dirpath, "graph.gpickle")
    gcs_graph_filepath = os.path.join(storage.gcs_dirpath, "graph.gpickle")

    if os.path.exists(local_graph_filepath) and not GRAPH_DESTRUCTIVE:
        print("LOADING GRAPH...")
        graph = read_gpickle(local_graph_filepath)
        print(type(graph), graph.number_of_nodes(), graph.number_of_edges())
    else:
        # derive the unique node list from the statuses
        nodes_df = statuses_df.copy()
        nodes_df = nodes_df[["user_id", "screen_name","rate","bot"]]
        nodes_df.drop_duplicates(inplace=True)
        print(len(nodes_df))
        print(nodes_df.head())

        print("CREATING GRAPH...")
        graph = DiGraph()

        job.start()
        print("NODES...")
        # for each unique node in the list, add a node to the graph.
        for i, row in nodes_df.iterrows():
            graph.add_node(row["screen_name"], user_id=row["user_id"], rate=row["rate"], bot=row["bot"])
            job.counter += 1
            if job.counter % GRAPH_BATCH_SIZE == 0:
                job.progress_report()
        job.end()

        job.start()
        print("EDGES...")
        for row in bq_service.fetch_daily_active_user_friends(date=DATE, tweet_min=TWEET_MIN, limit=LIMIT):
            graph.add_edges_from([(row["screen_name"], friend) for friend in row["friend_names"]])
            job.counter += 1
            if job.counter % GRAPH_BATCH_SIZE == 0:
                job.progress_report()
        job.end()

        print(type(graph), fmt_n(graph.number_of_nodes()), fmt_n(graph.number_of_edges()))
        write_gpickle(graph, local_graph_filepath)
        del graph # free memory before the upload
        storage.upload_file(local_graph_filepath, gcs_graph_filepath)

    #breakpoint()
    #metadata = {
    #    "bq_service":bq_service.metadata,
    #    "date": DATE,
    #    "tweet_min": TWEET_MIN,
    #    "job_params":{
    #        "limit": LIMIT,
    #        "batch_size": BATCH_SIZE,
    #        "graph_batch_size": GRAPH_BATCH_SIZE,
    #    },
    #    "results":{
    #        "tweets": len(statuses_df),
    #        "nodes": graph.number_of_nodes(),
    #        "edges": graph.number_of_edges(),
    #    }
    #}
    ## save metadata to JSON
    #local_metadata_filepath =
    #gcs_metadata_filepath =
    #storage.upload_file(local_metadata_filepath, gcs_metadata_filepath)
| 4,479 | 31.941176 | 111 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp_v2/model_promotion.py | import os
from app.nlp.model_storage import ModelStorage
SOURCE = os.getenv("SOURCE", default="nlp_v2/models/dev/multinomial_nb")
DESTINATION = os.getenv("DESTINATION", default="nlp_v2/models/best/multinomial_nb")
if __name__ == "__main__":
    # Promote the model artifacts stored at SOURCE to the DESTINATION path.
    model_storage = ModelStorage(dirpath=SOURCE)
    model_storage.promote_model(destination=DESTINATION)
| 340 | 25.230769 | 83 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp_v2/bulk_predict.py | import os
from pandas import DataFrame, read_csv
from app import seek_confirmation, DATA_DIR
from app.job import Job
from app.bq_service import BigQueryService
from app.nlp.model_storage import ModelStorage
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
CSV_FILEPATH = os.path.join(DATA_DIR, "nlp_v2", "all_statuses.csv")
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
def save_batch(batch, csv_filepath=None):
    """Append a batch of status records to a CSV file, creating it (with a header row) if necessary.

    Params:
        batch (list of dict) records with "status_id" and "status_text" keys.
        csv_filepath (str) destination path. Defaults to the module-level CSV_FILEPATH,
            resolved at call time rather than at import time (the original default was
            bound when the function was defined).
    """
    if csv_filepath is None:
        csv_filepath = CSV_FILEPATH
    batch_df = DataFrame(batch, columns=["status_id", "status_text"])
    # append mode also creates the file if missing; only write the header on creation,
    # so appended batches don't repeat it mid-file
    batch_df.to_csv(csv_filepath, mode="a", header=not os.path.isfile(csv_filepath), index=False)
if __name__ == "__main__":
    bq_service = BigQueryService()
    job = Job()
    # STEP 1: download all statuses to a local CSV (skipped if it already exists, unless DESTRUCTIVE)
    if DESTRUCTIVE or not os.path.isfile(CSV_FILEPATH):
        job.start()
        batch = []
        for row in bq_service.nlp_v2_fetch_statuses(limit=LIMIT):
            batch.append({"status_id": row["status_id"], "status_text": row["status_text"]})
            job.counter += 1
            if job.counter % BATCH_SIZE == 0:
                save_batch(batch)
                batch = []
                job.progress_report()
        # flush the final partial batch
        if len(batch) > 0:
            save_batch(batch)
            batch = []
        job.end()
    seek_confirmation()
    #exit()
    # STEP 2: for each trained model, predict a label for every downloaded status
    # and upload the predictions to a per-model BigQuery table
    for model_name in ["logistic_regression", "multinomial_nb"]:
        storage = ModelStorage(dirpath=f"nlp_v2/models/best/{model_name}")
        tv = storage.load_vectorizer()
        clf = storage.load_model()
        print(f"DESTROY PREDICTIONS TABLE? ({model_name})")
        seek_confirmation()
        bq_service.nlp_v2_destructively_migrate_predictions_table(model_name)
        predictions_table = bq_service.nlp_v2_get_predictions_table(model_name) # API call. cache it here once.
        job.start()
        for chunk_df in read_csv(CSV_FILEPATH, chunksize=BATCH_SIZE): # FYI: this will include the last chunk even if it is not a full batch
            status_ids = chunk_df["status_id"].tolist()
            status_texts = chunk_df["status_text"].tolist()
            preds = clf.predict(tv.transform(status_texts))
            batch = [{"status_id": status_id, "prediction": pred} for status_id, pred in zip(status_ids, preds)]
            bq_service.insert_records_in_batches(predictions_table, batch)
            job.counter += len(chunk_df)
            job.progress_report()
            batch = []
        job.end()
| 2,580 | 32.960526 | 140 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp_v2/model_training.py |
import os
from datetime import datetime
from pprint import pprint
from pandas import DataFrame, read_csv
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report # accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.pipeline import Pipeline
#from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
from app import APP_ENV, DATA_DIR, seek_confirmation
#from app.job import Job
from app.decorators.number_decorators import fmt_n
#from app.bq_service import BigQueryService
from app.nlp.model_storage import ModelStorage
NLP_DIR = os.path.join(DATA_DIR, "nlp_v2")
def three_community_labels(score):
    """Collapse every score strictly between 0 and 1 to the midpoint 0.5 ("not sure");
    pass boundary scores (<= 0 or >= 1) through unchanged."""
    return 0.5 if 0 < score < 1 else score
def three_community_labels_v2(score):
    """Bucket a 0-1 community score into three party labels:
    "D" below 0.3, "R" above 0.7, "U" (undecided) in between (inclusive)."""
    if score < 0.3:
        return "D"
    if score > 0.7:
        return "R"
    return "U"
def two_community_party_labels(score):
    """Split a 0-1 community score into two party labels: "D" at or below 0.5, "R" above."""
    return "D" if score <= 0.5 else "R"
def generate_histogram(df, label_column, img_filepath=None, show_img=False, title="Data Labels"):
    """Print the value counts of `label_column` and plot a grey histogram of its values.

    Params:
        df (DataFrame) the data to summarize.
        label_column (str) which column to histogram.
        img_filepath (str) if given, save the figure to this path.
        show_img (bool) if True, display the figure interactively.
        title (str) chart title.
    """
    labels_series = df[label_column]
    print(labels_series.value_counts())
    plt.grid()
    plt.title(title)
    plt.hist(labels_series, color="grey")
    plt.xlabel("Label")
    plt.ylabel("Frequency")
    if img_filepath:
        plt.savefig(img_filepath)
    if show_img:
        plt.show()
    plt.clf()  # reset the figure so later plots don't inherit this one's contents
class Trainer:
    """Trains and evaluates text classifiers that predict a tweet's political community
    label from its text.
    Pipeline: load_data() -> split_data() -> vectorize() -> train_and_score_models().
    """
    def __init__(self):
        self.text_column = "status_text"  # df column containing the tweet text
        self.raw_label_column = "avg_community_score"  # df column containing the raw 0-1 score
        self.label_maker = two_community_party_labels #vanity_labels # squish_the_middle
        self.label_column = "community_label"  # df column to hold the derived categorical label
        # populated by load_data():
        self.df = None
        # populated by split_data():
        self.df_train = None
        self.df_test = None
        self.x_train = None
        self.y_train = None
        self.x_test = None
        self.y_test = None
        # populated by vectorize():
        self.tv = None
        self.matrix_train = None
        self.matrix_test = None
    def load_data(self):
        """Loads the labeled statuses CSV, plots the raw score distributions, and derives
        the categorical label column from the raw scores using self.label_maker."""
        print("--------------------------")
        print("LOADING LABELED DATA...")
        self.df = read_csv(os.path.join(NLP_DIR, "2_community_labeled_status_texts.csv"))
        generate_histogram(self.df, self.raw_label_column, title="Raw Data Labels", img_filepath=os.path.join(NLP_DIR, "raw_histogram.png"))
        df_middle = self.df[(self.df[self.raw_label_column] > 0) & (self.df[self.raw_label_column] < 1)]
        generate_histogram(df_middle, self.raw_label_column, title="Raw Data Labels (Excluding 0 and 1)", img_filepath=os.path.join(NLP_DIR, "raw_histogram_middle.png"))
        #def process_data(self):
        print("--------------------------")
        print("PRE-PROCESSING...")
        # If you leave some of the 1 count labels in, when you try to stratify, you'll get ValueError: The least populated class in y has only 1 member, which is too few. The minimum number of groups for any class cannot be less than 2.
        # So let's take all the statuses with a score in-between 0 and 1, and give them a label of 0.5 (not sure)
        self.df[self.label_column] = self.df[self.raw_label_column].apply(self.label_maker)
        # Need to convert floats to integers or else Logistic Regression will raise ValueError: Unknown label type: 'continuous'
        #self.df[self.label_column] = self.df[self.label_column].astype(str) # convert to categorical
        generate_histogram(self.df, self.label_column, title="Training Data", img_filepath=os.path.join(NLP_DIR, "training_data_histogram.png"))
    def split_data(self):
        """Performs a stratified 80/20 train/test split and extracts x/y series for each split."""
        print("--------------------------")
        print("SPLITTING...")
        self.df_train, self.df_test = train_test_split(self.df, stratify=self.df[self.label_column], test_size=0.2, random_state=99)
        print("--------------------------")
        print("TRAIN:")
        print(self.df_train.head())
        generate_histogram(self.df_train, self.label_column) # should ideally be around equal for each class!
        self.x_train = self.df_train[self.text_column]
        self.y_train = self.df_train[self.label_column]
        #print("--------------------------")
        #print("TEST:")
        #generate_histogram(self.df_test, self.label_column) # should have same dist
        self.x_test = self.df_test[self.text_column]
        self.y_test = self.df_test[self.label_column]
    def vectorize(self):
        """Fits a TF-IDF vectorizer on the training texts only (to avoid leakage) and
        transforms both splits into sparse document-term matrices."""
        print("--------------------------")
        print("VECTORIZING...")
        self.tv = TfidfVectorizer()
        self.tv.fit(self.x_train)
        print("FEATURES / TOKENS:", fmt_n(len(self.tv.get_feature_names())))
        self.matrix_train = self.tv.transform(self.x_train)
        print("FEATURE MATRIX (TRAIN):", type(self.matrix_train), self.matrix_train.shape)
        self.matrix_test = self.tv.transform(self.x_test)
        print("FEATURE MATRIX (TEST):", type(self.matrix_test), self.matrix_test.shape)
    def train_and_score_models(self, models=None):
        """Trains each model, prints train/test classification reports, and saves the
        vectorizer, fitted model, and scores via ModelStorage.

        Params:
            models (dict) optional mapping of model name to an (unfit) sklearn estimator;
                defaults to logistic regression and multinomial naive bayes.
        """
        job_id = ("dev" if APP_ENV == "development" else datetime.now().strftime("%Y-%m-%d-%H%M")) # overwrite same model in development
        models = models or {
            "logistic_regression": LogisticRegression(random_state=99),
            "multinomial_nb": MultinomialNB()
        }
        for model_name in models.keys():
            print("--------------------------")
            print("MODEL:")
            model = models[model_name]
            print(model)
            print("TRAINING...")
            model.fit(self.matrix_train, self.y_train)
            print("TRAINING SCORES:")
            y_pred_train = model.predict(self.matrix_train)
            scores_train = classification_report(self.y_train, y_pred_train, output_dict=True)
            print("ACCY:", scores_train["accuracy"])
            pprint(scores_train)
            print("TEST SCORES:")
            y_pred_test = model.predict(self.matrix_test)
            scores_test = classification_report(self.y_test, y_pred_test, output_dict=True)
            print("ACCY:", scores_test["accuracy"])
            pprint(scores_test)
            print("SAVING ...")
            storage = ModelStorage(dirpath=f"nlp_v2/models/{job_id}/{model_name}")
            storage.save_vectorizer(self.tv)
            storage.save_model(model)
            storage.save_scores({
                "model_name": model_name,
                "job_id": job_id,
                "features": len(self.tv.get_feature_names()),
                "label_maker": self.label_maker.__name__,
                "matrix_train": self.matrix_train.shape,
                "matrix_test": self.matrix_test.shape,
                "scores_train": scores_train,
                "scores_test": scores_test
            })
if __name__ == "__main__":
    # run the full training pipeline: load, label, split, vectorize, train, score, save
    trainer = Trainer()
    trainer.load_data()
    trainer.df.drop(["avg_community_score", "status_occurrences"], axis="columns", inplace=True) # just make the df smaller
    trainer.split_data()
    trainer.vectorize()
    trainer.train_and_score_models()
| 7,434 | 34.574163 | 236 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp_v2/client.py | import os
from app.nlp.model_storage import ModelStorage
MODEL_DIRPATH = os.getenv("MODEL", default="nlp_v2/models/best/multinomial_nb")
if __name__ == "__main__":
    # interactive prompt: load the stored vectorizer + classifier, then repeatedly
    # predict a community label for user-supplied status text (empty input exits)
    storage = ModelStorage(dirpath=MODEL_DIRPATH)
    tv = storage.load_vectorizer()
    print(type(tv))
    print("FEATURES / TOKENS:", len(tv.get_feature_names())) #> 3842
    clf = storage.load_model()
    print(type(clf))
    while True:
        status_text = input("Status Text: ")
        if not status_text:
            print("THANKS! COME AGAIN!")
            break
        matrix = tv.transform([status_text])
        #print(matrix)
        result = clf.predict(matrix)
        print("PREDICTION:", result[0])
| 693 | 22.133333 | 79 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp_v2/bulk_predict_old.py | # Don't use this method. Just fetch tweets once otherwise its very expensive
#import os
#
#from app import seek_confirmation
#from app.job import Job
#from app.bq_service import BigQueryService
#from app.nlp.model_storage import ModelStorage
#
#LIMIT = os.getenv("LIMIT")
#BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
#
#if __name__ == "__main__":
#
# for model_name in ["logistic_regression", "multinomial_nb"]:
#
# storage = ModelStorage(dirpath=f"nlp_v2/models/best/{model_name}")
# tv = storage.load_vectorizer()
# clf = storage.load_model()
#
# bq_service = BigQueryService()
#
# print(f"DESTROY PREDICTIONS TABLE? ({model_name})")
# seek_confirmation()
# bq_service.nlp_v2_destructively_migrate_predictions_table(model_name)
# predictions_table = bq_service.nlp_v2_get_predictions_table(model_name) # API call. cache it here once.
#
# job = Job()
# job.start()
#
# ids_batch = []
# statuses_batch = []
# for row in bq_service.nlp_v2_fetch_statuses(limit=LIMIT):
# ids_batch.append(row["status_id"])
# statuses_batch.append(row["status_text"])
#
# job.counter += 1
# if job.counter % BATCH_SIZE == 0:
# preds = clf.predict(tv.transform(statuses_batch))
# batch = [{"status_id": s_id, "prediction": pred} for s_id, pred in zip(ids_batch, preds)]
# bq_service.insert_records_in_batches(predictions_table, batch)
#
# job.progress_report()
# ids_batch = []
# statuses_batch = []
# batch = []
#
# if len(statuses_batch) > 0:
# preds = clf.predict(tv.transform(statuses_batch))
# batch = [{"status_id": s_id, "prediction": pred} for s_id, pred in zip(ids_batch, preds)]
# bq_service.insert_records_in_batches(predictions_table, batch)
#
# job.progress_report()
# ids_batch = []
# statuses_batch = []
# batch = []
#
# job.end()
#
| 2,078 | 33.65 | 112 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp_v2/bert_score_uploader.py |
import os
from pandas import read_csv
from app import DATA_DIR, seek_confirmation
from app.job import Job
from app.bq_service import BigQueryService
from app.retweet_graphs_v2.k_days.generator import DateRangeGenerator
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=25000)) # the max number of processed users to store in BQ at once (with a single insert API call)
if __name__ == "__main__":
    bq_service = BigQueryService()
    job = Job()
    # recreate the "bert" predictions table, then upload pre-computed BERT scores
    # from one local CSV per daily date range
    print(f"DESTROY PREDICTIONS TABLE? (BERT)")
    seek_confirmation()
    bq_service.nlp_v2_destructively_migrate_predictions_table("bert")
    predictions_table = bq_service.nlp_v2_get_predictions_table("bert")
    job.start()
    for dr in DateRangeGenerator(start_date="2019-12-20", k_days=1, n_periods=58).date_ranges:
        print(dr.start_date)
        csv_filepath = os.path.join(DATA_DIR, "daily_active_edge_friend_graphs_v5", dr.start_date, "tweets_BERT_Impeachment_800KTweets.csv")
        #df = read_csv(csv_filepath, usecols=["status_id", "text", "logit_0", "logit_1", "opinion_tweet"], nrows=100)
        #print(df.head())
        for chunk_df in read_csv(csv_filepath, usecols=["status_id", "logit_0", "logit_1", "opinion_tweet"], chunksize=BATCH_SIZE): # FYI: this will include the last chunk even if it is not a full batch
            chunk_df.rename(columns={"opinion_tweet": "prediction"}, inplace=True)
            #print(chunk_df.head())
            batch = chunk_df.to_dict("records")
            bq_service.insert_records_in_batches(predictions_table, batch)
            job.counter += len(batch)
            job.progress_report()
    job.end()
| 1,628 | 37.785714 | 202 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/retweet_analyzer.py |
import os
from functools import lru_cache
from pandas import DataFrame
import matplotlib.pyplot as plt
import plotly.express as px
import squarify
from app import APP_ENV, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.bot_communities.csv_storage import LocalStorage
from app.bot_communities.tokenizers import Tokenizer
from app.bot_communities.token_analyzer import summarize_token_frequencies, train_topic_model, parse_topics, LdaMulticore
class RetweetsAnalyzer:
    """Analyzes one bot community's retweets: most-retweeted users, users with the most
    retweeters, top-token summaries, word clouds, and (optional) topic models.
    Charts and CSVs are written under local_dirpath."""
    def __init__(self, community_id, community_retweets_df, local_dirpath, tokenize=None):
        """
        Params:
            community_id (int) the bot community identifier.
            community_retweets_df (DataFrame) retweets made by members of this community.
            local_dirpath (str) output directory for charts / CSVs (created if absent).
            tokenize (callable) optional text tokenizer (defaults to Tokenizer().custom_stems).
        """
        self.community_id = community_id
        self.community_retweets_df = community_retweets_df
        self.local_dirpath = local_dirpath
        self.tokenize = tokenize or Tokenizer().custom_stems # todo: see if we can use a spacy version
        if not os.path.exists(self.local_dirpath):
            os.makedirs(self.local_dirpath)
        self.customize_paths_and_titles()
    def customize_paths_and_titles(self):
        """Overwrite all in child class as desired"""
        self.most_retweets_chart_filepath = os.path.join(self.local_dirpath, "most-retweets.png")
        self.most_retweets_chart_title = f"Users Most Retweeted by Bot Community {self.community_id}"
        self.most_retweeters_chart_filepath = os.path.join(self.local_dirpath, "most-retweeters.png")
        self.most_retweeters_chart_title = f"Users with Most Retweeters from Bot Community {self.community_id}"
        self.top_tokens_csv_filepath = os.path.join(self.local_dirpath, "top-tokens.csv")
        self.top_tokens_wordcloud_filepath = os.path.join(self.local_dirpath, "top-tokens-wordcloud.png")
        self.top_tokens_wordcloud_title = f"Word Cloud for Community {self.community_id} (n={fmt_n(len(self.community_retweets_df))})"
        self.topics_csv_filepath = os.path.join(self.local_dirpath, "topics.csv")
    @property
    @lru_cache(maxsize=None)
    def most_retweets_df(self):
        """Returns a DataFrame of retweeted users and their distinct retweet counts (cached)."""
        print("USERS WITH MOST RETWEETS")
        df = self.community_retweets_df.groupby("retweeted_user_screen_name").agg({"status_id": ["nunique"]})
        # fix / un-nest column names after the group:
        df.columns = list(map(" ".join, df.columns.values))
        df = df.reset_index()
        df.rename(columns={"status_id nunique": "Retweet Count", "retweeted_user_screen_name": "Retweeted User"}, inplace=True)
        return df
    def generate_most_retweets_chart(self, top_n=10):
        """Saves (and in development, shows) a horizontal bar chart of the top_n most retweeted users."""
        chart_df = self.most_retweets_df.copy()
        chart_df.sort_values("Retweet Count", ascending=False, inplace=True) # sort for top
        chart_df = chart_df[:top_n] # take top n rows
        chart_df.sort_values("Retweet Count", ascending=True, inplace=True) # re-sort for chart
        fig = px.bar(chart_df, x="Retweet Count", y="Retweeted User", orientation="h", title=self.most_retweets_chart_title)
        if APP_ENV == "development":
            fig.show()
        fig.write_image(self.most_retweets_chart_filepath)
    @property
    @lru_cache(maxsize=None)
    def most_retweeters_df(self):
        """Returns a DataFrame of retweeted users and their distinct retweeter (user) counts (cached)."""
        print("USERS WITH MOST RETWEETERS")
        df = self.community_retweets_df.groupby("retweeted_user_screen_name").agg({"user_id": ["nunique"]})
        df.columns = list(map(" ".join, df.columns.values))
        df = df.reset_index()
        df.rename(columns={"user_id nunique": "Retweeter Count", "retweeted_user_screen_name": "Retweeted User"}, inplace=True)
        return df
    def generate_most_retweeters_chart(self, top_n=10):
        """Saves (and in development, shows) a horizontal bar chart of the top_n users with most retweeters."""
        chart_df = self.most_retweeters_df.copy()
        chart_df.sort_values("Retweeter Count", ascending=False, inplace=True) # sort for top
        chart_df = chart_df[:top_n]
        chart_df.sort_values("Retweeter Count", ascending=True, inplace=True) # re-sort for chart
        fig = px.bar(chart_df, x="Retweeter Count", y="Retweeted User", orientation="h", title=self.most_retweeters_chart_title)
        if APP_ENV == "development":
            fig.show()
        fig.write_image(self.most_retweeters_chart_filepath)
    #
    # NLP
    #
    @property
    @lru_cache(maxsize=None)
    def status_tokens(self):
        """Returns pandas.core.series.Series of statuses converted to tokens"""
        print("TOKENIZING...")
        return self.community_retweets_df["status_text"].apply(self.tokenize)
    @property
    @lru_cache(maxsize=None)
    def top_tokens_df(self):
        """Returns a DataFrame of token frequency metrics across all statuses (cached)."""
        return summarize_token_frequencies(self.status_tokens.values.tolist())
    def save_top_tokens(self):
        self.top_tokens_df.to_csv(self.top_tokens_csv_filepath)
    def generate_top_tokens_wordcloud(self, top_n=20):
        """Saves (and in development, shows) a treemap-style word cloud of the top_n tokens by rank."""
        print("TOP TOKENS WORD CLOUD...")
        chart_df = self.top_tokens_df[self.top_tokens_df["rank"] <= top_n]
        squarify.plot(sizes=chart_df["pct"], label=chart_df["token"], alpha=0.8)
        plt.title(self.top_tokens_wordcloud_title)
        plt.axis("off")
        if APP_ENV == "development":
            plt.show()
        plt.savefig(self.top_tokens_wordcloud_filepath)
        plt.clf() # clear the figure, to prevent topic text overlapping from previous plots
    #
    # TOPIC MODELING - not really used right now / yet
    #
    @property
    @lru_cache(maxsize=None)
    def topic_model(self):
        """Trains (and caches) an LDA topic model over the tokenized statuses."""
        ## if local file exists, load and return it, otherwise train a new one, save it and return it
        #if os.path.isfile(local_lda_path):
        #    lda = LdaModel.load(local_lda_path)
        #else:
        #    lda = train_topic_model(self.status_tokens.values.tolist())
        #    lda.save(local_lda_path)
        #return lda
        return train_topic_model(self.status_tokens.values.tolist())
    @property
    @lru_cache(maxsize=None)
    def topics_df(self):
        return DataFrame(parse_topics(self.topic_model)) # this doesn't make the most sense in current form, as it represents a sparse matrix where there is a column per term
    def save_topics(self):
        self.topics_df.to_csv(self.topics_csv_filepath)
if __name__ == "__main__":
    # for each bot community, run the retweet analysis and write charts / CSVs
    # into a per-community subdirectory
    storage = LocalStorage()
    storage.load_retweets()
    print(storage.retweets_df.head())
    seek_confirmation()
    for community_id in storage.retweet_community_ids:
        filtered_df = storage.retweets_df[storage.retweets_df["community_id"] == community_id]
        local_dirpath = os.path.join(storage.local_dirpath, f"community-{community_id}")
        community_analyzer = RetweetsAnalyzer(community_id=community_id, community_retweets_df=filtered_df, local_dirpath=local_dirpath)
        community_analyzer.generate_most_retweets_chart()
        community_analyzer.generate_most_retweeters_chart()
        community_analyzer.top_tokens_df
        community_analyzer.save_top_tokens()
        community_analyzer.generate_top_tokens_wordcloud()
        #community_analyzer.topics_df # TODO: taking too long for entire dataset of tweets. more feasible with daily slices
        #community_analyzer.save_topics() # TODO: taking too long for entire dataset of tweets. more feasible with daily slices
| 7,116 | 41.873494 | 174 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/bot_similarity_grapher.py |
import os
from networkx import write_gpickle, read_gpickle, jaccard_coefficient, Graph
from app import seek_confirmation
from app.bot_communities.bot_retweet_grapher import BotRetweetGrapher
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
class BotSimilarityGrapher(BotRetweetGrapher):
    """Extends the bot retweet grapher to also construct a bot-to-bot similarity graph,
    where edge weights are Jaccard coefficients over the sets of users each bot retweets."""
    def __init__(self):
        super().__init__()
        self.similarity_graph = None  # networkx.Graph, set by perform() or load_similarity_graph()
    @property
    def retweet_graph(self):
        """Alias for the retweet graph loaded by the parent class."""
        return self.graph
    def retweet_graph_report(self):
        self.report()
    def perform(self):
        """
        Given:
            bot_ids (list) a unique list of bot ids, which should all be included as nodes in the bot retweet graph.
                The retweet graph will also contain retweeted users. So that's why we need a separate list.
                The bot ids will be used as nodes in the similarity graph.
            bot_retweet_graph (networkx.DiGraph) a retweet graph generated from the bot list
        Returns: a similarity graph (networkx.Graph), where the similarity is based on the Jaccard index.
            For each pair of bots we calculate the Jaccard index based on the sets of people they retweet.
            If two bots retweet exactly the same users, their Jaccard index is one.
            If they don't retweet anyone in common, their Jaccard index is zero.
        """
        # BUGFIX: was `grapher.retweet_graph_report()`, referencing the module-level
        # variable created in __main__ — that raised NameError whenever perform()
        # was called on any other instance. Use self instead.
        self.retweet_graph_report()
        bot_ids = [row.user_id for row in self.bq_service.fetch_bot_ids(bot_min=self.bot_min)]
        print("FETCHED", fmt_n(len(bot_ids)), "BOT IDS")
        # pair up every two bots that both appear in the retweet graph
        node_pairs = []
        for i, bot_id in enumerate(bot_ids):
            for other_bot_id in bot_ids[i+1:]:
                if self.retweet_graph.has_node(other_bot_id) and self.retweet_graph.has_node(bot_id):
                    node_pairs.append((bot_id, other_bot_id))
        # could maybe just take the combinations between all nodes in the bot graph
        # because we can assume they were assembled using the same bot ids as the ones here
        # but the point is to be methodologically sound and it doesn't take that long
        print("NODE PAIRS:", fmt_n(len(node_pairs)))
        results = jaccard_coefficient(self.retweet_graph.to_undirected(), node_pairs)
        #> returns an iterator of 3-tuples in the form (u, v, p)
        #> where (u, v) is a pair of nodes and p is their Jaccard coefficient.
        print("JACCARD COEFFICIENTS BETWEEN EACH NODE PAIR - COMPLETE!")
        print("CONSTRUCTING SIMILARITY GRAPH...")
        self.similarity_graph = Graph()
        edge_count = 0
        # only keep pairs with some overlap; a zero coefficient means no shared retweets
        for bot_id, other_bot_id, similarity_score in results:
            if similarity_score > 0:
                self.similarity_graph.add_edge(bot_id, other_bot_id, weight=similarity_score)
                edge_count += 1
            self.counter += 1
            if self.counter % self.batch_size == 0:
                print(logstamp(), "|", fmt_n(self.counter), "|", fmt_n(edge_count), "EDGES")
    #
    # BOT SIMILARITY GRAPH STORAGE
    # TODO: refactor into a new storage service to inherit from the base storage service,
    # and mix that in instead (requires some parent class de-coupling)
    #
    @property
    def local_similarity_graph_filepath(self):
        return os.path.join(self.local_dirpath, "similarity_graph.gpickle")
    @property
    def gcs_similarity_graph_filepath(self):
        return os.path.join(self.gcs_dirpath, "similarity_graph.gpickle")
    def write_similarity_graph(self):
        print("SAVING SIMILARITY GRAPH...")
        write_gpickle(self.similarity_graph, self.local_similarity_graph_filepath)
    def upload_similarity_graph(self):
        print("UPLOADING SIMILARITY GRAPH...")
        self.upload_file(self.local_similarity_graph_filepath, self.gcs_similarity_graph_filepath)
    def load_similarity_graph(self):
        """Loads the similarity graph from local disk, downloading it from GCS first if needed."""
        print("LOADING SIMILARITY GRAPH...")
        if not os.path.isfile(self.local_similarity_graph_filepath):
            self.download_file(self.gcs_similarity_graph_filepath, self.local_similarity_graph_filepath)
        return read_gpickle(self.local_similarity_graph_filepath)
    def save_similarity_graph(self):
        self.write_similarity_graph()
        self.upload_similarity_graph()
    def similarity_graph_report(self):
        """Prints node / edge counts, lazily loading the similarity graph if not yet in memory."""
        if not self.similarity_graph:
            self.similarity_graph = self.load_similarity_graph()
        print("-------------------")
        print("SIMILARITY GRAPH", type(self.similarity_graph))
        print("  NODES:", fmt_n(self.similarity_graph.number_of_nodes()))
        print("  EDGES:", fmt_n(self.similarity_graph.number_of_edges()))
        print("-------------------")
if __name__ == "__main__":
    # build the bot similarity graph from the retweet graph, then report and persist it
    grapher = BotSimilarityGrapher()
    grapher.start()
    grapher.perform()
    grapher.end()
    grapher.similarity_graph_report()
    grapher.save_similarity_graph()
| 5,089 | 40.382114 | 141 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/token_analyzer.py | from collections import Counter
from pandas import DataFrame
from gensim.corpora import Dictionary
from gensim.models.ldamulticore import LdaMulticore
#from gensim.models import TfidfModel
def summarize_token_frequencies(token_sets):
    """
    Param token_sets : a list of token lists, one per document in a collection
    Returns a DataFrame with one row per token and columns for term frequency,
    document frequency, and their normalized / ranked variants.
    """
    print("COMPUTING TOKEN AND DOCUMENT FREQUENCIES...")
    token_counter = Counter()
    doc_counter = Counter()
    for tokens in token_sets:
        token_counter.update(tokens)
        doc_counter.update(set(tokens))  # de-dupe so each token counts at most once per doc
    token_df = DataFrame(list(token_counter.items()), columns=["token", "count"])
    doc_df = DataFrame(list(doc_counter.items()), columns=["token", "doc_count"])
    df = doc_df.merge(token_df, on="token")
    df["rank"] = df["count"].rank(method="first", ascending=False)
    df["pct"] = df["count"] / df["count"].sum()
    df["doc_pct"] = df["doc_count"] / len(token_sets)
    ordered_columns = ["token", "rank", "count", "pct", "doc_count", "doc_pct"]
    return df.reindex(columns=ordered_columns).sort_values(by="rank")
#
# TOPIC MODELING
#
def train_topic_model(token_sets):
    """Fits a multicore LDA topic model over the given token lists and returns it.

    Param token_sets : a list of token lists, one per document.
    """
    dictionary = Dictionary(token_sets)
    print(type(dictionary)) #> <class 'gensim.corpora.dictionary.Dictionary'>
    corpus = [dictionary.doc2bow(tokens) for tokens in token_sets]
    lda = LdaMulticore(corpus=corpus, id2word=dictionary, random_state=99, passes=1, workers=3)
    print(type(lda))
    return lda
def parse_topics(lda):
    """
    Params: lda (gensim.models.ldamulticore.LdaMulticore) a pre-fit LDA model
    Returns: a list with one dict per topic mapping each term to its weight, like...
        {'impeach': 0.058, 'trump': 0.052, 'gop': 0.042, ...}
    """
    parsed_response = []
    for topic_row in lda.print_topics():
        # topic_row[1] looks like: '0.067*"sleep" + 0.067*"got" + 0.067*"went" + ...'
        term_pairs = [term.replace('"', "").split("*") for term in topic_row[1].split(" + ")]
        parsed_response.append({token: float(weight) for weight, token in term_pairs})
    return parsed_response
| 2,787 | 40 | 210 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/bot_profile_analyzer_v2.py | import os
from app import DATA_DIR
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
from app.bot_communities.tokenizers import Tokenizer #, SpacyTokenizer
from app.bot_communities.token_analyzer import summarize_token_frequencies
from pandas import DataFrame
if __name__ == "__main__":
    file_storage = FileStorage(dirpath="bot_retweet_graphs/bot_min/0.8/n_communities/2/analysis_v2")
    bq_service = BigQueryService()
    tokenizer = Tokenizer()
    # fetch one record per bot, with its community assignment and profile descriptions
    results = [dict(row) for row in list(bq_service.fetch_bot_community_profiles(n_communities=2))]
    print("FETCHED", len(results), "RECORDS")
    # tokenize each bot's profile descriptions into stems and hashtags
    for i, row in enumerate(results):
        row["profile_tokens"] = []
        row["profile_tags"] = []
        if row["user_descriptions"]:
            #print("--------------")
            #print("COMMUNITY", row["community_id"], i, row["bot_id"], row["screen_names"])
            #print(row["user_descriptions"])
            # we want unique tokens here because otherwise someone changing their description (multiple descriptions) will have a greater influence over the counts
            # but then it makes TF/IDF not possible because the doc counts are the same as the token counts
            # really we are just counting number of users who have these tokens...
            tokens = list(set(tokenizer.custom_stems(row["user_descriptions"])))
            row["profile_tokens"] = tokens
            #print("TOKENS:", tokens)
            tags = list(set(tokenizer.hashtags(row["user_descriptions"])))
            row["profile_tags"] = tags
            #print("TAGS:", tags)
    print("--------------")
    print("BOT PROFILES:")
    profiles_df = DataFrame(results)
    print(profiles_df.head())
    # SAVE AND UPLOAD PROFILES
    local_profiles_filepath = os.path.join(file_storage.local_dirpath, "community_profiles.csv")
    gcs_profiles_filepath = os.path.join(file_storage.gcs_dirpath, "community_profiles.csv")
    profiles_df.to_csv(local_profiles_filepath)
    file_storage.upload_file(local_profiles_filepath, gcs_profiles_filepath)
    # per community: summarize token / tag frequencies, save CSVs, and upload to BigQuery
    for community_id, filtered_df in profiles_df.groupby(["community_id"]):
        print("--------------")
        print(f"COMMUNITY {community_id}:", len(filtered_df))
        local_community_dirpath = os.path.join(file_storage.local_dirpath, f"community_{community_id}")
        gcs_community_dirpath = os.path.join(file_storage.gcs_dirpath, f"community_{community_id}")
        if not os.path.exists(local_community_dirpath):
            os.makedirs(local_community_dirpath)
        tokens_df = summarize_token_frequencies(filtered_df["profile_tokens"].tolist())
        print(tokens_df.head())
        # SAVE AND UPLOAD PROFILE TOKENS
        local_tokens_filepath = os.path.join(local_community_dirpath, "profile_tokens.csv")
        gcs_tokens_filepath = os.path.join(gcs_community_dirpath, "profile_tokens.csv")
        tokens_df.to_csv(local_tokens_filepath)
        file_storage.upload_file(local_tokens_filepath, gcs_tokens_filepath)
        token_records = tokens_df[tokens_df["count"] > 1].to_dict("records")
        bq_service.upload_bot_community_profile_tokens(community_id=community_id, records=token_records)
        tags_df = summarize_token_frequencies(filtered_df["profile_tags"].tolist())
        print(tags_df.head())
        # SAVE AND UPLOAD PROFILE TAGS
        local_tags_filepath = os.path.join(local_community_dirpath, "profile_tags.csv")
        gcs_tags_filepath = os.path.join(gcs_community_dirpath, "profile_tags.csv")
        tags_df.to_csv(local_tags_filepath)
        file_storage.upload_file(local_tags_filepath, gcs_tags_filepath)
        tag_records = tags_df[tags_df["count"] > 1].to_dict("records")
        bq_service.upload_bot_community_profile_tags(community_id=community_id, records=tag_records)
| 3,836 | 48.192308 | 163 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/bot_tweet_analyzer_v2.py | import os
from app import DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService
from app.file_storage import FileStorage
from app.bot_communities.tokenizers import Tokenizer
from app.bot_communities.token_analyzer import summarize_token_frequencies
from pandas import DataFrame, read_csv
LIMIT = os.getenv("LIMIT") # for development purposes
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=10000)) # the max number of processed users to store in BQ at once (with a single insert API call)
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
if __name__ == "__main__":
    # INIT
    file_storage = FileStorage(dirpath="bot_retweet_graphs/bot_min/0.8/n_communities/2/analysis_v2")
    local_tweets_filepath = os.path.join(file_storage.local_dirpath, "community_tweets.csv")
    #gcs_tweets_filepath = os.path.join(file_storage.gcs_dirpath, "community_tweets.csv")
    bq_service = BigQueryService()
    tokenizer = Tokenizer()
    print("------------------------------")
    print("BOT STATUS ANALYZER V2...")
    print("  LIMIT:", LIMIT)
    print("  BATCH SIZE:", BATCH_SIZE)
    print("  DESTRUCTIVE:", DESTRUCTIVE)
    seek_confirmation()
    # LOAD STATUSES
    # reuse the local CSV when present (unless DESTRUCTIVE); otherwise fetch from
    # BigQuery, tokenize each status into stems + hashtags, and cache the result locally
    if os.path.isfile(local_tweets_filepath) and not DESTRUCTIVE:
        print("LOADING STATUSES...")
        statuses_df = read_csv(local_tweets_filepath)
        print(statuses_df.head())
    else:
        print("FETCHING STATUSES...")
        results = []
        counter = 0
        # TODO: consider doing a query per community, to reduce memory costs
        for row in bq_service.fetch_bot_community_statuses(n_communities=2, limit=LIMIT):
            row = dict(row)
            row["status_tokens"] = []
            row["status_tags"] = []
            if row["status_text"]:
                row["status_tokens"] = tokenizer.custom_stems(row["status_text"])
                row["status_tags"] = tokenizer.hashtags(row["status_text"])
            results.append(row)
            counter += 1
            if counter % BATCH_SIZE == 0:
                print(logstamp(), fmt_n(counter))
        print("--------------")
        print("BOT STATUSES:")
        statuses_df = DataFrame(results)
        print(statuses_df.head())
        # SAVE AND UPLOAD TWEETS
        statuses_df.to_csv(local_tweets_filepath)
        #file_storage.upload_file(local_tweets_filepath, gcs_tweets_filepath)
        del results
    # PERFORM
    # per community: summarize token / tag frequencies, save CSVs, and upload top records to BigQuery
    for community_id, filtered_df in statuses_df.groupby(["community_id"]):
        print("--------------")
        print(f"COMMUNITY {community_id}:", fmt_n(len(filtered_df)))
        local_community_dirpath = os.path.join(file_storage.local_dirpath, f"community_{community_id}")
        gcs_community_dirpath = os.path.join(file_storage.gcs_dirpath, f"community_{community_id}")
        if not os.path.exists(local_community_dirpath):
            os.makedirs(local_community_dirpath)
        local_tokens_filepath = os.path.join(local_community_dirpath, "status_tokens.csv")
        gcs_tokens_filepath = os.path.join(gcs_community_dirpath, "status_tokens.csv")
        # NOTE(review): this condition checks the directory (created just above, so it
        # always exists here) rather than local_tokens_filepath — the token branch only
        # runs when DESTRUCTIVE. Possibly unintended; compare the tags branch below.
        if not os.path.exists(local_community_dirpath) or DESTRUCTIVE:
            print("CALCULATING STATUS TOKEN FREQUENCIES...")
            tokens_df = summarize_token_frequencies(filtered_df["status_tokens"].tolist())
            print(tokens_df.head())
            # SAVE AND UPLOAD TOP TOKENS
            tokens_df.to_csv(local_tokens_filepath)
            file_storage.upload_file(local_tokens_filepath, gcs_tokens_filepath)
            token_records = tokens_df[tokens_df["count"] > 1].to_dict("records")[0:1000]
            bq_service.upload_bot_community_status_tokens(community_id=community_id, records=token_records)
            del tokens_df # clear memory!
        local_tags_filepath = os.path.join(local_community_dirpath, "status_tags.csv")
        gcs_tags_filepath = os.path.join(gcs_community_dirpath, "status_tags.csv")
        if not os.path.exists(local_tags_filepath) or DESTRUCTIVE:
            print("CALCULATING STATUS TAG FREQUENCIES...")
            tags_df = summarize_token_frequencies(filtered_df["status_tags"].tolist())
            print(tags_df.head())
            # SAVE AND UPLOAD TOP TAGS
            tags_df.to_csv(local_tags_filepath)
            file_storage.upload_file(local_tags_filepath, gcs_tags_filepath)
            tag_records = tags_df[tags_df["count"] > 1].to_dict("records")[0:1000]
            bq_service.upload_bot_community_status_tags(community_id=community_id, records=tag_records)
            del tags_df # clear memory!
| 4,750 | 45.126214 | 147 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/bot_retweet_grapher.py |
import os
from networkx import DiGraph, read_gpickle
from app.retweet_graphs_v2.retweet_grapher import RetweetGrapher
BOT_MIN = float(os.getenv("BOT_MIN", default="0.8"))

class BotRetweetGrapher(RetweetGrapher):
    """Builds a directed retweet graph restricted to users whose bot score meets a minimum threshold."""

    def __init__(self, bot_min=BOT_MIN):
        """Param bot_min (float): minimum daily bot score for a user to be included."""
        self.bot_min = float(bot_min)
        super().__init__(storage_dirpath=f"bot_retweet_graphs/bot_min/{self.bot_min}")

    @property
    def metadata(self):
        """Parent metadata, extended with the bot score threshold."""
        merged = dict(super().metadata)
        merged["bot_min"] = self.bot_min
        return merged

    def perform(self):
        """Stream bot retweet edges from BigQuery into a weighted DiGraph, recording running results per batch."""
        self.results = []
        self.graph = DiGraph()
        edges = self.bq_service.fetch_bot_retweet_edges_in_batches(bot_min=self.bot_min)
        for edge in edges:
            self.graph.add_edge(edge["user_id"], edge["retweeted_user_id"], weight=edge["retweet_count"])
            self.counter += 1
            # snapshot running results at each batch boundary (progress reporting)
            if self.counter % self.batch_size == 0:
                self.results.append(self.running_results)
            # optional early exit for development runs
            if self.users_limit and self.counter >= self.users_limit:
                break
if __name__ == "__main__":
    # full graph-construction pipeline: metadata, timed perform, then persistence
    job = BotRetweetGrapher()
    job.save_metadata()
    job.start()
    job.perform()
    job.end()
    job.report()
    job.save_results()
    job.save_graph()
| 1,232 | 27.022727 | 102 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/tokenizers.py | import os
from collections import Counter
from functools import lru_cache
from pprint import pprint
import re
from dotenv import load_dotenv
from pandas import DataFrame
from nltk.corpus import stopwords as NLTK_STOPWORDS
from gensim.parsing.preprocessing import STOPWORDS as GENSIM_STOPWORDS
from spacy.lang.en.stop_words import STOP_WORDS as SPACY_STOP_WORDS
#from spacy.tokenizer import Tokenizer
import spacy
from nltk.stem import PorterStemmer
load_dotenv()
MODEL_SIZE = os.getenv("MODEL_SIZE", default="sm") # sm, md, lg

# Project-specific stopwords, merged with the NLTK / spaCy / gensim lists in Tokenizer.stop_words.
CUSTOM_STOP_WORDS = {
    "rt", "httpstco", "amp", # twitter / tweet stuff
    "today", "tonight", "tomorrow", "time", "ago",
    "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday",
    "want", "wants", "like", "get", "go", "say", "says", "told",
    "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "hundred", "thousand",
    "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10",
    "th", "im", "hes", "hi", "thi",
    # impeachment-specific stopwords
    "rep", "president", "presidents", "col",
    #"impeach", "impeachment", "impeached",
    # "trump", "articles", "trial", "house", "senate"
}

# Negated character classes: everything NOT listed gets stripped by re.sub.
# NOTE(review): the '^' inside the brackets is literal (only a leading '^' negates), so a
# literal caret also survives the strip -- presumably unintended; confirm before changing.
ALPHANUMERIC_PATTERN = r'[^a-zA-Z ^0-9]' # alphanumeric only (strict)
TWITTER_ALPHANUMERIC_PATTERN = r'[^a-zA-Z ^0-9 # @]' # alphanumeric, plus hashtag and handle symbols (twitter-specific)
class CustomStemmer():
    """Collapses a handful of domain-specific word variants onto a single canonical form."""

    # variant -> canonical form
    _MERGES = {
        "impeachment": "impeach",
        "impeached": "impeach",
        "trumps": "trump",
        "pelosis": "pelosi",
        "democrats": "democrat",
        "republicans": "republican",
    }

    def stem(self, token):
        """Return the canonical form of token, or the token unchanged if it has no mapping."""
        return self._MERGES.get(token, token)
class Tokenizer():
    """Splits tweet text into normalized tokens, stems, hashtags, and handles."""

    def __init__(self):
        self.porter_stemmer = PorterStemmer()
        self.custom_stemmer = CustomStemmer()

    @property
    @lru_cache(maxsize=None) # computed once; cache keys on self
    def stop_words(self):
        """Union of NLTK / spaCy / gensim / custom stopwords, plus contraction-less variants."""
        words = set(NLTK_STOPWORDS.words("english")) | SPACY_STOP_WORDS | GENSIM_STOPWORDS | CUSTOM_STOP_WORDS
        contractionless = set(word.replace("'","") for word in words if "'" in word) # "don't" -> "dont"
        return words | contractionless

    def basic_tokens(self, txt):
        """Lowercase, strip non-alphanumeric characters, split on whitespace, drop stopwords."""
        cleaned = re.sub(ALPHANUMERIC_PATTERN, "", txt.lower())
        return [tok for tok in cleaned.split() if tok not in self.stop_words]

    def porter_stems(self, txt):
        """Porter-stem the basic tokens, then drop any stems that are themselves stopwords."""
        stemmed = (self.porter_stemmer.stem(tok) for tok in self.basic_tokens(txt))
        return [stem for stem in stemmed if stem not in self.stop_words]

    def custom_stems(self, txt):
        """Apply the domain-specific stemmer to the basic tokens, then re-filter stopwords."""
        stemmed = (self.custom_stemmer.stem(tok) for tok in self.basic_tokens(txt))
        return [stem for stem in stemmed if stem not in self.stop_words]

    def hashtags(self, txt):
        """Return uppercased #hashtags (tokens starting, but not ending, with '#')."""
        cleaned = re.sub(TWITTER_ALPHANUMERIC_PATTERN, "", txt.upper())
        return [tok for tok in cleaned.split() if tok.startswith("#") and not tok.endswith("#")]

    def handles(self, txt):
        """Return uppercased @handles (tokens starting, but not ending, with '@')."""
        cleaned = re.sub(TWITTER_ALPHANUMERIC_PATTERN, "", txt.upper())
        return [tok for tok in cleaned.split() if tok.startswith("@") and not tok.endswith("@")]
class SpacyTokenizer(Tokenizer):
    """Tokenizer variant backed by a spaCy language model, for lemmas and named entities."""

    def __init__(self, model_size=MODEL_SIZE):
        """Param model_size (str): spaCy English model size suffix ("sm", "md", or "lg")."""
        super().__init__()
        self.model_name = f"en_core_web_{model_size}"
        self.nlp = spacy.load(self.model_name)
        print("SPACY TOKENIZER:", type(self.nlp), self.model_name.upper())

    def custom_stem_lemmas(self, txt):
        """Lemmatize via spaCy, apply the custom stemmer, and filter stopwords at each stage."""
        cleaned = re.sub(ALPHANUMERIC_PATTERN, "", txt.lower()) # keep only alphanumeric characters
        doc = self.nlp(cleaned) #> <class 'spacy.tokens.doc.Doc'>
        kept = [tok for tok in doc if not tok.is_punct and not tok.is_space]
        kept = [tok for tok in kept if not tok.is_stop and str(tok) not in self.stop_words] # double stopword removal!!!
        lemmas = [self.custom_stemmer.stem(tok.lemma_.lower()) for tok in kept]
        return [lemma for lemma in lemmas if lemma not in self.stop_words]

    def entity_tokens(self, txt):
        """Return spaCy named-entity spans for the raw (uncleaned) text."""
        return self.nlp(txt).ents
if __name__ == "__main__":
    # quick manual smoke test of both tokenizers against a sample status
    print("----------------")
    status_text = "Welcome to New York. Welcoming isn't it? :-D"
    print(status_text)
    print("----------------")

    basic = Tokenizer()
    print(type(basic))
    print(len(basic.stop_words))
    print("  basic_tokens:", basic.basic_tokens(status_text))
    print("  porter_stems:", basic.porter_stems(status_text))
    print("  custom_stems:", basic.custom_stems(status_text))

    print("----------------")
    spacy_based = SpacyTokenizer()
    print(type(spacy_based))
    print(len(spacy_based.stop_words))
    print("  custom_stem_lemmas:", spacy_based.custom_stem_lemmas(status_text))
    print("  entity_tokens:", spacy_based.entity_tokens(status_text))
| 5,396 | 38.108696 | 137 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/tweet_analyzer.py |
import os
from pandas import DataFrame, read_csv
from app.decorators.datetime_decorators import dt_to_s, logstamp
from app.decorators.number_decorators import fmt_n
from app.bot_communities.spectral_clustermaker import SpectralClustermaker
BATCH_SIZE = 50_000 # we are talking about downloading 1-2M tweets
class CommunityTweetAnalyzer:
    """Loads (or downloads from BigQuery) the tweets belonging to each detected bot community."""
    def __init__(self):
        # reuse the clustermaker's configuration: cluster count, BQ service, and storage dir
        self.clustermaker = SpectralClustermaker()
        self.n_clusters = self.clustermaker.n_clusters
        self.bq_service = self.clustermaker.grapher.bq_service
        self.tweets_filepath = os.path.join(self.clustermaker.local_dirpath, "tweets.csv")
        print(os.path.abspath(self.tweets_filepath))
    def load_tweets(self):
        """
        Loads or downloads bot community tweets to/from CSV.
        Populates self.tweets_df; the CSV acts as a local cache to avoid re-querying BigQuery.
        """
        if os.path.isfile(self.tweets_filepath):
            print("READING BOT COMMUNITY TWEETS FROM CSV...")
            self.tweets_df = read_csv(self.tweets_filepath) # DtypeWarning: Columns (6) have mixed types.Specify dtype option on import or set low_memory=False
        else:
            print("DOWNLOADING BOT COMMUNITY TWEETS...")
            counter = 0
            records = []
            for row in self.bq_service.download_n_bot_community_tweets_in_batches(self.n_clusters):
                # flatten the BQ row into a plain dict; datetimes serialized as strings
                records.append({
                    "community_id": row.community_id,
                    "user_id": row.user_id,
                    "user_name": row.user_name,
                    "user_screen_name": row.user_screen_name,
                    "user_description": row.user_description,
                    "user_location": row.user_location,
                    "user_verified": row.user_verified,
                    "user_created_at": dt_to_s(row.user_created_at),
                    "status_id": row.status_id,
                    "status_text": row.status_text,
                    "reply_user_id": row.reply_user_id,
                    "retweet_status_id": row.retweet_status_id,
                    "status_is_quote": row.status_is_quote,
                    "status_geo": row.status_geo,
                    "status_created_at": dt_to_s(row.status_created_at)
                })
                counter+=1
                if counter % BATCH_SIZE == 0: print(logstamp(), fmt_n(counter)) # progress reporting only
            self.tweets_df = DataFrame(records)
            # 1-indexed row ids, written as the CSV index column
            self.tweets_df.index.name = "row_id"
            self.tweets_df.index = self.tweets_df.index + 1
            print("WRITING TO FILE...")
            self.tweets_df.to_csv(self.tweets_filepath)
if __name__ == "__main__":
    # load (or download) the community tweets, then preview the resulting frame
    tweet_analyzer = CommunityTweetAnalyzer()
    tweet_analyzer.load_tweets()
    print(tweet_analyzer.tweets_df.head())
| 2,690 | 36.901408 | 159 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/daily_retweet_analyzer.py |
import os
from functools import lru_cache
import time
from concurrent.futures import as_completed, ProcessPoolExecutor, ThreadPoolExecutor
from threading import current_thread #, #Thread, Lock, BoundedSemaphore
from dotenv import load_dotenv
from pandas import to_datetime
from app import APP_ENV, seek_confirmation
from app.decorators.datetime_decorators import logstamp, s_to_date
from app.decorators.number_decorators import fmt_n
from app.bot_communities.csv_storage import LocalStorage
from app.bot_communities.retweet_analyzer import RetweetsAnalyzer
from app.bot_communities.tokenizers import SpacyTokenizer
load_dotenv()
START_DATE = os.getenv("START_DATE", default="2019-12-12") # analysis window start (YYYY-MM-DD)
END_DATE = os.getenv("END_DATE", default="2020-02-20") # analysis window end (YYYY-MM-DD)
PARALLEL = (os.getenv("PARALLEL", default="true") == "true") # process (community, date) groups concurrently
MAX_WORKERS = int(os.getenv("MAX_WORKERS", default=10)) # thread pool size when PARALLEL
class DailyRetweetsAnalyzer(RetweetsAnalyzer):
    """Per-day variant of RetweetsAnalyzer: writes date-stamped charts and CSVs under a "daily" subdir."""
    def __init__(self, community_id, community_retweets_df, parent_dirpath, date, tokenize=None):
        # date (str) is stamped into every output filename and chart title
        local_dirpath = os.path.join(parent_dirpath, "daily")
        self.date = date
        tokenize = tokenize or SpacyTokenizer().custom_stem_lemmas
        super().__init__(community_id, community_retweets_df, local_dirpath, tokenize)
    def customize_paths_and_titles(self):
        """Override parent output paths/titles so each artifact is scoped to this analyzer's date."""
        self.most_retweets_chart_filepath = os.path.join(self.local_dirpath, f"most-retweets-{self.date}.png")
        self.most_retweets_chart_title = f"Users Most Retweeted by Bot Community {self.community_id} on {self.date}"
        self.most_retweeters_chart_filepath = os.path.join(self.local_dirpath, f"most-retweeters-{self.date}.png")
        self.most_retweeters_chart_title = f"Users with Most Retweeters from Bot Community {self.community_id} on {self.date}"
        self.top_tokens_csv_filepath = os.path.join(self.local_dirpath, f"top-tokens-{self.date}.csv")
        self.top_tokens_wordcloud_filepath = os.path.join(self.local_dirpath, f"top-tokens-{self.date}-wordcloud.png")
        self.top_tokens_wordcloud_title = f"Word Cloud for Community {self.community_id} on {self.date} (n={fmt_n(len(self.community_retweets_df))})"
        self.topics_csv_filepath = os.path.join(self.local_dirpath, f"topics-{self.date}.csv")
def perform(group_name, filtered_df, parent_dirpath, tokenize):
    """Generate retweet charts, top tokens, and a word cloud for one (community_id, date) group."""
    community_id, date = group_name
    community_dirpath = os.path.join(parent_dirpath, f"community-{community_id}")
    print("----------------")
    print(logstamp(), "COMMUNITY", community_id, "| DATE:", date, "|", current_thread().name, "| RETWEETS:", fmt_n(len(filtered_df)))
    daily_analyzer = DailyRetweetsAnalyzer(community_id, filtered_df, community_dirpath, date, tokenize)
    daily_analyzer.generate_most_retweets_chart()
    daily_analyzer.generate_most_retweeters_chart()
    daily_analyzer.top_tokens_df # touch the cached property so it gets computed
    daily_analyzer.save_top_tokens()
    daily_analyzer.generate_top_tokens_wordcloud()
if __name__ == "__main__":
    print("------------------------")
    print("DAILY COMMUNITY RETWEETS ANALYSIS...")
    print("  PARALLEL-PROCESSING:", PARALLEL)
    print("  MAX WORKERS:", MAX_WORKERS)
    print("  START DATE:", START_DATE)
    print("  END DATE:", END_DATE)

    storage = LocalStorage()
    storage.load_retweets()
    df = storage.retweets_df
    print(df.head())
    seek_confirmation()

    print("----------------")
    print("TRANSFORMING RETWEETS...")
    # derived date columns: string date for grouping, datetime for range filtering
    df["status_created_date"] = df["status_created_at"].apply(s_to_date)
    df["status_created_dt"] = to_datetime(df["status_created_at"])

    print("----------------")
    print("FILTERING RETWEETS...")
    df = df.query(f"{START_DATE.replace('-','')} < status_created_dt < {END_DATE.replace('-','')}") # dashes are for humans, not dataframe queries apparently
    print(df["status_created_date"].value_counts())
    seek_confirmation()

    # BUGFIX: group the FILTERED frame (df), not storage.retweets_df --
    # df.query() returns a new frame, so grouping the original silently discarded the date filter.
    groupby = df.groupby(["community_id", "status_created_date"])
    tokenize = SpacyTokenizer().custom_stem_lemmas # let's just load the spacy model once and pass it around
    if PARALLEL:
        # threads (not processes) so the shared spaCy model isn't re-pickled per worker
        with ThreadPoolExecutor(max_workers=MAX_WORKERS, thread_name_prefix="THREAD") as executor:
            futures = [executor.submit(perform, group_name, filtered_df, storage.local_dirpath, tokenize) for group_name, filtered_df in groupby]
            for future in as_completed(futures):
                result = future.result() # re-raises any worker exception
    else:
        for group_name, filtered_df in groupby:
            perform(group_name, filtered_df, storage.local_dirpath, tokenize)
    print("----------------")
    print("ALL PROCESSES COMPLETE!")
| 4,807 | 41.175439 | 157 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/spectral_clustermaker.py | import os
from functools import lru_cache
from dotenv import load_dotenv
from pandas import DataFrame
from networkx import adjacency_matrix, Graph
from sklearn.cluster import SpectralClustering
import matplotlib.pyplot as plt
from app import seek_confirmation, APP_ENV
from app.bot_communities.bot_similarity_grapher import BotSimilarityGrapher
load_dotenv()
N_COMMUNITIES = int(os.getenv("N_COMMUNITIES", default="2")) # number of spectral clusters to fit
class SpectralClustermaker:
    """Fits spectral clustering on the bot similarity graph and persists the community assignments."""
    def __init__(self, n_clusters=N_COMMUNITIES):
        self.n_clusters = n_clusters
        # affinity="precomputed" because we pass the graph's adjacency matrix directly
        self.classifier = SpectralClustering(n_clusters=self.n_clusters, eigen_solver=None, affinity="precomputed", n_init=20)
        self.grapher = BotSimilarityGrapher()
        self.local_dirpath = os.path.join(self.grapher.local_dirpath, "n_communities", str(self.n_clusters))
        self.gcs_dirpath = os.path.join(self.grapher.gcs_dirpath, "n_communities", str(self.n_clusters))
        self.local_bot_communities_filepath = os.path.join(self.local_dirpath, "community_assignments.csv")
        self.gcs_bot_communities_filepath = os.path.join(self.gcs_dirpath, "community_assignments.csv")
        print("-----------------------")
        print("SPECTRAL CLUSTERMAKER")
        print("  N CLUSTERS:", self.n_clusters)
        print("  CLASSIFIER:", type(self.classifier))
        print("  LOCAL DIRPATH:", os.path.abspath(self.local_dirpath))
        print("  GCS DIRPATH:", self.gcs_dirpath)
        seek_confirmation()
        if not os.path.exists(self.local_dirpath):
            os.makedirs(self.local_dirpath)
        self.grapher.similarity_graph_report() # load bot similarity graph
        self.similarity_graph = self.grapher.similarity_graph
        self.community_assignments = None # populated by perform()
    @property
    @lru_cache(maxsize=None)
    def similarity_matrix(self):
        """Adjacency matrix of the (undirected) similarity graph, computed once and cached."""
        matrix = adjacency_matrix(self.similarity_graph.to_undirected())
        print("SIMILARITY MATRIX", type(matrix))
        return matrix
    def perform(self):
        """Fit the classifier and build self.community_assignments ({user_id, community_id} dicts)."""
        self.classifier.fit(self.similarity_matrix) # makes the predictions, populates labels
        user_ids = list(self.similarity_graph.nodes())
        community_ids = [label.item() for label in list(self.classifier.labels_)] # converts from np int32 to native python int (so can be stored in BQ without serialization errors)
        iterator = zip(user_ids, community_ids)
        self.community_assignments = [{"user_id": user_id, "community_id": community_id} for user_id, community_id in iterator]
    @property
    @lru_cache(maxsize=None)
    def community_assignments_df(self):
        """Assignments as a 1-indexed DataFrame; lazily triggers perform() if not yet run."""
        if not self.community_assignments:
            self.perform()
        df = DataFrame(self.community_assignments)
        df.index.name = "row_id"
        df.index += 1
        return df
    def write_to_file(self):
        """Write the assignments DataFrame to the local CSV path."""
        print("----------------")
        print("WRITING COMMUNITY ASSIGNMENTS TO CSV...")
        self.community_assignments_df.to_csv(self.local_bot_communities_filepath)
    def upload_to_gcs(self):
        """Upload the local assignments CSV to Google Cloud Storage."""
        print("----------------")
        print("UPLOADING COMMUNITY ASSIGNMENTS TO GCS...")
        self.grapher.upload_file(self.local_bot_communities_filepath, self.gcs_bot_communities_filepath)
    def save_to_bq(self):
        """Overwrite the BigQuery communities table with the current assignments."""
        print("----------------")
        print("SAVING COMMUNITY ASSIGNMENTS TO BQ...")
        self.grapher.bq_service.overwrite_n_bot_communities_table(
            n_communities=self.n_clusters,
            records=self.community_assignments
        )
    def generate_histogram(self):
        """Save (and, in development, display) a histogram of users per community."""
        print("----------------")
        print("GENERATING COMMUNITIES HISTOGRAM...")
        # todo: optionally customize colors for each series. might be easier to use https://plotly.com/python/histograms/#histogram-with-plotly-express
        plt.hist(self.community_assignments_df["community_id"], color="grey")
        plt.title(f"Bot Communities Histogram (n_communities={self.n_clusters})")
        plt.xlabel("Community Id")
        plt.ylabel("User Count")
        plt.grid()
        img_filepath = os.path.join(self.local_dirpath, "community-assignments.png")
        print(os.path.abspath(img_filepath))
        plt.savefig(img_filepath)
        if APP_ENV == "development":
            plt.show() # this clears the figure, so save before or use reference https://stackoverflow.com/a/9012749/670433
if __name__ == "__main__":
    # full clustering pipeline: fit, persist locally, upload, store in BQ, then chart
    maker = SpectralClustermaker()
    maker.perform()
    maker.write_to_file()
    maker.upload_to_gcs()
    maker.save_to_bq()
    maker.generate_histogram()
| 4,582 | 38.852174 | 181 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/csv_storage.py |
import os
from pandas import DataFrame, read_csv
from app.decorators.datetime_decorators import dt_to_s, logstamp
from app.decorators.number_decorators import fmt_n
from app.bot_communities.spectral_clustermaker import SpectralClustermaker
BATCH_SIZE = 50_000 # we are talking about downloading millions of records
#ROWS_LIMIT = os.getenv("ROWS_LIMIT")
class LocalStorage:
    """CSV-backed cache of bot community retweets, downloaded from BigQuery on first use."""
    def __init__(self):
        # reuse the clustermaker's configuration: cluster count, BQ service, and storage dir
        self.clustermaker = SpectralClustermaker()
        self.n_clusters = self.clustermaker.n_clusters
        self.bq_service = self.clustermaker.grapher.bq_service
        self.local_dirpath = self.clustermaker.local_dirpath
        #self.tweets_filepath = os.path.join(self.local_dirpath, "tweets.csv")
        self.retweets_filepath = os.path.join(self.local_dirpath, "retweets.csv")
        self.retweets_dirpath = os.path.join(self.local_dirpath, "retweets")
        if not os.path.exists(self.retweets_dirpath):
            os.makedirs(self.retweets_dirpath)
        self.retweets_df = None # populated by load_retweets()
    def load_retweets(self):
        """
        Loads or downloads bot community tweets to/from CSV.
        Populates self.retweets_df; the CSV acts as a local cache to avoid re-querying BigQuery.
        """
        if os.path.isfile(self.retweets_filepath):
            print("READING BOT COMMUNITY RETWEETS FROM CSV...")
            self.retweets_df = read_csv(self.retweets_filepath) # DtypeWarning: Columns (6) have mixed types.Specify dtype option on import or set low_memory=False
            #if ROWS_LIMIT:
            #    self.retweets_df = read_csv(local_csv_filepath, nrows=int(ROWS_LIMIT))
            #else:
            #    self.retweets_df = read_csv(local_csv_filepath)
        else:
            print("DOWNLOADING BOT COMMUNITY RETWEETS...")
            counter = 0
            records = []
            for row in self.bq_service.download_n_bot_community_retweets_in_batches(self.n_clusters):
                # flatten the BQ row into a plain dict; datetimes serialized as strings
                records.append({
                    "community_id": row.community_id,
                    "user_id": row.user_id,
                    "user_screen_name_count": row.user_screen_name_count,
                    "user_screen_names": row.user_screen_names,
                    "user_created_at": dt_to_s(row.user_created_at),
                    "retweeted_user_id": row.retweeted_user_id,
                    "retweeted_user_screen_name": row.retweeted_user_screen_name,
                    "status_id": row.status_id,
                    "status_text": row.status_text,
                    "status_created_at": dt_to_s(row.status_created_at)
                })
                counter+=1
                if counter % BATCH_SIZE == 0: print(logstamp(), fmt_n(counter)) # progress reporting only
            self.retweets_df = DataFrame(records)
            # 1-indexed row ids, written as the CSV index column
            self.retweets_df.index.name = "row_id"
            self.retweets_df.index += 1
            print("WRITING TO FILE...")
            self.retweets_df.to_csv(self.retweets_filepath)
    @property
    def retweet_community_ids(self):
        """Unique community ids present in the retweets (loading the data first if needed)."""
        if self.retweets_df is None:
            self.load_retweets()
        return list(self.retweets_df["community_id"].unique())
| 3,037 | 39.506667 | 163 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bot_communities/bot_profile_analyzer_v1.py | import os
from app import DATA_DIR
from app.bq_service import BigQueryService
from app.bot_communities.tokenizers import Tokenizer, SpacyTokenizer
from app.bot_communities.token_analyzer import summarize_token_frequencies
from pandas import DataFrame
if __name__ == "__main__":
    local_dirpath = os.path.join(DATA_DIR,"bot_retweet_graphs", "bot_min", str(0.8), "n_communities", str(2))
    tokenizer = Tokenizer()
    spacy_tokenizer = SpacyTokenizer()
    bq_service = BigQueryService()

    # one row per bot: community, tweet count, and pipe-joined distinct profile strings
    sql = f"""
        SELECT
            c.community_id
            ,b.bot_id
            -- ,b.bot_screen_name
            --,b.day_count
            --,b.avg_daily_score
            ,count(distinct t.status_id) as tweet_count
            ,COALESCE(STRING_AGG(DISTINCT upper(t.user_screen_name), ' | ') , "") as screen_names
            ,COALESCE(STRING_AGG(DISTINCT upper(t.user_name), ' | ') , "") as user_names
            ,COALESCE(STRING_AGG(DISTINCT upper(t.user_description), ' | ') , "") as user_descriptions
        FROM impeachment_production.bots_above_80 b
        JOIN impeachment_production.2_bot_communities c ON c.user_id = b.bot_id
        JOIN impeachment_production.tweets t on cast(t.user_id as int64) = b.bot_id
        GROUP BY 1,2
        ORDER BY 1,2
    """ # TODO: move me into the BQ service
    results = [dict(row) for row in list(bq_service.execute_query(sql))]
    print("PROCESSING", len(results), "RECORDS...")

    # tokenize each bot's aggregated profile text (unique tokens per bot, so a bot that
    # changed its screen name many times doesn't dominate the counts)
    for i, row in enumerate(results):
        row["profile_tokens"] = []
        row["profile_lemmas"] = []
        row["profile_tags"] = []
        row["profile_handles"] = []
        if row["user_descriptions"]:
            row["profile_tokens"] = list(set(tokenizer.custom_stems(row["user_descriptions"])))
            row["profile_lemmas"] = list(set(spacy_tokenizer.custom_stem_lemmas(row["user_descriptions"])))
            row["profile_tags"] = list(set(tokenizer.hashtags(row["user_descriptions"])))
            row["profile_handles"] = list(set(tokenizer.handles(row["user_descriptions"])))
            # these are interesting...
            #ents = list(set([ent.text for ent in spacy_tokenizer.entity_tokens(row["user_descriptions"])]))
            #row["ents"] = ents
    df = DataFrame(results)
    df.to_csv(os.path.join(local_dirpath, "bot_profiles.csv"))
    print(df.head())

    #
    # SUMMARIZE BY COMMUNITY...
    #
    for community_id, filtered_df in df.groupby(["community_id"]):
        print("--------------")
        print("COMMUNITY", community_id, "-", len(filtered_df), "BOTS")
        top_tokens_df = summarize_token_frequencies(filtered_df["profile_tokens"].tolist())
        top_lemmas_df = summarize_token_frequencies(filtered_df["profile_lemmas"].tolist())
        top_tags_df = summarize_token_frequencies(filtered_df["profile_tags"].tolist())
        top_handles_df = summarize_token_frequencies(filtered_df["profile_handles"].tolist())
        print(top_tokens_df.head())
        print(top_lemmas_df.head())
        print(top_tags_df.head())
        print(top_handles_df.head())
        community_dirpath = os.path.join(local_dirpath, f"community-{community_id}")
        # BUGFIX: the per-community directory was never created, so to_csv raised
        # FileNotFoundError on a fresh checkout. Create it (and any parents) if missing.
        os.makedirs(community_dirpath, exist_ok=True)
        top_tokens_df.to_csv(os.path.join(community_dirpath, "top_profile_tokens.csv"))
        top_lemmas_df.to_csv(os.path.join(community_dirpath, "top_profile_lemmas.csv"))
        top_tags_df.to_csv(os.path.join(community_dirpath, "top_profile_tags.csv"))
        top_handles_df.to_csv(os.path.join(community_dirpath, "top_profile_handles.csv"))
| 4,124 | 40.25 | 130 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/bq_topic_grapher.py |
import os
from networkx import DiGraph
from memory_profiler import profile
from dotenv import load_dotenv
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.friend_graphs.bq_grapher import BigQueryGrapher
load_dotenv()
USERS_LIMIT = int(os.getenv("USERS_LIMIT", default="1000")) # forces us to have a limit, unlike the base grapher version
TOPIC = os.getenv("TOPIC", default="impeach") # substring used to select conversation tweets
START_AT = os.getenv("START_AT", default="2020-01-01 01:00:00") # On 1/15, The House of Representatives names seven impeachment managers and votes to transmit articles of impeachment to the Senate
END_AT = os.getenv("END_AT", default="2020-01-30 01:00:00")
class BigQueryTopicGrapher(BigQueryGrapher):
    """Builds a friend graph for a random sample of users who tweeted about a topic within a date range."""
    def __init__(self, users_limit=USERS_LIMIT, topic=TOPIC, convo_start_at=START_AT, convo_end_at=END_AT,
                        bq_service=None, gcs_service=None):
        super().__init__(bq_service=bq_service, gcs_service=gcs_service)
        self.users_limit = users_limit
        self.topic = topic
        self.convo_start_at = convo_start_at
        self.convo_end_at = convo_end_at
        print("---------------------------------------")
        print("CONVERSATION FILTERS...")
        print(f"  USERS LIMIT: {self.users_limit}")
        print(f"  TOPIC: '{self.topic.upper()}' ")
        print(f"  BETWEEN: '{self.convo_start_at}' AND '{self.convo_end_at}'")
    @property
    def metadata(self):
        """Parent metadata, extended with the conversation filter parameters."""
        return {**super().metadata, **{"conversation": {
            "users_limit": self.users_limit,
            "topic": self.topic,
            "start_at": self.convo_start_at,
            "end_at": self.convo_end_at,
        }}} # merges dicts
    @profile
    def perform(self):
        """Fetch sampled users, stream their friend lists into a DiGraph, then persist everything.

        Decorated with memory_profiler's @profile to track per-line memory usage.
        """
        self.write_metadata_to_file()
        self.upload_metadata()
        self.start()
        self.graph = DiGraph()
        self.running_results = []
        users = list(self.bq_service.fetch_random_users(limit=self.users_limit, topic=self.topic,
                                                        start_at=self.convo_start_at, end_at=self.convo_end_at))
        print("FETCHED", len(users), "USERS")
        screen_names = sorted([row["user_screen_name"] for row in users])
        for row in self.bq_service.fetch_specific_user_friends(screen_names=screen_names):
            self.counter += 1
            # dry_run skips graph construction but still exercises the fetch loop
            if not self.dry_run:
                self.graph.add_edges_from([(row["screen_name"], friend) for friend in row["friend_names"]])
            if self.counter % self.batch_size == 0:
                rr = {"ts": logstamp(), "counter": self.counter, "nodes": len(self.graph.nodes), "edges": len(self.graph.edges)}
                print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["nodes"]), "|", fmt_n(rr["edges"]))
                self.running_results.append(rr)
        self.end()
        self.report()
        self.write_results_to_file()
        self.upload_results()
        self.write_graph_to_file()
        self.upload_graph()
if __name__ == "__main__":
    # confirm config interactively (in development), build the graph, then back off
    topic_grapher = BigQueryTopicGrapher.cautiously_initialized()
    topic_grapher.perform()
    topic_grapher.sleep()
| 3,146 | 36.464286 | 196 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/base_grapher.py |
import time
import os
from datetime import datetime as dt
import pickle
import json
from dotenv import load_dotenv
from networkx import DiGraph, write_gpickle
from pandas import DataFrame
from app import APP_ENV, DATA_DIR
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.gcs_service import GoogleCloudStorageService
load_dotenv()
DRY_RUN = (os.getenv("DRY_RUN", default="true") == "true")
USERS_LIMIT = os.getenv("USERS_LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=100))
class BaseGrapher():
"""
Parent class with helper methods for assembling the graph object.
Allows us to try various child class approaches to investigate and achieve memory optimization.
Is able to write graph objects to file and upload them to Google Cloud Storage.
Graph construction should be done in child class' perform() method.
Example:
grapher = BaseGrapher.cautiously_initialized()
grapher.start()
grapher.perform()
grapher.end()
grapher.report()
"""
    def __init__(self, dry_run=DRY_RUN, batch_size=BATCH_SIZE, users_limit=USERS_LIMIT, gcs_service=None, job_id=None):
        """
        Params:
            dry_run (bool)
                Whether or not to construct the graph object. If true, does not assemble the graph.
            users_limit (int / None)
                Optionally specifies the maximum number of users to fetch.
                If running into problems constructing a graph from the entire dataset,
                can just choose to create smaller graphs to get some kind of win.
            batch_size (int)
                When fetching from BigQuery, only determines the reporting interval.
                When fetching from PostgreSQL database via psycopg, determines number of users fetched from the database at once, and also the reporting interval.
            job_id (str / None)
                A unique identifer to associate a given job's results files.
                Is used as part of local filepaths and remote bucket paths, so should avoid including spaces or special characters.
                Assigns a timestamp-based unique identifier by default.
        """
        self.job_id = (job_id or dt.now().strftime("%Y-%m-%d-%H%M")) # timestamp-based default id
        self.dry_run = (dry_run == True)
        self.batch_size = batch_size
        if users_limit:
            self.users_limit = int(users_limit) # env var arrives as a string
        else:
            self.users_limit = None
        # local output paths, all scoped under the job id
        self.local_dirpath = os.path.join(DATA_DIR, "graphs", "archived", self.job_id)
        self.local_metadata_filepath = os.path.join(self.local_dirpath, "metadata.json")
        self.local_results_filepath = os.path.join(self.local_dirpath, "results.csv")
        self.local_edges_filepath = os.path.join(self.local_dirpath, "edges.gpickle")
        self.local_graph_filepath = os.path.join(self.local_dirpath, "graph.gpickle")
        # remote (GCS) paths mirror the local layout
        self.gcs_service = (gcs_service or GoogleCloudStorageService())
        self.gcs_dirpath = os.path.join("storage", "data", "archived", self.job_id)
        self.gcs_metadata_filepath = os.path.join(self.gcs_dirpath, "metadata.json")
        self.gcs_results_filepath = os.path.join(self.gcs_dirpath, "results.csv")
        self.gcs_edges_filepath = os.path.join(self.gcs_dirpath, "edges.gpickle")
        self.gcs_graph_filepath = os.path.join(self.gcs_dirpath, "graph.gpickle")
    @classmethod
    def cautiously_initialized(cls):
        """Alternate constructor: print the config, ask for confirmation (in development), and prepare the output dir."""
        service = cls()
        print("-------------------------")
        print("GRAPHER CONFIG...")
        print("  JOB ID:", service.job_id)
        print("  DRY RUN:", str(service.dry_run).upper())
        print("  USERS LIMIT:", service.users_limit)
        print("  BATCH SIZE:", str(service.batch_size).upper())
        print("-------------------------")
        # interactive safety check, development only
        if APP_ENV == "development":
            if input("CONTINUE? (Y/N): ").upper() != "Y":
                print("EXITING...")
                exit()
        service.init_local_dir()
        return service
def init_local_dir(self):
if not os.path.exists(self.local_dirpath):
os.mkdir(self.local_dirpath)
@property
def metadata(self):
return {"app_env": APP_ENV, "job_id": self.job_id, "dry_run": self.dry_run, "batch_size": self.batch_size}
def start(self):
print("-----------------")
print("JOB STARTING!")
self.start_at = time.perf_counter()
self.counter = 0
def perform(self):
"""To be overridden by child class"""
self.graph = DiGraph()
def end(self):
print("-----------------")
print("JOB COMPLETE!")
self.end_at = time.perf_counter()
self.duration_seconds = round(self.end_at - self.start_at, 2)
print(f"PROCESSED {fmt_n(self.counter)} USERS IN {fmt_n(self.duration_seconds)} SECONDS")
    def report(self):
        """Print node/edge counts for self.graph (size() equals the edge count for an unweighted graph)."""
        print("NODES:", fmt_n(len(self.graph.nodes)))
        print("EDGES:", fmt_n(len(self.graph.edges)))
        print("SIZE:", fmt_n(self.graph.size()))
    def write_metadata_to_file(self, metadata_filepath=None):
        """Dump self.metadata as JSON (defaults to the job's local metadata path)."""
        metadata_filepath = metadata_filepath or self.local_metadata_filepath
        print(logstamp(), "WRITING METADATA...")
        with open(metadata_filepath, "w") as metadata_file:
            json.dump(self.metadata, metadata_file)
    def write_results_to_file(self, results_filepath=None):
        """Write self.running_results to CSV (defaults to the job's local results path)."""
        results_filepath = results_filepath or self.local_results_filepath
        print(logstamp(), "WRITING RESULTS...")
        df = DataFrame(self.running_results)
        df.to_csv(results_filepath)
    def write_edges_to_file(self, edges_filepath=None):
        """Pickle self.edges (defaults to the job's local edges path)."""
        edges_filepath = edges_filepath or self.local_edges_filepath
        print(logstamp(), "WRITING EDGES...:")
        with open(edges_filepath, "wb") as pickle_file:
            pickle.dump(self.edges, pickle_file)
    def write_graph_to_file(self, graph_filepath=None):
        """Serialize self.graph as a gpickle (defaults to the job's local graph path)."""
        graph_filepath = graph_filepath or self.local_graph_filepath
        print(logstamp(), "WRITING GRAPH...")
        write_gpickle(self.graph, graph_filepath)
    def upload_metadata(self):
        """Upload the local metadata file to Google Cloud Storage."""
        print(logstamp(), "UPLOADING JOB METADATA...", self.gcs_metadata_filepath)
        blob = self.gcs_service.upload(self.local_metadata_filepath, self.gcs_metadata_filepath)
        print(logstamp(), blob) #> <Blob: impeachment-analysis-2020, storage/data/2020-05-26-0002/metadata.json, 1590465770194318>
    def upload_results(self):
        """Upload the local results CSV to Google Cloud Storage."""
        print(logstamp(), "UPLOADING JOB RESULTS...", self.gcs_results_filepath)
        blob = self.gcs_service.upload(self.local_results_filepath, self.gcs_results_filepath)
        print(logstamp(), blob) #> e.g. <Blob: impeachment-analysis-2020, storage/data/2020-05-26-0002/results.csv, ...>
    def upload_edges(self):
        """Upload the local pickled edges file to Google Cloud Storage."""
        print(logstamp(), "UPLOADING NETWORK EDGES...", self.gcs_edges_filepath)
        blob = self.gcs_service.upload(self.local_edges_filepath, self.gcs_edges_filepath)
        print(logstamp(), blob)
    def upload_graph(self):
        """Upload the local gpickled graph file to Google Cloud Storage."""
        print(logstamp(), "UPLOADING GRAPH...", self.gcs_graph_filepath)
        blob = self.gcs_service.upload(self.local_graph_filepath, self.gcs_graph_filepath)
        print(logstamp(), blob)
    def sleep(self):
        """In production, idle after the job so the server can be stopped manually."""
        if APP_ENV == "production":
            print("SLEEPING...")
            time.sleep(12 * 60 * 60) # twelve hours, more than enough time to stop the server
if __name__ == "__main__":
    # run the base (no-op) grapher end-to-end; mostly useful for smoke-testing the lifecycle
    #grapher = BaseGrapher(job_id="2020-05-27-1537")
    #grapher.upload_edges()
    grapher = BaseGrapher.cautiously_initialized()
    grapher.start()
    grapher.perform()
    grapher.end()
    grapher.report()
| 7,716 | 40.047872 | 162 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/psycopg_batch_grapher.py |
from networkx import DiGraph
from memory_profiler import profile
from app.friend_graphs.psycopg_grapher import PsycopgGrapher
class Grapher(PsycopgGrapher):
    """Builds the friend graph incrementally, adding nodes/edges batch by batch."""
    @profile
    def perform(self):
        self.start()
        self.graph = DiGraph()
        self.cursor.execute(self.sql)
        while True:
            # fetchmany on the named server-side cursor streams rows in batches
            results = self.cursor.fetchmany(size=self.batch_size)
            if not results: break
            self.counter += len(results)
            # NOTE(review): generate_timestamp() is not defined on the visible base classes -- verify it exists
            print(self.generate_timestamp(), self.counter)
            if not self.dry_run:
                for row in results:
                    user = row["screen_name"]
                    friends = row["friend_names"]
                    # add_edges_from would create the nodes implicitly; explicit adds kept for clarity
                    self.graph.add_node(user)
                    self.graph.add_nodes_from(friends)
                    self.graph.add_edges_from([(user, friend) for friend in friends])
        self.end()
if __name__ == "__main__":
    # construct the graph from Postgres and persist it locally
    grapher = Grapher.cautiously_initialized()
    grapher.perform()
    grapher.write_graph_to_file()
| 1,011 | 28.764706 | 85 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/psycopg_grapher.py |
import psycopg2
from networkx import DiGraph
from memory_profiler import profile
from app import APP_ENV
from app.pg_pipeline.models import DATABASE_URL, USER_FRIENDS_TABLE_NAME
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.friend_graphs.base_grapher import BaseGrapher, DRY_RUN, BATCH_SIZE, USERS_LIMIT
class PsycopgGrapher(BaseGrapher):
    """Builds a friend-graph DiGraph by streaming user/friend rows from Postgres.

    Uses a NAMED (server-side) cursor so rows are streamed in batches instead of
    being loaded into memory all at once.
    """

    def __init__(self, dry_run=DRY_RUN, batch_size=BATCH_SIZE, users_limit=USERS_LIMIT,
                        database_url=DATABASE_URL, table_name=USER_FRIENDS_TABLE_NAME):
        # BUGFIX: `import psycopg2` alone does not load the `extras` submodule, so
        # referencing psycopg2.extras.DictCursor below raised AttributeError.
        import psycopg2.extras
        super().__init__(dry_run=dry_run, batch_size=batch_size, users_limit=users_limit)
        self.database_url = database_url
        self.table_name = table_name
        self.connection = psycopg2.connect(self.database_url)
        self.cursor = self.connection.cursor(name="network_grapher", cursor_factory=psycopg2.extras.DictCursor) # A NAMED CURSOR PREVENTS MEMORY ISSUES!!!!

    @property
    def metadata(self):
        """Parent job metadata plus this job's database connection details."""
        return {**super().metadata, **{"database_url": self.database_url, "table_name": self.table_name}} # merges dicts

    @property
    def sql(self):
        """SELECT statement for user/friend rows, honoring the optional users_limit."""
        query = f"SELECT id, user_id, screen_name, friend_count, friend_names FROM {self.table_name} "
        if self.users_limit:
            query += f"LIMIT {self.users_limit};"
        return query

    @profile
    def perform(self):
        """Stream all rows, add (user -> friend) edges, and persist/upload the outputs."""
        self.start()
        self.write_metadata_to_file()
        self.upload_metadata()
        print(logstamp(), "CONSTRUCTING GRAPH OBJECT...")
        self.graph = DiGraph()
        self.running_results = []
        self.cursor.execute(self.sql)
        while True:
            batch = self.cursor.fetchmany(size=self.batch_size)
            if not batch: break
            self.counter += len(batch)
            if not self.dry_run:
                for row in batch:
                    self.graph.add_edges_from([(row["screen_name"], friend) for friend in row["friend_names"]])
            # record a progress snapshot after each batch
            rr = {"ts": logstamp(), "counter": self.counter, "nodes": len(self.graph.nodes), "edges": len(self.graph.edges)}
            print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["nodes"]), "|", fmt_n(rr["edges"]))
            self.running_results.append(rr)
        self.cursor.close()
        self.connection.close()
        print(logstamp(), "GRAPH CONSTRUCTED!")
        self.report()
        self.write_results_to_file()
        self.upload_results()
        self.write_graph_to_file()
        self.upload_graph()
        self.end()
if __name__ == "__main__":
    # build, persist, and upload the graph (perform handles the full lifecycle)
    grapher = PsycopgGrapher.cautiously_initialized()
    grapher.perform()
| 2,660 | 34.959459 | 155 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/bq_list_grapher.py |
import pickle
from networkx import DiGraph
from memory_profiler import profile
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.friend_graphs.bq_grapher import BigQueryGrapher
class BigQueryListGrapher(BigQueryGrapher):
    """Accumulates all edges in a list first, then constructs the DiGraph in one shot.

    Trades memory (the edge list) for a single bulk graph construction; the edge
    list is also persisted on its own before the graph is built.
    """
    @profile
    def perform(self):
        self.start()
        self.write_metadata_to_file()
        self.upload_metadata()
        self.edges = []
        self.running_results = []
        for row in self.bq_service.fetch_user_friends_in_batches(limit=self.users_limit):
            self.counter += 1
            if not self.dry_run:
                self.edges += [(row["screen_name"], friend) for friend in row["friend_names"]]
            if self.counter % self.batch_size == 0:
                # progress snapshot once per batch_size rows
                rr = {"ts": logstamp(), "counter": self.counter, "edges": len(self.edges)}
                print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["edges"]))
                self.running_results.append(rr)
        self.write_results_to_file()
        self.upload_results()
        self.write_edges_to_file()
        self.upload_edges()
        print(logstamp(), "CONSTRUCTING GRAPH OBJECT...")
        self.graph = DiGraph(self.edges)
        print(logstamp(), "GRAPH CONSTRUCTED!")
        self.report()
        del self.running_results # remove in hopes of freeing up some memory
        del self.edges # remove in hopes of freeing up some memory
        self.write_graph_to_file()
        #del self.graph # remove in hopes of freeing up some memory
        self.upload_graph()
        self.end()
    def write_edges_to_file(self):
        """
        overwrite the parent method because we need self.edges vs self.graph.edges
        todo: inherit / mix-in

        NOTE(review): this override drops the parent's optional edges_filepath
        parameter -- fine for internal use, but narrows the signature.
        """
        print(logstamp(), "WRITING EDGES...:")
        with open(self.local_edges_filepath, "wb") as pickle_file:
            pickle.dump(self.edges, pickle_file) # write edges before graph is constructed
if __name__ == "__main__":
    # run the full job, then idle (in production) so the server can be stopped
    grapher = BigQueryListGrapher.cautiously_initialized()
    grapher.perform()
    grapher.sleep()
| 2,112 | 30.073529 | 94 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/psycopg_list_grapher.py |
from networkx import DiGraph
from memory_profiler import profile
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.friend_graphs.psycopg_grapher import PsycopgGrapher
class Grapher(PsycopgGrapher):
    """Postgres-backed list grapher: accumulates edges, then builds the graph in one shot."""
    @profile
    def perform(self):
        self.edges = []
        self.running_results = []
        self.start()
        self.cursor.execute(self.sql)
        while True:
            batch = self.cursor.fetchmany(size=self.batch_size)
            if not batch: break
            self.counter += len(batch)
            if not self.dry_run:
                for row in batch:
                    self.edges += [(row["screen_name"], friend) for friend in row["friend_names"]]
            # progress snapshot after each batch
            rr = {"ts": logstamp(), "counter": self.counter, "edges": len(self.edges)}
            print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["edges"]))
            self.running_results.append(rr)
        self.write_results_to_file()
        self.upload_results()
        self.write_edges_to_file()
        self.upload_edges()
        print(logstamp(), "CONSTRUCTING GRAPH OBJECT...")
        self.graph = DiGraph(self.edges)
        print(logstamp(), "GRAPH CONSTRUCTED!")
        del self.edges # try to free up some memory maybe, before writing to file
        self.report()
        self.write_graph_to_file()
        self.upload_graph()
        self.end()
if __name__ == "__main__":
    # run the full job, then idle (in production) so the server can be stopped
    grapher = Grapher.cautiously_initialized()
    grapher.perform()
    grapher.sleep()
| 1,545 | 26.607143 | 98 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/bq_grapher.py |
from networkx import DiGraph
from memory_profiler import profile
from app.bq_service import BigQueryService
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.friend_graphs.base_grapher import BaseGrapher
class BigQueryGrapher(BaseGrapher):
    """Base grapher variant that sources user/friend rows from BigQuery."""
    def __init__(self, bq_service=None, gcs_service=None):
        super().__init__(gcs_service=gcs_service)
        self.bq_service = bq_service or BigQueryService()
    @property
    def metadata(self):
        """Parent metadata merged with the BigQuery service's metadata."""
        #meta = super().metadata
        #meta["bq_service"] = self.bq_service.metadata
        #return meta
        return {**super().metadata, **self.bq_service.metadata} # merges dicts
    @profile
    def perform(self):
        """Add (user -> friend) edges directly to the graph as rows stream in."""
        self.graph = DiGraph()
        self.running_results = []
        for row in self.bq_service.fetch_user_friends_in_batches():
            self.counter += 1
            if not self.dry_run:
                self.graph.add_edges_from([(row["screen_name"], friend) for friend in row["friend_names"]])
            if self.counter % self.batch_size == 0:
                # progress snapshot once per batch_size rows
                rr = {"ts": logstamp(), "counter": self.counter, "nodes": len(self.graph.nodes), "edges": len(self.graph.edges)}
                print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["nodes"]), "|", fmt_n(rr["edges"]))
                self.running_results.append(rr)
if __name__ == "__main__":
    # drive the full job lifecycle manually (this class's perform only builds the graph)
    grapher = BigQueryGrapher.cautiously_initialized()
    grapher.write_metadata_to_file()
    grapher.upload_metadata()
    grapher.start()
    grapher.perform()
    grapher.end()
    grapher.report()
    grapher.write_results_to_file()
    grapher.upload_results()
    grapher.write_graph_to_file()
    grapher.upload_graph()
    grapher.sleep()
| 1,768 | 30.035088 | 128 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/psycopg_set_grapher.py |
from networkx import DiGraph
from memory_profiler import profile
from app.friend_graphs.psycopg_grapher import PsycopgGrapher
class Grapher(PsycopgGrapher):
    """Postgres-backed set grapher: deduplicates edges via a set before graph construction."""
    @profile
    def perform(self):
        self.edges = set() # set prevents duplicates
        self.running_results = []
        self.cursor.execute(self.sql)
        while True:
            batch = self.cursor.fetchmany(size=self.batch_size)
            if not batch: break
            self.counter += len(batch)
            if not self.dry_run:
                for row in batch:
                    user = row["screen_name"]
                    friends = row["friend_names"]
                    self.edges.update([(user, friend) for friend in friends])
            # NOTE(review): generate_timestamp() and self.fmt() are not defined on the
            # visible base classes (which use logstamp()/fmt_n()) -- verify they exist
            rr = {"ts": self.generate_timestamp(), "counter": self.counter, "edges": len(self.edges)}
            print(rr["ts"], "|", self.fmt(rr["counter"]), "|", self.fmt(rr["edges"]))
            self.running_results.append(rr)
        print(self.generate_timestamp(), "CONSTRUCTING GRAPH OBJECT...")
        self.graph = DiGraph(list(self.edges))
        print(self.generate_timestamp(), "GRAPH CONSTRUCTED!")
if __name__ == "__main__":
    # drive the lifecycle manually; outputs are written locally only (no uploads)
    grapher = Grapher.cautiously_initialized()
    grapher.start()
    grapher.perform()
    grapher.end()
    grapher.report()
    grapher.write_results_to_file()
    grapher.write_edges_to_file()
    grapher.write_graph_to_file()
| 1,400 | 29.456522 | 101 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_graphs/graph_analyzer.py |
import os
import time
from functools import cached_property, lru_cache

from networkx import read_gpickle, DiGraph
from dotenv import load_dotenv
from memory_profiler import profile

from app import DATA_DIR
from app.decorators.number_decorators import fmt_n
from app.friend_graphs.base_grapher import BaseGrapher
load_dotenv()
JOB_ID = os.getenv("JOB_ID", default="2020-05-30-0338")
STORAGE_MODE = os.getenv("STORAGE_MODE", default="local")
class GraphAnalyzer():
    def __init__(self, job_id=JOB_ID, storage_mode=STORAGE_MODE):
        """
        DEPRECATE ME IN FAVOR OF NEW GRAPH STORAGE SERVICE

        Params:
            job_id (str) the identifier of a completed job which has produced a corresponding graph object
            storage_mode (str) where the graph object file has been stored ("local" or "remote")
        """
        self.storage_mode = storage_mode
        self.job_id = job_id
        self.job = BaseGrapher(job_id=self.job_id)
        self.local_dirpath = self.job.local_dirpath

    def __repr__(self):
        return f"<GraphAnalyzer {self.job_id}>"

    @cached_property
    def graph(self):
        """Memoized graph object, loaded lazily on first access.

        FIX: was @property + @lru_cache, which keys the cache on `self` and
        therefore keeps every analyzer instance (and its graph) alive for the
        life of the process. cached_property stores the result on the instance
        instead, so it is garbage-collected with the instance.
        """
        return self.load_graph()

    #@profile
    def load_graph(self):
        """Read the graph from the local gpickle, downloading it from GCS first if needed."""
        if not os.path.isdir(self.job.local_dirpath):
            print("PREPARING LOCAL DOWNLOAD DIR...")
            os.mkdir(self.job.local_dirpath)
        if self.storage_mode == "local" or os.path.isfile(self.job.local_graph_filepath):
            print("LOADING GRAPH FROM LOCAL FILE...")
            return read_gpickle(self.job.local_graph_filepath)
        elif self.storage_mode == "remote":
            print("LOADING GRAPH FROM REMOTE STORAGE...")
            self.job.gcs_service.download(self.job.gcs_graph_filepath, self.job.local_graph_filepath)
            return read_gpickle(self.job.local_graph_filepath)

    def report(self):
        """Print the graph's type and node/edge counts."""
        print("GRAPH:", type(self.graph))
        print("NODES:", fmt_n(len(self.graph.nodes))) # self.graph.number_of_nodes()
        print("EDGES:", fmt_n(len(self.graph.edges))) # self.graph.number_of_edges()
        #print("SIZE:", fmt_n(self.graph.size())) # same as edges
if __name__ == "__main__":
    # load a previously-built graph (per JOB_ID / STORAGE_MODE env vars) and summarize it
    analyzer = GraphAnalyzer()
    print(analyzer)
    analyzer.report()
| 2,266 | 30.486111 | 106 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/model_promotion.py | import os
from app.nlp.model_storage import ModelStorage

# dirpath of the trained model to promote (see ModelStorage.promote_model)
MODEL_DIRPATH = os.getenv("MODEL_DIRPATH", default="tweet_classifier/models/logistic_regression/2020-09-08-1229")

if __name__ == "__main__":
    # copy the model's files to the "current_best" location (locally and in GCS)
    storage = ModelStorage(dirpath=MODEL_DIRPATH)
    storage.promote_model()
| 281 | 22.5 | 113 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/model_storage.py | import os
import pickle
import json
from pprint import pprint
from shutil import copytree
from memory_profiler import profile
from app import DATA_DIR, seek_confirmation
from app.file_storage import FileStorage
from app.decorators.datetime_decorators import logstamp
MODELS_DIRPATH = "tweet_classifier/models" # os.path.join(DATA_DIR, "tweet_classifier", "models")
EXAMPLE_MODEL_DIRPATH = f"{MODELS_DIRPATH}/example"
BEST_MODEL_DIRPATH = f"{MODELS_DIRPATH}/current_best"
class ModelStorage(FileStorage):
    """Persists a trained classifier, its vectorizer, and its scores, locally and to GCS."""

    def __init__(self, dirpath=EXAMPLE_MODEL_DIRPATH):
        super().__init__(dirpath=dirpath)
        self.local_model_filepath = os.path.join(self.local_dirpath, "model.gpickle")
        self.gcs_model_filepath = os.path.join(self.gcs_dirpath, "model.gpickle")

        self.local_vectorizer_filepath = os.path.join(self.local_dirpath, "vectorizer.gpickle")
        self.gcs_vectorizer_filepath = os.path.join(self.gcs_dirpath, "vectorizer.gpickle")

        self.local_scores_filepath = os.path.join(self.local_dirpath, "scores.json")
        self.gcs_scores_filepath = os.path.join(self.gcs_dirpath, "scores.json")

    #
    # LOCAL STORAGE
    #

    def write_model(self, model):
        print(logstamp(), "WRITING MODEL TO LOCAL FILE...")
        with open(self.local_model_filepath, "wb") as f:
            pickle.dump(model, f)

    def read_model(self):
        print(logstamp(), "READING MODEL FROM LOCAL FILE...")
        with open(self.local_model_filepath, "rb") as f:
            return pickle.load(f)

    def write_vectorizer(self, vectorizer):
        print(logstamp(), "WRITING VECTORIZER TO LOCAL FILE...")
        with open(self.local_vectorizer_filepath, "wb") as f:
            pickle.dump(vectorizer, f)

    def read_vectorizer(self):
        print(logstamp(), "READING VECTORIZER FROM LOCAL FILE...")
        with open(self.local_vectorizer_filepath, "rb") as f:
            return pickle.load(f)

    def write_scores(self, scores):
        print(logstamp(), "WRITING SCORES TO LOCAL FILE...")
        with open(self.local_scores_filepath, "w") as f:
            json.dump(scores, f)

    #
    # REMOTE STORAGE
    #

    def upload_model(self):
        self.upload_file(self.local_model_filepath, self.gcs_model_filepath)

    def download_model(self):
        # BUGFIX: was calling upload_file(gcs, local), which pushed in the wrong
        # direction; assumes FileStorage exposes download_file as the counterpart
        # of upload_file -- TODO confirm
        self.download_file(self.gcs_model_filepath, self.local_model_filepath)

    def upload_vectorizer(self):
        self.upload_file(self.local_vectorizer_filepath, self.gcs_vectorizer_filepath)

    def download_vectorizer(self):
        # BUGFIX: same wrong-direction call as download_model (see above)
        self.download_file(self.gcs_vectorizer_filepath, self.local_vectorizer_filepath)

    def upload_scores(self):
        self.upload_file(self.local_scores_filepath, self.gcs_scores_filepath)

    #
    # CONVENIENCE METHODS
    #

    def save_model(self, model):
        """Write the model locally and, when online (self.wifi), upload it to GCS."""
        self.write_model(model)
        if self.wifi:
            self.upload_model()

    #@profile
    def load_model(self):
        """Assumes the model already exists and is saved locally or remotely"""
        if not os.path.isfile(self.local_model_filepath):
            self.download_model()
        return self.read_model()

    def save_vectorizer(self, vectorizer):
        """Write the vectorizer locally and, when online (self.wifi), upload it to GCS."""
        self.write_vectorizer(vectorizer)
        if self.wifi:
            self.upload_vectorizer()

    #@profile
    def load_vectorizer(self):
        """Assumes the vectorizer already exists and is saved locally or remotely"""
        if not os.path.isfile(self.local_vectorizer_filepath):
            self.download_vectorizer()
        return self.read_vectorizer()

    def save_scores(self, scores):
        """Write the scores locally and, when online (self.wifi), upload them to GCS."""
        self.write_scores(scores)
        if self.wifi:
            self.upload_scores()

    #
    # MODEL PROMOTION
    #

    def promote_model(self, destination=BEST_MODEL_DIRPATH):
        """Copy this model's files (GCS blobs and local files) to the destination dir."""
        blobs = list(self.gcs_service.bucket.list_blobs())
        matching_blobs = [blob for blob in blobs if self.dirpath in blob.name]
        print("MODEL FILES TO PROMOTE...")
        pprint(matching_blobs)
        seek_confirmation()

        print("PROMOTING GCS MODEL FILES...")
        for blob in matching_blobs:
            file_name = blob.name.split("/")[-1] #> 'model.gpickle'
            new_path = self.compile_gcs_dirpath(f"{destination}/{file_name}") #f"storage/data/{destination}/{file_name}"
            self.gcs_service.bucket.copy_blob(blob, destination_bucket=self.gcs_service.bucket, new_name=new_path)

        print("PROMOTING LOCAL MODEL FILES...")
        local_destination = self.compile_local_dirpath(destination)
        local_source = self.local_dirpath
        copytree(local_source, local_destination, dirs_exist_ok=True)
if __name__ == "__main__":
    # smoke test: round-trip an untrained vectorizer/model through storage
    from sklearn.linear_model import LogisticRegression
    from sklearn.feature_extraction.text import TfidfVectorizer
    storage = ModelStorage()
    tv = TfidfVectorizer()
    storage.save_vectorizer(tv)
    model = LogisticRegression()
    storage.save_model(model)
    storage.save_scores({"accy":0.999, "features": 100})
    same_tv = storage.load_vectorizer()
    print(type(same_tv))
    same_model = storage.load_model()
    print(type(same_model))
    # TODO: look at all models and ask if we want to promote one to be the "current_best"
| 5,211 | 32.197452 | 120 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/bulk_predict.py | import os
from app import seek_confirmation
from app.job import Job
from app.bq_service import BigQueryService
from app.nlp.model_storage import ModelStorage, MODELS_DIRPATH
MODEL_NAME = os.getenv("MODEL_NAME", default="current_best")
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
if __name__ == "__main__":
    # load the trained vectorizer + classifier, then label every unlabeled status in batches
    storage = ModelStorage(dirpath=f"{MODELS_DIRPATH}/{MODEL_NAME}")
    tv = storage.load_vectorizer()
    clf = storage.load_model()

    bq_service = BigQueryService()
    print("DESTROYING PREDICTIONS TABLE???")
    seek_confirmation()
    print("DESTROYING PREDICTIONS TABLE...")
    bq_service.destructively_migrate_2_community_predictions_table()

    job = Job()
    job.start()
    ids_batch = []
    statuses_batch = []
    for row in bq_service.fetch_unlabeled_statuses_in_batches(limit=LIMIT):
        ids_batch.append(row["status_id"])
        statuses_batch.append(row["status_text"])
        job.counter += 1
        if job.counter % BATCH_SIZE == 0:
            # full batch: vectorize, predict, upload, reset
            results = clf.predict(tv.transform(statuses_batch))
            batch = [{"status_id": s, "predicted_community_id": int(i)} for s, i in zip(ids_batch, results)]
            bq_service.upload_predictions_in_batches(batch)
            job.progress_report()
            ids_batch = []
            statuses_batch = []
            batch = []
    if len(statuses_batch) > 0:
        # final partial batch
        results = clf.predict(tv.transform(statuses_batch))
        batch = [{"status_id": s, "predicted_community_id": int(i)} for s, i in zip(ids_batch, results)]
        bq_service.upload_predictions_in_batches(batch)
        job.progress_report()
        ids_batch = []
        statuses_batch = []
        batch = []
    job.end()
| 1,743 | 29.596491 | 108 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/model_training.py |
import os
from datetime import datetime
from pprint import pprint
from pandas import DataFrame
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report # accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.pipeline import Pipeline
#from sklearn.model_selection import GridSearchCV
from app import APP_ENV, DATA_DIR, seek_confirmation
from app.job import Job
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService
from app.nlp.model_storage import ModelStorage, MODELS_DIRPATH
LIMIT = os.getenv("LIMIT") # just used to get smaller datasets for development purposes
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
def get_tweets():
    """Fetch all labeled tweets from BigQuery (honoring LIMIT) and return them as a DataFrame."""
    bq_service = BigQueryService()
    print("LIMIT:", LIMIT)
    job = Job()
    tweets = []
    job.start()
    for row in bq_service.fetch_labeled_tweets_in_batches(limit=LIMIT):
        tweets.append(dict(row))
        job.counter+=1
        if job.counter % BATCH_SIZE == 0:
            job.progress_report()
    job.end()
    print("FETCHED TWEETS:", fmt_n(len(tweets)))
    return DataFrame(tweets)
if __name__ == "__main__":
    # fetch labeled tweets and make a stratified train/test split
    tweets_df = get_tweets()
    train_df, test_df = train_test_split(tweets_df, stratify=tweets_df["community_id"], test_size=0.2, random_state=99)
    print("TEST/TRAIN SPLIT:", fmt_n(len(train_df)), fmt_n(len(test_df))) # consider: THREE-WAY SPLIT (test/train/eval)

    print("--------------------------")
    print("TRAINING DATA...")
    print(fmt_n(len(train_df)))
    print(train_df.head())
    print(train_df["community_id"].value_counts()) # should ideally be around equal for each class!
    training_text = train_df["status_text"]
    training_labels = train_df["community_id"]

    print("--------------------------")
    print("TESTING DATA...")
    print(fmt_n(len(test_df)))
    print(test_df["community_id"].value_counts())
    test_text = test_df["status_text"]
    test_labels = test_df["community_id"]

    print("--------------------------")
    print("VECTORIZING...")
    # fit TF-IDF on training text only, then transform both splits with it
    tv = TfidfVectorizer()
    tv.fit(training_text)
    print("FEATURES / TOKENS:", fmt_n(len(tv.get_feature_names())))
    training_matrix = tv.transform(training_text)
    print("FEATURE MATRIX (TRAIN):", type(training_matrix), training_matrix.shape)
    test_matrix = tv.transform(test_text)
    print("FEATURE MATRIX (TEST):", type(test_matrix), test_matrix.shape)

    #
    # MODELS (CUSTOM PIPELINE)
    #

    models = {
        "logistic_regression": LogisticRegression(random_state=99),
        "multinomial_nb": MultinomialNB()
    }
    #for model_name, model in models.values(): # TypeError: cannot unpack non-iterable LogisticRegression object
    for model_name in models.keys():
        model = models[model_name]
        print("--------------------------")
        print(f"{model_name.upper()}...")
        print(model)
        print("TRAINING...")
        model.fit(training_matrix, training_labels)

        print("TRAINING SCORES...")
        training_predictions = model.predict(training_matrix)
        training_scores = classification_report(training_labels, training_predictions, output_dict=True)
        print("ACCY:", training_scores["accuracy"])
        pprint(training_scores)

        print("TEST SCORES...")
        test_predictions = model.predict(test_matrix)
        test_scores = classification_report(test_labels, test_predictions, output_dict=True)
        print("ACCY:", test_scores["accuracy"])
        pprint(test_scores)

        print("SAVING MODEL FILES...")
        model_id = ("dev" if APP_ENV == "development" else datetime.now().strftime("%Y-%m-%d-%H%M")) # overwrite same model in development
        storage = ModelStorage(dirpath=f"{MODELS_DIRPATH}/{model_name}/{model_id}")
        storage.save_vectorizer(tv)
        storage.save_model(model)
        storage.save_scores({
            "model_name": model_name,
            "model_id": model_id,
            "features": len(tv.get_feature_names()),
            "training_matrix": training_matrix.shape,
            "test_matrix": test_matrix.shape,
            "training_scores": training_scores,
            "test_scores": test_scores
        })
| 4,403 | 35.098361 | 138 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/client.py | import os
from app.nlp.model_storage import ModelStorage, BEST_MODEL_DIRPATH
if __name__ == "__main__":
    # interactive REPL: classify user-supplied status texts with the current best model
    storage = ModelStorage(dirpath=BEST_MODEL_DIRPATH)
    tv = storage.load_vectorizer()
    print(type(tv))
    print("FEATURES / TOKENS:", len(tv.get_feature_names())) #> 3842
    clf = storage.load_model()
    print(type(clf))
    while True:
        status_text = input("Status Text: ")
        if not status_text:
            # empty input exits the loop
            print("THANKS! COME AGAIN!")
            break
        matrix = tv.transform([status_text])
        #print(matrix)
        result = clf.predict(matrix)
        print("PREDICTED COMMUNITY ID:", result[0])
| 649 | 22.214286 | 68 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/basilica/embedder.py | import os
from app.decorators.datetime_decorators import logstamp
from app.job import Job
from app.bq_service import BigQueryService
from app.nlp.basilica.service import BasilicaService
MIN_VAL = float(os.getenv("MIN_VAL", default="0.0"))
MAX_VAL = float(os.getenv("MAX_VAL", default="1.0"))
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="1_000"))
class BasilicaEmbedder(Job):
    """Job that streams unembedded statuses from BigQuery, embeds them via Basilica,
    and uploads the embeddings back to BigQuery in batches."""

    def __init__(self):
        self.bq_service = BigQueryService()
        self.bas_service = BasilicaService()

        print("-------------------")
        print("BASILICA EMBEDDER...")
        print(" LIMIT:", LIMIT)
        print(" BATCH SIZE:", BATCH_SIZE)

        Job.__init__(self)

    def perform(self):
        """Stream statuses, flushing each full batch (and the final partial batch)."""
        self.start()

        #self.bq_service.destructively_migrate_basilica_embeddings_table()

        batch = []
        fetch_in_batches = not (LIMIT and int(LIMIT) <= 200_000) # straight query if a small limit is set
        for row in self.bq_service.fetch_basilica_embedless_partitioned_statuses(min_val=MIN_VAL, max_val=MAX_VAL, limit=LIMIT, in_batches=fetch_in_batches):
            batch.append(dict(row))

            if len(batch) >= BATCH_SIZE: # FULL BATCH
                self.counter += self.save_batch(batch)
                self.progress_report()
                batch = []

        # BUGFIX: was `if len(batch) >= 0`, which is always true, so an EMPTY
        # final batch was still sent to Basilica / BigQuery
        if batch: # LAST BATCH (POSSIBLY NOT FULL)
            self.counter += self.save_batch(batch)
            self.progress_report()
            batch = []

        self.end()

    def save_batch(self, batch):
        """Embed one batch and upload it; returns the number of rows saved (0 on failure)."""
        try:
            embeddings = list(self.bas_service.embed_tweets([row["status_text"] for row in batch]))
            print(logstamp(), "EMBEDDINGS COMPLETE!")
        except Exception as err:
            # best-effort: skip a batch Basilica can't embed rather than killing the job
            print(logstamp(), "OOPS", err, "SKIPPING...")
            return 0

        # attach each embedding in place and drop the raw text before upload
        for i, row in enumerate(batch):
            row["embedding"] = embeddings[i]
            del row["status_text"]

        self.bq_service.upload_basilica_embeddings(batch)
        print(logstamp(), "UPLOAD COMPLETE!")
        return len(batch)
if __name__ == "__main__":
    # run the embedding job end-to-end
    embedder = BasilicaEmbedder()
    embedder.perform()
| 2,164 | 30.376812 | 157 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/basilica/embedder_parallel.py | import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import current_thread #, #Thread, Lock, BoundedSemaphore
from app.job import Job
from app.decorators.datetime_decorators import logstamp
from app.bq_service import BigQueryService, split_into_batches
from app.nlp.basilica.service import BasilicaService
MIN_VAL = float(os.getenv("MIN_VAL", default="0.0"))
MAX_VAL = float(os.getenv("MAX_VAL", default="1.0"))
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="1_000"))
PARALLEL = (os.getenv("PARALLEL", default="true") == "true")
MAX_THREADS = int(os.getenv("MAX_THREADS", default=10))
def perform(batch, bq_service, bas_service):
    """Embed one batch of statuses and upload the embeddings (run in a worker thread).

    Returns the number of rows uploaded, or 0 if embedding failed (best-effort skip).
    """
    thread_name = current_thread().name
    print(logstamp(), thread_name, "...")
    try:
        embeddings = list(bas_service.embed_tweets([row["status_text"] for row in batch], timeout=100))
        print(logstamp(), thread_name, "EMBEDDINGS COMPLETE!")
    except Exception as err:
        print(logstamp(), thread_name, "OOPS", err, "SKIPPING...")
        return 0

    # BUGFIX: previously each `dict(row)` copy was modified and then discarded,
    # and the ORIGINAL rows (without embeddings, still carrying status_text)
    # were uploaded. Build the records explicitly and upload those instead.
    records = []
    for i, row in enumerate(batch):
        record = dict(row)
        record["embedding"] = embeddings[i]
        del record["status_text"]
        records.append(record)

    bq_service.upload_basilica_embeddings(records)
    return len(records)
if __name__ == "__main__":
    print("-------------------")
    print("BASILICA EMBEDDER...")
    print(" MIN PARTITION VAL:", MIN_VAL)
    print(" MAX PARTITION VAL:", MAX_VAL)
    print(" LIMIT:", LIMIT)
    print(" BATCH SIZE:", BATCH_SIZE)

    bq_service = BigQueryService()
    bas_service = BasilicaService()

    # phase 1: fetch all rows up-front and split them into batches
    job = Job()
    job.start()
    records = list(bq_service.fetch_basilica_embedless_partitioned_statuses(min_val=MIN_VAL, max_val=MAX_VAL, limit=LIMIT))
    job.counter = len(records)
    batches = list(split_into_batches(records, BATCH_SIZE))
    print("BATCHES:", len(batches))
    job.end()

    del records # free the flat list; the batches hold the rows now

    # phase 2: fan the batches out across worker threads
    job.start()
    with ThreadPoolExecutor(max_workers=MAX_THREADS, thread_name_prefix="THREAD") as executor:
        futures = [executor.submit(perform, batch, bq_service, bas_service) for batch in batches]
        for future in as_completed(futures):
            job.counter += future.result()
            job.progress_report()
    job.end()
| 2,381 | 31.630137 | 123 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/nlp/basilica/service.py |
import basilica
import os
from dotenv import load_dotenv
load_dotenv()
API_KEY = os.getenv("BASILICA_API_KEY")
class BasilicaService:
    """Thin wrapper around the Basilica embeddings API client."""
    def __init__(self):
        self.client = basilica.Connection(API_KEY)
        print("-------------------------")
        print("BASILICA SERVICE...")
        print("  CLIENT:", type(self.client)) #> <class 'basilica.Connection'>
    def embed_tweets(self, status_texts, timeout=100):
        """Embed a batch of status texts with Basilica's twitter model; yields one vector per text."""
        return self.client.embed_sentences(status_texts, model="twitter", timeout=timeout) #> generator object
if __name__ == "__main__":
    # manual smoke test against the live Basilica API
    bas = BasilicaService()

    print("---------")
    sentence = "Hello again"
    print(sentence)
    sent_embeddings = bas.client.embed_sentence(sentence)
    print(list(sent_embeddings))

    print("---------")
    sentences = ["Hello world!", "How are you?"]
    print(sentences)
    # it is more efficient to make a single request for all sentences...
    embeddings = bas.client.embed_sentences(sentences)
    print("EMBEDDINGS...")
    print(type(embeddings))
    print(list(embeddings)) # [[0.8556405305862427, ...], ...]

    embeddings = bas.client.embed_sentences(sentences, model="twitter", timeout=100)
    print("EMBEDDINGS...")
    print(type(embeddings))
    print(list(embeddings))
| 1,272 | 26.673913 | 110 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_collection/twitter_scraper.py |
import os
from pprint import pprint
from http.cookiejar import CookieJar
import urllib
from dotenv import load_dotenv
from bs4 import BeautifulSoup
load_dotenv()
SCREEN_NAME = os.getenv("SCREEN_NAME", default="s2t2")
MAX_FRIENDS = int(os.getenv("MAX_FRIENDS", default=2000)) # the max number of friends to fetch per user
VERBOSE_SCRAPER = os.getenv("VERBOSE_SCRAPER", default="false") == "true"
def get_friends(screen_name=SCREEN_NAME, max_friends=MAX_FRIENDS):
    """For a given user, fetches screen names of users they follow, up to max_friends.

    FIX: the result is now truncated to max_friends -- previously the last page
    fetched could push the returned list past the documented limit. Also removed
    the unused page counter.
    """
    friend_names = []
    next_page_id = None
    while True:
        page, next_page_id = next_page_of_friends(screen_name, next_page_id)
        friend_names += page
        if len(friend_names) >= max_friends or next_page_id is None:
            break
    return friend_names[:max_friends]
def next_page_of_friends(screen_name, next_cursor_id=None):
    """Scrape one page of the user's "following" list from mobile.twitter.com.

    Params:
        screen_name (str) the account whose friends to scrape
        next_cursor_id (str) pagination cursor from a previous call, or None for page one

    Returns (friend_names, next_cursor_id); next_cursor_id is None on the last page.
    Returns ([], None) if the page can't be fetched (private account or renamed user).
    """
    request_url = f"https://mobile.twitter.com/{screen_name}/following"
    if next_cursor_id:
        request_url += f"?cursor={next_cursor_id}"

    cookie_jar = CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
    headers = [
        ('Host', "twitter.com"),
        ('User-Agent', "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"),
        ('Accept', "application/json, text/javascript, */*; q=0.01"),
        ('Accept-Language', "de,en-US;q=0.7,en;q=0.3"),
        ('X-Requested-With', "XMLHttpRequest"),
        ('Referer', request_url),
        ('Connection', "keep-alive")
    ]
    opener.addheaders = headers
    #print(type(opener)) #> <class 'urllib.request.OpenerDirector'>

    try:
        response = opener.open(request_url)
        #print(type(response)) #> <class 'http.client.HTTPResponse'>
    except urllib.error.HTTPError as err: # consider allowing error to bubble up and be handled at the worker level (friend_collector.py)
        # BUGFIX: removed a leftover breakpoint() here, which froze unattended runs
        if VERBOSE_SCRAPER:
            print("FRIENDS PAGE NOT FOUND:", screen_name.upper())
        return [], None

    response_body = response.read()
    #print(type(response_body)) #> bytes
    soup = BeautifulSoup(response_body.decode(), "html.parser")
    #print(type(soup)) #> <class 'bs4.BeautifulSoup'>
    #print(soup.prettify())

    #
    # <span class="count">262</span>
    #
    #friends_count = int(soup.find("span", "count").text)
    #print("FRIENDS COUNT (TOTAL / EXPECTED):", friends_count)

    #
    # <form action="/i/guest/follow/SCREEN_NAME_X" method="post">
    #    <span class="m2-auth-token">
    #        <input name="authenticity_token" type="hidden" value="..."/>
    #    </span>
    #    <span class="w-button-common w-button-follow">
    #        <input alt="..." src="https://ma.twimg.com/twitter-mobile/.../images/sprites/followplus.gif" type="image"/>
    #    </span>
    # </form>
    #
    forms = soup.find_all("form")
    substr = "/i/guest/follow/"
    friend_names = [f.attrs["action"].replace(substr, "") for f in forms if substr in f.attrs["action"]]
    if VERBOSE_SCRAPER: print("FRIENDS PAGE:", len(friend_names)) #> 20

    try:
        #
        # <div class="w-button-more">
        #    <a href="/SCREEN_NAME/following?cursor=CURSOR_ID">...</a>
        # </div>
        #
        next_link = soup.find("div", "w-button-more").find("a")
        next_cursor_id = next_link.attrs["href"].split("/following?cursor=")[-1]
        #print("NEXT CURSOR ID:", next_cursor_id)
    except AttributeError as err:
        # handle AttributeError: 'NoneType' object has no attribute 'find'
        # because the last page doesn't have a next page or corresponding "w-button-more"
        next_cursor_id = None

    return friend_names, next_cursor_id
if __name__ == "__main__":
    # manual smoke-test: scrape the friends list for the configured SCREEN_NAME
    print("--------------------")
    print("USER:", SCREEN_NAME)
    print("MAX_FRIENDS:", MAX_FRIENDS)
    friend_names = get_friends()
    print("FRIENDS:", len(friend_names))
| 4,069 | 35.666667 | 137 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_collection/investigate_threadpool_executor.py | # super h/t: https://www.youtube.com/watch?v=IEEhzQoKtQU
import os
import time
import random
from dotenv import load_dotenv
from concurrent.futures import ThreadPoolExecutor, as_completed # see: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
from threading import Thread, Lock, BoundedSemaphore, current_thread
load_dotenv()
LIMIT = int(os.getenv("USERS_LIMIT", default=500))
MAX_THREADS = int(os.getenv("MAX_THREADS", default=200)) # heroku supports max 256, see: https://devcenter.heroku.com/articles/dynos#process-thread-limits
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=20))
def fetch_friends(user_id, sleep_seconds=1):
    """Fake friend-fetching job used to investigate ThreadPoolExecutor behavior.

    Sleeps to simulate network latency, then reports which (1-indexed) pool
    thread ran the job, which user it was for, and how long it pretended to take.

    Assumes the pool was created with thread_name_prefix="THREAD", so thread
    names look like "THREAD_0", "THREAD_1", etc.
    """
    worker_name = current_thread().name  # e.g. "THREAD_0"
    worker_number = int(worker_name.replace("THREAD_", "")) + 1
    time.sleep(sleep_seconds)
    return {"thread_id": worker_number, "user_id": user_id, "duration": sleep_seconds}
if __name__ == "__main__":
    # experiment: fan out LIMIT fake jobs across MAX_THREADS pool threads,
    # collecting results as they complete and clearing a batch every BATCH_SIZE
    user_ids = range(1, LIMIT + 1)
    start_at = time.perf_counter()
    print(f"USERS: {len(user_ids)}")
    print(f"THREADS: {MAX_THREADS}")
    with ThreadPoolExecutor(max_workers=MAX_THREADS, thread_name_prefix="THREAD") as executor:
        #print("EXECUTOR:", type(executor))
        #results = executor.map(fetch_friends, user_ids, random.choice([1,5]))
        #for result in results:
        #    print(result)
        #futures = [executor.submit(fetch_friends, user_id, random.choice([1,5])) for user_id in user_ids]
        #for future in futures:
        #    print(future.result())
        #batch = BoundedSemaphore(5)
        #lock = Lock()
        batch = []
        results = []
        # random sleep durations simulate variable network latency per job
        futures = [executor.submit(fetch_friends, user_id, random.choice([1,5,10])) for user_id in user_ids]
        # as_completed yields futures in finish order, not submission order
        for index, future in enumerate(as_completed(futures)):
            result = future.result()
            print(result)
            batch.append(result)
            results.append(result)
            if len(batch) == BATCH_SIZE:
                print(f"CLEARING BATCH OF {len(batch)}...")
                #time.sleep(5)
                batch = []
    # compare wall-clock time against the sum of per-job durations to see the
    # speedup gained from concurrency
    end_at = time.perf_counter()
    clock_seconds = round(end_at - start_at, 2)
    total_seconds = sum([result["duration"] for result in results])
    print(f"PROCESSED {len(user_ids)} USERS IN {clock_seconds} SECONDS (OTHERWISE {total_seconds} SECONDS)")
| 2,319 | 37.032787 | 166 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_collection/twint_scraper.py |
# import os
# from pprint import pprint
#
# import twint
# from dotenv import load_dotenv
#
# load_dotenv()
#
# SCREEN_NAME = os.getenv("TWITTER_SCREEN_NAME", default="elonmusk") # just one to use for testing purposes
# VERBOSE = (os.getenv("VERBOSE_SCRAPER", default="false") == "true") # set like... VERBOSE_SCRAPER="true"
# MAX_FRIENDS = int(os.getenv("MAX_FRIENDS", default=2000)) # the max number of friends to get for each user
#
# class TwitterScraper():
#
# def __init__(self, screen_name, max_friends=MAX_FRIENDS, verbose=VERBOSE):
# """ Params:
# screen_name (str) like "barackobama" or "s2t2"
# max_friends (int)
# verbose (bool)
# """
# self.screen_name = screen_name
# self.max_friends = max_friends
# self.verbose = verbose
#
# self.basic_config = twint.Config()
# self.basic_config.Username = screen_name
# self.basic_config.Limit = max_friends
# self.basic_config.Hide_output = (verbose == False)
# self.basic_config.Store_object = True
#
# def get_friend_names(self):
# """ (FASTER APPROACH)
# Returns a list of the user's friends' screen names (or empty list if the account was private)
# """
# config = self.basic_config
# config.User_full = False # a faster approach, but only has screen names
# config.Store_object_follow_list = [] # initialize a place to store the screen names
# twint.run.Following(config)
# return config.Store_object_follow_list
#
# def get_friend_ids(self):
# """ (SLOWER APPROACH)
# Returns a list of the user's friends' ids (or empty list if the account was private)
# """
# config = self.basic_config
# config.User_full = True # a somewhat slow approach, but has ids
# twint.run.Following(config)
# return [obj["id"] for obj in twint.output.users_list]
#
# if __name__ == "__main__":
#
# scraper = TwitterScraper(SCREEN_NAME)
# print("USER:", scraper.screen_name)
#
# friend_names = scraper.get_friend_names()
# print("FRIENDS COUNT:", len(friend_names))
#
| 2,169 | 35.166667 | 108 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_collection/twitter_service.py |
# import os
# from pprint import pprint
# from dotenv import load_dotenv
# import tweepy
#
# load_dotenv()
#
# CONSUMER_KEY = os.getenv("TWITTER_CONSUMER_KEY", default="OOPS")
# CONSUMER_SECRET = os.getenv("TWITTER_CONSUMER_SECRET", default="OOPS")
# ACCESS_KEY = os.getenv("TWITTER_ACCESS_TOKEN", default="OOPS")
# ACCESS_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET", default="OOPS")
#
# SCREEN_NAME = os.getenv("TWITTER_SCREEN_NAME", default="elonmusk") # just one to use for testing purposes
#
# def twitter_api():
# """
# But this one might be faster?
# See:
# https://developer.twitter.com/en/docs/basics/rate-limiting
# """
# auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
# auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
# api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# return api
#
# def twitter_faster_api():
# """
# Use auth with less rate-limiting.
# See:
# http://docs.tweepy.org/en/v3.8.0/auth_tutorial.html
# https://bhaskarvk.github.io/2015/01/how-to-use-twitters-search-rest-api-most-effectively./
# """
# auth = tweepy.AppAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
# api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# return api
#
# def get_friends(screen_name=None, user_id=None, max_friends=2000):
# """
# Params:
# screen_name like "barackobama" or "s2t2" or
# max_friends for now, for performacne, because we can always go back later and re-scrape those who hit this max
#
# Returns a list of the user's friend_ids (or empty list if the account was private)
#
# See: http://docs.tweepy.org/en/v3.8.0/api.html#API.friends_ids
# https://github.com/tweepy/tweepy/blob/3733fd673b04b9aa193886d6b8eb9fdaf1718341/tweepy/api.py#L542-L551
# http://docs.tweepy.org/en/v3.8.0/cursor_tutorial.html
# https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friends-ids
# https://developer.twitter.com/en/docs/basics/cursoring
# """
#
# api = twitter_faster_api() # todo: OOP
# #response = api.friends_ids(screen_name, cursor=-1)
# #friends_ids = response[0] #> list of max 5000 user_ids
# #pagination = response[1] #> (0, 1302882473214455035)
#
# if screen_name is not None:
# #print("GETTING FRIENDS FOR SCREEN NAME:", screen_name.upper())
# cursor = tweepy.Cursor(api.friends_ids, screen_name=screen_name, cursor=-1)
# elif user_id is not None:
# #print("GETTING FRIENDS FOR USER:", user_id)
# cursor = tweepy.Cursor(api.friends_ids, user_id=user_id, cursor=-1)
# else:
# print("OOPS PLEASE PASS SCREEN NAME OR USER ID")
# return None
# #print(cursor)
#
# friend_ids = []
# try:
# for friend_id in cursor.items(max_friends):
# friend_ids.append(friend_id)
# except tweepy.error.TweepError as err:
# print("OOPS", err) #> "Not authorized." if user is private / protected (e.g. 1003322728890462209)
# return friend_ids
#
# if __name__ == "__main__":
#
#
# api = twitter_api()
#
# cursor = tweepy.Cursor(api.friends_ids, screen_name="barackobama", cursor=-1)
# for page in cursor.pages(3):
# print(type(page)) #> list
# print(len(page)) #> 5000
#
# exit()
#
# print("-------------")
# print(SCREEN_NAME)
# api = twitter_api()
# friend_ids = api.friends_ids(SCREEN_NAME)
# print(len(friend_ids))
# faster_api = twitter_faster_api()
# friend_ids = faster_api.friends_ids(SCREEN_NAME)
# print(len(friend_ids))
#
# friend_ids = get_friends(screen_name=SCREEN_NAME)
# print(len(friend_ids))
#
# friend_ids = get_friends(user_id=148529707)
# print(len(friend_ids))
#
| 3,855 | 36.076923 | 121 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_collection/batch_per_thread.py |
import time
from threading import current_thread, BoundedSemaphore
from concurrent.futures import ThreadPoolExecutor #, as_completed
from app import SERVER_NAME, SERVER_DASHBOARD_URL
from app.email_service import send_email
from app.friend_collection import (MAX_THREADS, BATCH_SIZE, LIMIT, MIN_ID, MAX_ID,
user_with_friends, cautiously_initialized_storage_service, generate_timestamp
)
def split_into_batches(all_users, batch_size=BATCH_SIZE):
    """Lazily yields successive slices of all_users, each at most batch_size long.

    h/t: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    """
    cursor = 0
    while cursor < len(all_users):
        yield all_users[cursor : cursor + batch_size]
        cursor += batch_size
#def process_batch(user_rows):
# return [user_with_friends(user_row) for user_row in user_rows]
def process_and_save_batch(user_rows, bq, lock=None):
    """Scrapes the friends list for each row in the batch and stores the results in BigQuery.

    Params:
        user_rows (list) BigQuery rows, each with .user_id and .screen_name
        bq (BigQueryService) storage client used to persist the records
        lock unused -- leftover from an earlier synchronization attempt
             (see the commented-out acquire/release calls)

    Always returns True.
    """
    print(generate_timestamp(), "|", current_thread().name, "|", "PROCESSING...")
    #lock.acquire()
    records = []
    for user_row in user_rows:
        records.append(user_with_friends(user_row))
    bq.insert_user_friends(records)
    print(generate_timestamp(), "|", current_thread().name, "|", "PROCESSED BATCH OF", len(user_rows))
    #lock.release()
    return True
def send_completion_email():
    """Emails a notification that this server has finished its friend-collection
    work, so the operator knows to shut the machine down.

    Returns whatever the email service's send call returns.
    """
    email_subject = "[Impeachment Tweet Analysis] Friend Collection Complete!"
    email_html = f"""
    <h3>Nice!</h3>
    <p>Server '{SERVER_NAME}' has completed its work.</p>
    <p>So please shut it off so it can get some rest.</p>
    <p>
        <a href='{SERVER_DASHBOARD_URL}'>{SERVER_DASHBOARD_URL}</a>
    </p>
    <p>Thanks!</p>
    """
    return send_email(email_subject, email_html)
if __name__ == "__main__":
    service = cautiously_initialized_storage_service()
    # fetch users whose friends haven't been collected yet (optionally partitioned by id range)
    users = service.fetch_remaining_users(min_id=MIN_ID, max_id=MAX_ID, limit=LIMIT)
    print("FETCHED UNIVERSE OF", len(users), "USERS")
    if any(users):
        # pre-split the users into fixed-size batches; each batch is scraped
        # and saved start-to-finish by a single pool thread
        batches = list(split_into_batches(users))
        print(f"ASSEMBLED {len(batches)} BATCHES OF {BATCH_SIZE}")
        with ThreadPoolExecutor(max_workers=MAX_THREADS, thread_name_prefix="THREAD") as executor:
            for batch in batches:
                executor.submit(process_and_save_batch, batch, service)
    else:
        # nothing left to collect: notify the operator, then idle for a long
        # time so the (remote) server isn't auto-restarted into a crash loop
        send_completion_email()
        print("yeah")
        time.sleep(1)
        print("yeah")
        time.sleep(1)
        print("yeah")
        time.sleep(12 * 60 * 60) # twelve hours
| 2,328 | 35.390625 | 106 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/friend_collection/__init__.py |
import os
from datetime import datetime
from dotenv import load_dotenv
from threading import current_thread, BoundedSemaphore
from concurrent.futures import ThreadPoolExecutor, as_completed # see: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
from app import APP_ENV
from app.bq_service import BigQueryService, generate_timestamp
from app.friend_collection.twitter_scraper import get_friends, VERBOSE_SCRAPER, MAX_FRIENDS
load_dotenv()
MAX_THREADS = int(os.getenv("MAX_THREADS", default=10)) # the max number of threads to use, for concurrent processing
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=20)) # the max number of processed users to store in BQ at once (with a single insert API call)
MIN_ID = os.getenv("MIN_USER_ID") # if partitioning users, the lower bound of the partition
MAX_ID = os.getenv("MAX_USER_ID") # if partitioning users, the upper bound of the partition
LIMIT = os.getenv("USERS_LIMIT") # max number of users to fetch from the db
#VERBOSE_COLLECTOR = os.getenv("VERBOSE_COLLECTOR", default="true") == "true"
def user_with_friends(row):
    """Scrapes the friends list for one user row and packages it as a storable record.

    Params:
        row a BigQuery result row with .user_id and .screen_name attributes

    Returns a dict with the (sorted) friend screen names, their count, and
    start/end timestamps bracketing the scrape.
    """
    start_at = generate_timestamp()
    #print(f"{start_at} | {current_thread().name} | {row.user_id}")
    friend_names = sorted(get_friends(row.screen_name))
    end_at = generate_timestamp()
    print(f"{end_at} | {current_thread().name} | {row.user_id} | FRIENDS: {len(friend_names)}")
    record = {"user_id": row.user_id, "screen_name": row.screen_name}
    record["friend_count"] = len(friend_names)
    record["friend_names"] = friend_names
    record["start_at"] = start_at
    record["end_at"] = end_at
    return record
def cautiously_initialized_storage_service():
    """Constructs a BigQueryService, echoes the storage / worker / scraper
    configuration to the console, and -- in development only -- requires
    interactive operator confirmation before returning, so a misconfigured
    run can be aborted before touching the warehouse."""
    service = BigQueryService()
    print("-------------------------")
    print("BQ CONFIG...")
    print(" BIGQUERY DATASET:", service.dataset_address.upper())
    print(" DESTRUCTIVE MIGRATIONS:", service.destructive)
    print(" VERBOSE QUERIES:", service.verbose)
    print("-------------------------")
    print("WORKER CONFIG...")
    print(" MIN USER ID:", MIN_ID)
    print(" MAX USER ID:", MAX_ID)
    print(" USERS LIMIT:", LIMIT)
    print(" MAX THREADS:", MAX_THREADS)
    print(" BATCH SIZE:", BATCH_SIZE)
    print("-------------------------")
    print("SCRAPER CONFIG...")
    print(" VERBOSE SCRAPER:", VERBOSE_SCRAPER)
    print(" MAX FRIENDS:", MAX_FRIENDS)
    print("-------------------------")
    if APP_ENV == "development" and input("CONTINUE? (Y/N): ").upper() != "Y":
        print("EXITING...")
        exit()
    #service.init_tables() # did this originally, but moving to a manual migration strategy to prevent accidental deletions
    return service
if __name__ == "__main__":
    # submit one scrape job per user; results are accumulated into batches and
    # saved to BigQuery as they complete
    service = cautiously_initialized_storage_service()
    users = service.fetch_remaining_users(min_id=MIN_ID, max_id=MAX_ID, limit=LIMIT)
    print("FETCHED UNIVERSE OF", len(users), "USERS")
    with ThreadPoolExecutor(max_workers=MAX_THREADS, thread_name_prefix="THREAD") as executor:
        batch = []
        lock = BoundedSemaphore()
        futures = [executor.submit(user_with_friends, row) for row in users]
        print("FUTURE RESULTS", len(futures))
        for index, future in enumerate(as_completed(futures)):
            result = future.result()
            # OK, so this locking business:
            # ... prevents random threads from clearing the batch, which was causing results to almost never get stored, and
            # ... restricts a thread's ability to acquire access to the batch until another one has released it
            lock.acquire()
            batch.append(result)
            if (len(batch) >= BATCH_SIZE) or (index + 1 >= len(futures)): # when batch is full or is last
                print("-------------------------")
                print(f"SAVING BATCH OF {len(batch)}...")
                print("-------------------------")
                service.insert_user_friends(batch)
                batch = []
            lock.release()
| 4,005 | 42.075269 | 166 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/botcode/mpi_cluster_manager.py |
#import os
#from pprint import pprint
#
#from mpi4py import MPI
#import numpy as np
#from networkx import DiGraph
#
#class ClusterManager:
# def __init__(self):
# self.node_name = MPI.Get_processor_name()
# self.intracomm = MPI.COMM_WORLD
#
# def inspect(self):
# print("----------------------")
# print("CLUSTER MANAGER")
# #print("----------------------")
# #print(self.intracomm) #> <mpi4py.MPI.Intracomm object at 0x10ed94a70>
# #print(dict(self.intracomm.info)) #> {'mpi_assert_no_any_source': 'false', 'mpi_assert_allow_overtaking': 'false'}
# #print("----------------------")
# print(" NODE NAME:", self.node_name) #> 'MJs-MacBook-Air.local'
# print(" NODE RANK:", self.node_rank) #> 0
# print(" CLUSTER SIZE:", self.cluster_size) #> 1
# print(" MAIN NODE?:", self.is_main_node) #> True
#
# @property
# def node_rank(self):
# return self.intracomm.Get_rank()
#
# @property
# def cluster_size(self):
# return self.intracomm.Get_size()
#
# @property
# def is_main_node(self):
# return self.node_rank + 1 == self.cluster_size
#
#if __name__ == "__main__":
#
# manager = ClusterManager()
# manager.inspect()
#
#
# # results = []
# # for i in range(0, manager.cluster_size - 1): # this is a no-op because cluster size is 1 on the main node
# # # for i in range(0, 3): # this just hangs if running only on one node?
# # result = manager.intracomm.recv(source=i)
# # results.append(result)
# # print(results)
#
| 1,583 | 28.886792 | 123 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/botcode/classifier.py |
import os
from pandas import DataFrame
from app.friend_graphs.graph_analyzer import GraphAnalyzer
from app.decorators.number_decorators import fmt_n, fmt_pct
from app.botcode.investigation import classify_bot_probabilities
if __name__ == "__main__":
    # classify each user in the retweet graph as bot (1) or human (0) and
    # write the predictions (and the supporting per-user data) to CSV files
    manager = GraphAnalyzer()
    retweet_graph = manager.graph
    manager.report()
    print("--------------------")
    print("COMPUTING BOT PROBABILITIES...")
    bot_probabilities, user_data = classify_bot_probabilities(retweet_graph)
    df = DataFrame(bot_probabilities.items(), columns=["screen_name", "bot_probability"])
    #df.set_index("screen_name", inplace=True)
    users_count = len(df)
    # the classifier assigns exactly 0 or 1, so this counts the predicted bots
    bot_count = len(df[df.bot_probability == 1])
    print("TOTAL USERS:", fmt_n(users_count))
    print(f"BOTS: {fmt_n(bot_count)} ({fmt_pct(bot_count / users_count)})")
    df_full = DataFrame(user_data.values())
    print("--------------------")
    print("WRITING TO CSV...")
    # TODO: really we can generate multiple classifications, using different hyperparams
    # ... make separate "predictions" subdirectory,
    # ... and include a corresponding hyperparams json file for each CSV file
    predictions_dirpath = os.path.join(manager.local_dirpath, "preds")
    if not os.path.isdir(predictions_dirpath):
        os.mkdir(predictions_dirpath)
    df.to_csv(os.path.join(predictions_dirpath, "botcode_probabilities.csv"))
    df_full.to_csv(os.path.join(predictions_dirpath, "user_data.csv"))
tweet-analysis-2020 | tweet-analysis-2020-main/app/botcode/network_classifier_helper.py |
#
# AN ADAPTATION OF THE ORIGINAL BOTCODE (SEE THE "START" DIR)
#
import os
from dotenv import load_dotenv
import numpy as np
from networkx import DiGraph, minimum_cut
# load_dotenv()
# HYPERPARAMETERS
MU = float(os.getenv("MU", default="1")) # called "gamma" in the paper
ALPHA_1 = float(os.getenv("ALPHA_1", default="100"))
ALPHA_2 = float(os.getenv("ALPHA_2", default="100"))
ALPHA = [MU, ALPHA_1, ALPHA_2]
#N_ITERS = int(os.getenv("N_ITERS", default="1"))
#DIRNAME = os.getenv("DIRNAME", default="impeachment-dev")
#PRIORS_MODE = os.getenv("PRIORS_MODE", default="normal") # should be one of ["boto", "random_unif", "random_gaus"]
LAMBDA_1 = float(os.getenv("LAMBDA_1", default="0.8")) # called "lamba11" in the paper
LAMBDA_2 = float(os.getenv("LAMBDA_2", default="0.6")) # called "lambda00" in the paper
EPSILON = float(os.getenv("EPSILON", default="0.001")) # called "delta" in the paper. should be close to 0 (eg. 0.001) in order for lambda10 to be slightly > (lambda00 + lambda11 - 1).
RANDOM_SEED = int(os.getenv("RANDOM_SEED", default="0"))
np.random.seed(RANDOM_SEED)
def parse_bidirectional_links(graph, weight_attr="rt_count"):
    """
    For every edge in the retweet graph, determines whether the reverse edge
    also exists (i.e. whether the two users were retweeting each other).
    Adapted from the "getLinkDataRestrained" function in the "networkClassifierHELPER" file.

    Params:
        graph (networkx.DiGraph) a retweet graph,
            with each edge like: ("user", "retweeted_user", rt_count=10)

    Returns:
        a list of links, each like: ['user1', 'leader1', True, False, 4.0, 0]
        [0] the retweeting user
        [1] the retweeted user
        [2] always True (the forward edge exists by construction)
        [3] whether the retweeted user also retweeted the first user
        [4] weight of the forward edge (times [0] retweeted [1])
        [5] weight of the reverse edge, or 0 if there is none
    """
    weights = dict(((src, dst), data[weight_attr]) for src, dst, data in graph.edges(data=True))
    links = []
    for (user, retweeted_user), forward_weight in weights.items():
        reverse_weight = weights.get((retweeted_user, user))
        if reverse_weight is None:
            links.append([user, retweeted_user, True, False, forward_weight, 0])
        else:
            links.append([user, retweeted_user, True, True, forward_weight, reverse_weight])
    return links
def compute_link_energy(u1, u2, rt_count, in_graph, out_graph, alpha=ALPHA, alambda1=LAMBDA_1, alambda2=LAMBDA_2, epsilon=EPSILON):
    """
    Computes the joint energy potential between two users (the pairwise "psi"
    values later used as edge capacities in the energy graph).
    Copied unchanged from the "psi" function in the "networkClassifierHELPER" file.

    Params:
        u1 (int or str) unique identifier for user 1 (the retweeter)
        u2 (int or str) unique identifier for user 2 (the retweeted)
        rt_count (int) number of retweets from u1 to u2
        in_graph (dict of ints) in degrees of accounts in the retweet graph
        out_graph (dict of ints) out degrees of accounts in the retweet graph
        alpha (list of floats) hyperparams [mu, alpha1, alpha2]
        alambda1 (float) value of lambda11
        alambda2 (float) value of lambda00
        epsilon (float) small delta such that lambda10 = (lambda00 + lambda11 - 1 + delta)
            is slightly greater than (lambda00 + lambda11 - 1)

    Returns [val_00, val_01, val_10, val_11], the four pairwise potentials
    (all zero unless the pair is connected strongly enough -- see `temp` below).

    Raises ValueError when u1 has no out-degree or u2 has no in-degree, since
    by construction u1 must be retweeting and u2 must be getting retweeted.
    """
    hyperparam_const = (alambda2 + alambda1 - 1 + epsilon) #> 0.4009999999999999 with the default hyperparams
    # if user 1 is not retweeting or user2 is not getting retweeted...
    # ... expects user 1 to be retweeting
    # ... expects user 2 to be getting retweeted
    if out_graph[u1] == 0 or in_graph[u2] == 0:
        raise ValueError(f"Relationship problem: '{u1}' --> '{u2}'")
        #breakpoint()
    #here alpha is a vector of length three, psi decays according to a logistic sigmoid function
    val_00 = 0
    val_01 = 0
    val_10 = 0
    val_11 = 0
    temp_1 = alpha[1] / float(out_graph[u1]) - 1 #> e.g. 100 / 8 - 1 = 11.5
    temp_2 = alpha[2] / float(in_graph[u2]) - 1 #> e.g. 100 / 10 - 1 = 9.0
    temp = temp_1 + temp_2
    #print("TEMP", temp) #> 20.5 for the example values above
    # what would get the temp to be less than 10? degrees sufficiently high (>20) relative to ALPHA vals (100)
    if temp < 10:
        # sigmoid decay; exp(temp) grows fast, e.g.:
        # see: https://numpy.org/doc/stable/reference/generated/numpy.exp.html
        # np.exp(1) #> 2.718281828459045
        # np.exp(2) #> 7.38905609893065
        # np.exp(3) #> 20.085536923187668
        # np.exp(4) #> 54.598150033144236
        # np.exp(5) #> 148.4131591025766
        # np.exp(6) #> 403.4287934927351
        # np.exp(7) #> 1096.6331584284585
        # np.exp(8) #> 2980.9579870417283
        # np.exp(9) #> 8103.083927575384
        # np.exp(10) #> 22026.465794806718
        val_01 = rt_count * alpha[0] / (1 + np.exp(temp))
    else:
        val_01 = 0
    #print("VAL:", val_01) #> 0
    # all these depend on val_01, and if it is zero so are they
    val_10 = hyperparam_const * val_01
    val_00 = alambda2 * val_01
    val_11 = alambda1 * val_01
    # sanity checks: the sink/source capacities derived from these values
    # (see compile_energy_graph) must not go negative
    test2 = 0.5 * val_11 + 0.25 * (val_10 - val_01)
    test3 = 0.5 * val_00 + 0.25 * (val_10 - val_01)
    if(min(test2, test3) < 0):
        print("DETECTED / FIXING NEGATIVE EDGE")
        val_00 = val_11 = 0.5 * val_01
    # submodularity check: val_00 + val_11 should not exceed val_01 + val_10
    if(val_00 + val_11 > val_01 + val_10):
        print(u1, u2)
        print('psi_01', val_01)
        print('psi_11', val_11)
        print('psi_00', val_00)
        print('psi_10', val_10)
        print("\n")
    return [val_00, val_01, val_10, val_11]
# TODO: ENERGY GRAPHER
def compile_energy_graph(G, piBot, edgelist_data, graph_out, graph_in):
    """
    Takes as input the RT graph and builds the energy graph.
    Then cuts the energy graph (min-cut between the synthetic source node 1 and
    sink node 0) to classify the bots.
    Copied unchanged from the "computeH" function in the "networkClassifierHELPER" file.

    Params:
        G (ntwkX graph)
            the Retweet Graph from buildRTGraph
        piBot (dict of floats)
            a dictionnary with prior on bot probabilities.
            Keys are users_ids, values are prior bot scores.
        edgelist_data (list of tuples)
            information about edges to build energy graph, each like
            (user_a, user_b, [val_00, val_01, val_10, val_11]).
            This list comes in part from the getLinkDataRestrained method
        graph_out (dict of ints)
            a graph that stores out degrees of accounts in retweet graph
        graph_in (dict of ints)
            a graph that stores in degrees of accounts in retweet graph

    Returns (H, PL, user_data):
        H the energy graph, PL the list of users on the source side of the cut
        (classified as bots), user_data a per-user dict of degrees, priors, and
        unary potentials.
    """
    H = DiGraph()
    # phi_0 / phi_1 are the unary potentials derived from the priors
    # (the 10**(-20) term guards against log(0))
    user_data={i:{
        'user_id':i,
        'out':graph_out[i],
        'in':graph_in[i],
        'old_prob': piBot[i],
        'phi_0': max(0,-np.log(float(10**(-20)+(1-piBot[i])))),
        'phi_1': max(0,-np.log(float(10**(-20)+ piBot[i]))),
        'prob':0,
        'clustering':0
    } for i in G.nodes()}
    # seed the graph with all needed edges at capacity 0:
    # both directions of every pairwise link (set_1, set_2), plus each user to
    # the sink node 0 (set_3) and from the source node 1 (set_4)
    set_1 = [(el[0],el[1]) for el in edgelist_data]
    set_2 = [(el[1],el[0]) for el in edgelist_data]
    set_3 = [(el,0) for el in user_data]
    set_4 = [(1,el) for el in user_data]
    H.add_edges_from(set_1+set_2+set_3+set_4,capacity=0)
    # accumulate the pairwise potentials into edge capacities
    for i in edgelist_data:
        val_00 = i[2][0]
        val_01 = i[2][1]
        val_10 = i[2][2]
        val_11 = i[2][3]
        H[i[0]][i[1]]['capacity']+= 0.5*(val_01+val_10-val_00-val_11)
        H[i[1]][i[0]]['capacity'] += 0.5*(val_01+val_10-val_00-val_11)
        H[i[0]][0]['capacity'] += 0.5*val_11+0.25*(val_10-val_01)
        H[i[1]][0]['capacity'] += 0.5*val_11+0.25*(val_01-val_10)
        H[1][i[0]]['capacity'] += 0.5*val_00+0.25*(val_01-val_10)
        H[1][i[1]]['capacity'] += 0.5*val_00+0.25*(val_10-val_01)
        # negative capacities would make the min-cut invalid, so bail out early
        if(H[1][i[0]]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[i[1]][0]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[1][i[1]]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[i[0]][0]['capacity']<0):
            print("Neg capacity")
            break;
    # add the unary potentials onto the source/sink edges
    for i in user_data.keys():
        H[1][i]['capacity'] += user_data[i]['phi_0']
        if(H[1][i]['capacity'] <0):
            print("Neg capacity");
            break;
        H[i][0]['capacity'] += user_data[i]['phi_1']
        if(H[i][0]['capacity'] <0):
            print("Neg capacity");
            break;
    # min-cut between the synthetic source (1) and sink (0): the source-side
    # partition is the set of users classified as bots
    cut_value, mc = minimum_cut(H,1,0)
    print("MIN CUT VALUE:", cut_value) #> 22.769643094754716 ... or 479357.85220684315 ... or whatever, depending on the graph
    #print(mc) #> (
    #> {1, 'user3', 'colead4', 'user4', 'user1', 'user5', 'colead1', 'user2'},
    #> {0, 'colead3', 'leader1', 'leader3', 'colead2', 'leader2'}
    #> )
    PL = list(mc[0]) #the other way around
    #print(PL) #> [1, 'user3', 'colead4', 'user4', 'user1', 'user5', 'colead1', 'user2']
    if 1 not in PL:
        print("Double check")
        PL = list(mc[1])
    PL.remove(1)
    return H, PL, user_data
| 9,650 | 32.27931 | 229 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/botcode/investigation.py |
import os
from pprint import pprint
import numpy as np
from networkx import DiGraph
from app.botcode.network_classifier_helper import parse_bidirectional_links, compute_link_energy, compile_energy_graph
from app.decorators.number_decorators import fmt_n
from conftest import compile_mock_rt_graph
def classify_bot_probabilities(rt_graph, weight_attr="rt_count"):
    """
    End-to-end bot classification for a retweet graph: computes degrees and
    link energies, builds and cuts the energy graph, and marks every user on
    the source side of the cut as a bot.

    Params:
        rt_graph (networkx.DiGraph) a retweet graph, with weights stored under weight_attr
        weight_attr (str) the edge-data attribute holding the weights
            (number of times user a has retweeted user b)

    Returns (bot_probabilities, user_data): a {user: 0-or-1} dict, and the
    per-user detail dict produced by compile_energy_graph.
    """
    in_degrees = dict(rt_graph.in_degree(weight=weight_attr)) # users receiving retweets
    out_degrees = dict(rt_graph.out_degree(weight=weight_attr)) # users doing the retweeting
    print("IN-DEGREES:", fmt_n(len(in_degrees)))
    print("OUT-DEGREES:", fmt_n(len(out_degrees)))
    energies = []
    for link in parse_bidirectional_links(rt_graph):
        retweeter, retweeted = link[0], link[1]
        energies.append((retweeter, retweeted, compute_link_energy(retweeter, retweeted, link[4], in_degrees, out_degrees)))
    print("ENERGIES:", fmt_n(len(energies)))
    positive_energies = [e for e in energies if sum(e[2]) > 0]
    print("POSITIVE ENERGIES:", fmt_n(len(positive_energies)))
    # no informative priors: start every user at 0.5
    prior_probabilities = dict.fromkeys(list(rt_graph.nodes), 0.5)
    # building and cutting the energy graph is the slowest step
    energy_graph, pl, user_data = compile_energy_graph(rt_graph, prior_probabilities, positive_energies, out_degrees, in_degrees)
    print("ENERGIES GRAPHED...")
    # everyone defaults to 0 (human); users in the cut's source partition are bots
    bot_probabilities = dict.fromkeys(list(user_data.keys()), 0)
    for bot_user in pl:
        user_data[bot_user]["clustering"] = 1
        bot_probabilities[bot_user] = 1
    return bot_probabilities, user_data
if __name__ == "__main__":
    # step-by-step walkthrough of the classification pipeline against a mock
    # retweet graph (same stages as classify_bot_probabilities, with inspection)
    graph = compile_mock_rt_graph()
    print("----------------------")
    in_degrees = dict(graph.in_degree(weight="rt_count")) # users receiving retweets
    out_degrees = dict(graph.out_degree(weight="rt_count")) # users doing the retweeting
    print("IN-DEGREES...")
    pprint(in_degrees)
    print("OUT-DEGREES...")
    pprint(out_degrees)
    print("----------------------")
    print("ENSURING ALL NODES ARE REPRESENTED IN IN-DEGREE AND OUT-DEGREE VIEWS...")
    for node in graph.nodes():
        if node not in in_degrees.keys():
            print("ADDING NODE TO IN-DEGREES")
            in_degrees[node] = 0
        if node not in out_degrees.keys():
            print("ADDING NODE TO OUT-DEGREES")
            out_degrees[node] = 0
    print("IN-DEGREES:", len(in_degrees))
    print("OUT-DEGREES:", len(out_degrees))
    print("----------------------")
    print("GATHERING LINKS...")
    links = parse_bidirectional_links(graph) #
    pprint(links) #> list of links like ['user1', 'leader1', True, False, 4.0, 0]
    print("----------------------")
    print("COMPUTING ENERGIES...")
    energies = [(link[0], link[1], compute_link_energy(link[0], link[1], link[4], in_degrees, out_degrees)) for link in links]
    print(len(energies))
    #pprint(energies) #> list of tuples like... ('user1', 'leader1', [0.0, 0, 0.0, 0.0])
    #print("----------------------")
    # only links with non-zero potentials participate in the energy graph
    positive_energies = [e for e in energies if sum(e[2]) > 0]
    print("POSITIVE ENERGIES...")
    print(len(positive_energies))
    pprint(positive_energies)
    print("----------------------")
    print("STARTING WITH DEFAULT BOT PROBABILITIES (PRIORS)")
    nodes = list(graph.nodes) #> ["user1", "user2", "user3", etc.]
    prior_probabilities = dict.fromkeys(nodes, 0.5) # no priors, set all at 0.5!
    #print(prior_probabilities) #> {'user1': 0.5, 'user2': 0.5, 'user3': 0.5}
    print("----------------------")
    print("CONSTRUCTING RETWEET ENERGY GRAPH...")
    energy_graph, pl, user_data = compile_energy_graph(graph, prior_probabilities, positive_energies, out_degrees, in_degrees)
    print(type(energy_graph), len(pl), len(user_data)) #> <class 'networkx.classes.digraph.DiGraph'> 7 12
    print("PL:", pl)
    #pprint(user_data)
    # todo: write pl users list to csv (see writeCSVFile in ioHELPER)
    # todo: write energy graph edges to CSV file (see writeCSVFile_H in ioHELPER)
    print("----------------------")
    print("COMPUTING BOT PROBABILITIES...")
    bot_probabilities = dict.fromkeys(list(user_data.keys()), 0) # start with defaults of 0 for each user
    # users in the min-cut's source partition (pl) are classified as bots
    for user in pl:
        user_data[user]["clustering"] = 1
        bot_probabilities[user] = 1
    pprint(bot_probabilities)
    # assert bot_probabilities == {
    #     'colead1': 1, 'colead2': 0, 'colead3': 0, 'colead4': 1,
    #     'leader1': 0, 'leader2': 0, 'leader3': 0,
    #     'user1': 1, 'user2': 1, 'user3': 1, 'user4': 1, 'user5': 1
    # }
    # todo: write_bot_probabilities_to_csv(csv_filepath, bot_probabilities)
| 4,946 | 40.225 | 129 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/tweet_recollection/collector.py |
import os
from pprint import pprint
from functools import lru_cache
from dotenv import load_dotenv
from app import seek_confirmation, server_sleep
from app.bq_service import BigQueryService, split_into_batches, generate_timestamp
from app.twitter_service import TwitterService
from app.tweet_recollection.parser import parse_full_text
load_dotenv()
STATUS_LIMIT = int(os.getenv("STATUS_LIMIT", default="100_000")) # number of ids to fetch from BQ
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100")) # must be less than 100
class Collector:
    """Re-collects previously archived tweets: pulls not-yet-recollected status ids
    from BigQuery, re-fetches them from the Twitter API in batches, and writes the
    recollected statuses (and their urls) back to BigQuery."""
    def __init__(self):
        self.twitter_api = TwitterService().api
        self.bq_service = BigQueryService()
        self.limit = STATUS_LIMIT # max number of ids fetched from BQ per run
        self.batch_size = BATCH_SIZE # must be less than 100 (Twitter statuses_lookup cap)
    def fetch_remaining_status_ids(self):
        """Returns a list of status ids present in all_status_ids but missing from
        recollected_statuses (i.e. still needing recollection), up to self.limit."""
        sql = f"""
            SELECT DISTINCT a.status_id
            FROM `{self.bq_service.dataset_address}.all_status_ids` a
            LEFT JOIN `{self.bq_service.dataset_address}.recollected_statuses` completed ON completed.status_id = a.status_id
            WHERE completed.status_id IS NULL
            LIMIT {self.limit}
        """
        return [row["status_id"] for row in list(self.bq_service.execute_query(sql))]
    def perform(self):
        """Main entry point: processes all remaining ids in batches, or sleeps
        for ten hours when there is nothing left to do."""
        remaining_status_ids = self.fetch_remaining_status_ids()
        if any(remaining_status_ids):
            for batch_of_ids in split_into_batches(remaining_status_ids, batch_size=self.batch_size):
                self.process_batch(batch_of_ids)
        else:
            print("OH ALL DONE! SLEEPING...")
            server_sleep(10*60*60)
    def lookup_statuses(self, status_ids):
        """Fetch full status info including urls, and full text.
        Max per request is 100, so batch size must be smaller than that.
        See:
            https://docs.tweepy.org/en/stable/api.html#API.statuses_lookup
            https://developer.twitter.com/en/docs/twitter-api/v1/tweets/post-and-engage/api-reference/get-statuses-lookup
        """
        return self.twitter_api.statuses_lookup(
            id_=status_ids,
            include_entities=True, # this is where the full urls are
            trim_user=True, # we already have this info
            include_ext_alt_text=True, # If alt text has been added to any attached media entities, this parameter will return an ext_alt_text value in the top-level key for the media entity. If no value has been set, this will be returned as null.
            include_card_uri=False,
            map_=True, # "Tweets that do not exist or cannot be viewed by the current user will still have their key represented but with an explicitly null value paired with it"
            tweet_mode="extended"
        )
    def process_batch(self, status_ids):
        """Looks up one batch of ids and saves the parsed rows. Failed lookups are
        still recorded (with null user/text/created fields) so the id is marked as
        processed and won't be fetched again by fetch_remaining_status_ids()."""
        recollected_statuses = []
        recollected_urls = []
        success_counter = 0
        for status in self.lookup_statuses(status_ids):
            # when passing param map_=True to Twitter API, if statuses are not available, the status will be present, but will only have an id field
            status_id = status.id # all statuses will have an id
            recollected_status = {
                "status_id": status_id,
                "user_id": None,
                "full_text": None,
                "created_at": None,
                "lookup_at": generate_timestamp()
            } # represent failed lookups with null text values
            if list(status._json.keys()) != ["id"]: # this will be the only field for empty statuses. otherwise try to parse them:
                success_counter+=1
                recollected_status["user_id"] = status.user.id
                recollected_status["full_text"] = parse_full_text(status) # update the full text if possible
                recollected_status["created_at"] = generate_timestamp(status.created_at)
                for url in status.entities["urls"]:
                    recollected_urls.append({"status_id": status_id, "expanded_url": url["expanded_url"]})
            recollected_statuses.append(recollected_status)
        print(generate_timestamp(), f"| SAVING BATCH OF {len(status_ids)}", "| STATUSES:", success_counter, "| URLS:", len(recollected_urls))
        self.save_statuses(recollected_statuses)
        self.save_urls(recollected_urls)
    def save_statuses(self, recollected_statuses):
        """Appends rows (list of dict) to the recollected_statuses table."""
        self.bq_service.insert_records_in_batches(self.recollected_statuses_table, recollected_statuses)
    def save_urls(self, recollected_urls):
        """Appends rows (list of dict) to the recollected_status_urls table."""
        self.bq_service.insert_records_in_batches(self.recollected_urls_table, recollected_urls)
    @property
    @lru_cache(maxsize=None)
    def recollected_statuses_table(self):
        # NOTE(review): lru_cache on an instance method caches per self and keeps the
        # instance alive; functools.cached_property is the modern equivalent.
        return self.bq_service.client.get_table(f"{self.bq_service.dataset_address}.recollected_statuses")
    @property
    @lru_cache(maxsize=None)
    def recollected_urls_table(self):
        return self.bq_service.client.get_table(f"{self.bq_service.dataset_address}.recollected_status_urls")
if __name__ == '__main__':
    # Entry point: show the run configuration, pause for confirmation, then run.
    job = Collector()
    print("LIMIT:", job.limit)
    print("BATCH SIZE:", job.batch_size)
    seek_confirmation()
    job.perform()
| 5,169 | 42.445378 | 248 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/tweet_recollection/parser.py |
def parse_full_text(status):
    """Return the tweet's full text, cleaned for CSV storage (see clean_text).
    Param status (tweepy.models.Status) -- must expose a full_text attribute
    """
    raw_text = status.full_text
    return clean_text(raw_text)
def clean_text(my_str):
    """Collapses line-breaks into spaces and trims surrounding whitespace,
    so the value stores cleanly in a single CSV cell.
    Param my_str (str) or a null value
    Returns str, or the original value when it has no string methods
    """
    try:
        return my_str.replace("\n", " ").replace("\r", " ").strip()
    except AttributeError:
        # my_str is None (or otherwise not string-like); hand it back as-is
        return my_str
| 489 | 23.5 | 81 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/ks_test/impeachment_topic_pair_analyzers.py |
import os
from pprint import pprint
from itertools import combinations
from pandas import read_csv
from app.ks_test.topic_pair_analyzer import TopicPairAnalyzer, RESULTS_CSV_FILEPATH
from app.ks_test.impeachment_topics import IMPEACHMENT_TOPICS # todo: allow customization of topics list via CSV file
if __name__ == "__main__":
    # Pairs whose results were already written on a previous run are skipped,
    # so the script can be re-run and resume where it left off.
    existing_ids = []
    if os.path.isfile(RESULTS_CSV_FILEPATH):
        existing_ids = read_csv(RESULTS_CSV_FILEPATH)["row_id"].tolist()
    topics = IMPEACHMENT_TOPICS
    print(f"DETECTED {len(topics)} TOPICS...")
    topic_pairs = list(combinations(topics, 2))
    print(f"COMBINED INTO {len(topic_pairs)} TOPIC PAIRS...")
    pending = [
        analyzer
        for analyzer in (TopicPairAnalyzer(x_topic=xt, y_topic=yt) for xt, yt in topic_pairs)
        if analyzer.row_id not in existing_ids
    ]
    print(f"OF WHICH {len(pending)} NEED TESTING...")
    total = len(pending)
    for i, analyzer in enumerate(pending):
        print("-----------------------------")
        print(f"TESTING TOPIC PAIR {i+1} OF {total} - {analyzer.row_id.upper()}")
        pprint(analyzer.report)
        analyzer.append_results_to_csv()
| 1,187 | 33.941176 | 117 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/ks_test/interpreter.py |
import os
from dotenv import load_dotenv
load_dotenv()
PVAL_MAX = float(os.getenv("PVAL_MAX", default="0.01")) # the maximum pvalue under which to reject the ks test null hypothesis
def interpret(ks_test_result, pval_max=PVAL_MAX):
    """
    Translates a KS test result into a verdict on the null hypothesis.
    "Under the null hypothesis, the two distributions are identical."
    A p-value at or below pval_max means we reject the null (distributions differ);
    otherwise we cannot reject it (distributions treated as the same).
    See:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html
    Params:
        ks_test_result (scipy.stats.stats.KstestResult)
        pval_max (float) the maximum pvalue threshold under which to reject the null hypothesis
    Returns (str) "REJECT (DIFF)" or "ACCEPT (SAME)"
    """
    reject = ks_test_result.pvalue <= pval_max
    return "REJECT (DIFF)" if reject else "ACCEPT (SAME)"
| 1,008 | 33.793103 | 126 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/ks_test/topic_analyzer.py |
import os
from functools import lru_cache
from pprint import pprint
from dotenv import load_dotenv
import numpy as np
from scipy.stats import ks_2samp
from pandas import DataFrame, read_csv, concat
from app import DATA_DIR
from app.decorators.datetime_decorators import to_ts, fmt_date
from app.bq_service import BigQueryService
from app.ks_test.interpreter import interpret, PVAL_MAX
load_dotenv()
TOPIC = os.getenv("TOPIC", default="#MAGA")
RESULTS_CSV_FILEPATH = os.path.join(DATA_DIR, "ks_test", "topic_results.csv")
class TopicAnalyzer:
    """
    Performs two-sample KS test on two independent populations of users: those retweeting about a topic vs those not.
    See:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html
    Fetching strategy fetch_xy() can be customized in child classes to compare different independent user populations.
    """
    def __init__(self, bq=None, topic=TOPIC, pval_max=PVAL_MAX, results_csv_filepath=RESULTS_CSV_FILEPATH):
        """
        Params:
            bq (BigQueryService) optional, constructed on demand if omitted
            topic (str) the topic (hashtag / term / mention) under test
            pval_max (float) p-value threshold passed to the interpreter
            results_csv_filepath (str) where append_results_to_csv() writes by default
        """
        self.topic = topic
        self.bq = bq or BigQueryService()
        self.x = [] # account-creation timestamps of users who retweeted about the topic
        self.y = [] # account-creation timestamps of users who did not
        self.pval_max = pval_max
        self.interpret_ks = interpret
        self.results_csv_filepath = results_csv_filepath
    def fetch_xy(self):
        """Populates self.x / self.y with user creation timestamps (seconds since epoch),
        split on whether the user retweeted about the topic (row.count > 0) or not."""
        print("FETCHING RETWEETERS...")
        for row in self.bq.fetch_retweeters_by_topic_exclusive(self.topic):
            ts = to_ts(row.user_created_at)
            if row.count > 0:
                self.x.append(ts)
            else:
                self.y.append(ts)
    @property
    @lru_cache(maxsize=None)
    def xy_result(self):
        """Runs the two-sample KS test on the x and y samples (cached per instance).
        NOTE(review): lru_cache on an instance method keys on self and keeps the
        instance alive; functools.cached_property is the modern equivalent."""
        if not self.x and not self.y: self.fetch_xy() # make sure data is fetched before trying to test it
        print("-----------------------------")
        print("TWO-SAMPLE KS TEST...")
        result = ks_2samp(self.x, self.y)
        print(type(result))
        return result #> <class 'scipy.stats.stats.KstestResult'>
    @property
    def x_size(self):
        return len(self.x) #> int
    @property
    def y_size(self):
        return len(self.y) #> int
    @property
    def x_avg(self):
        return np.mean(self.x) #> float (seconds since epoch)
    @property
    def y_avg(self):
        return np.mean(self.y) #> float (seconds since epoch)
    @property
    def x_avg_date(self):
        return fmt_date(self.x_avg) #> date string
    @property
    def y_avg_date(self):
        return fmt_date(self.y_avg) #> date string
    @property
    @lru_cache(maxsize=None)
    def report(self):
        """One summary row (dict) of sample sizes, average creation dates, and KS test outcome."""
        self.xy_result # make sure data is fetched and test has been performed before reporting out
        return {
            "row_id": self.row_id,
            "topic": self.topic,
            "x_size": self.x_size,
            "y_size": self.y_size,
            "x_avg": self.x_avg,
            "y_avg": self.y_avg,
            "x_avg_date": self.x_avg_date,
            "y_avg_date": self.y_avg_date,
            "ks_stat": self.xy_result.statistic,
            "ks_pval": self.xy_result.pvalue,
            "pval_max": self.pval_max,
            "ks_inter": self.interpret_ks(self.xy_result, self.pval_max)
        }
    @property
    def row_id(self):
        """should be unique for each topic in the CSV file"""
        return self.topic.lower().replace(" ","") #> "#maga"
    def append_results_to_csv(self, csv_filepath=None):
        """Appends this analyzer's report row to the results CSV, de-duplicating on
        row_id. keep="first" retains the previously saved row, so existing results
        are never overwritten by a re-run. Returns the resulting DataFrame."""
        csv_filepath = csv_filepath or self.results_csv_filepath
        print("WRITING TO FILE...", csv_filepath)
        df = DataFrame(self.report, index=["row_id"])
        if os.path.isfile(csv_filepath):
            existing_df = read_csv(csv_filepath)
            new_df = concat([existing_df, df])
            new_df.drop_duplicates(subset=["row_id"], inplace=True, keep="first")
            new_df.to_csv(csv_filepath, index=False)
            return new_df
        else:
            df.to_csv(csv_filepath, index=False)
            return df
if __name__ == "__main__":
    # Run the KS test for the configured TOPIC and persist the results.
    topic_analyzer = TopicAnalyzer()
    pprint(topic_analyzer.report)
    topic_analyzer.append_results_to_csv()
| 4,138 | 30.838462 | 118 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/ks_test/impeachment_topic_analyzers.py |
import os
from pprint import pprint
from pandas import read_csv
from app.ks_test.topic_analyzer import TopicAnalyzer, RESULTS_CSV_FILEPATH
from app.ks_test.impeachment_topics import IMPEACHMENT_TOPICS # todo: allow customization of topics list via CSV file
if __name__ == "__main__":
    # Topics already present in the results CSV are skipped, so the script
    # can be re-run and resume where it left off.
    existing_ids = []
    if os.path.isfile(RESULTS_CSV_FILEPATH):
        existing_ids = read_csv(RESULTS_CSV_FILEPATH)["row_id"].tolist()
    topics = IMPEACHMENT_TOPICS # todo: topic customization
    print(f"DETECTED {len(topics)} TOPICS...")
    pending = [a for a in (TopicAnalyzer(topic=t) for t in topics) if a.row_id not in existing_ids]
    print(f"OF WHICH {len(pending)} NEED TESTING...")
    total = len(pending)
    for i, analyzer in enumerate(pending):
        print("-----------------------------")
        print(f"TESTING TOPIC {i+1} OF {total}: '{analyzer.row_id.upper()}'")
        pprint(analyzer.report)
        analyzer.append_results_to_csv(RESULTS_CSV_FILEPATH)
| 1,055 | 34.2 | 117 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/ks_test/topic_pair_analyzer.py |
import os
from pprint import pprint
from functools import lru_cache
from dotenv import load_dotenv
from app import DATA_DIR
from app.decorators.datetime_decorators import to_ts
from app.ks_test.topic_analyzer import TopicAnalyzer
load_dotenv()
X_TOPIC = os.getenv("X_TOPIC", default="#MAGA")
Y_TOPIC = os.getenv("Y_TOPIC", default="#ImpeachAndRemove")
RESULTS_CSV_FILEPATH = os.path.join(DATA_DIR, "ks_test", "topic_pair_results.csv")
class TopicPairAnalyzer(TopicAnalyzer):
    """
    Performs two-sample KS test on two independent populations of users talking about two different topics.
    One sample is for users talking about topic x and not y.
    The other sample is for users talking about topic y and not x.
    """
    def __init__(self, x_topic=X_TOPIC, y_topic=Y_TOPIC, results_csv_filepath=RESULTS_CSV_FILEPATH):
        """
        Params:
            x_topic (str) first topic of the pair
            y_topic (str) second topic of the pair
            results_csv_filepath (str) where append_results_to_csv() writes by default
        """
        # BUGFIX: pass the caller-supplied filepath through to the parent (it was
        # previously hard-coded to the module-level RESULTS_CSV_FILEPATH, silently
        # ignoring the results_csv_filepath parameter).
        # topic=None feels hacky, but its ok, we remove "topic" from the report below
        super().__init__(results_csv_filepath=results_csv_filepath, topic=None)
        self.x_topic = x_topic
        self.y_topic = y_topic
    def fetch_xy(self):
        """Populates self.x / self.y with user creation timestamps, keeping only
        users who retweeted about exactly one of the two topics (exclusive split)."""
        print("FETCHING RETWEETERS...")
        for row in self.bq.fetch_retweeters_by_topics_exclusive(self.x_topic, self.y_topic):
            ts = to_ts(row.user_created_at)
            if row.x_count > 0 and row.y_count == 0:
                self.x.append(ts)
            elif row.x_count == 0 and row.y_count > 0:
                self.y.append(ts)
    @property
    @lru_cache(maxsize=None)
    def report(self):
        """One summary row (dict) of sample sizes, averages, and KS test outcome
        for the topic pair (replaces the parent's single-topic report)."""
        self.xy_result # make sure data is fetched and test has been performed before reporting out
        return {
            "row_id": self.row_id,
            "x_topic": self.x_topic,
            "y_topic": self.y_topic,
            "x_size": self.x_size,
            "x_avg_date": self.x_avg_date,
            "x_avg": self.x_avg,
            "y_size": self.y_size,
            "y_avg_date": self.y_avg_date,
            "y_avg": self.y_avg,
            "ks_stat": self.xy_result.statistic,
            "pval_max": self.pval_max,
            "ks_pval": self.xy_result.pvalue,
            "ks_inter": self.interpret_ks(self.xy_result, self.pval_max)
        }
    @property
    def row_id(self):
        """should be unique for each pair of topics in the CSV file (order-insensitive)"""
        sorted_topics = sorted([self.x_topic.lower().replace(" ",""), self.y_topic.lower().replace(" ","")])
        return "_".join(sorted_topics) #> "#sometag_#othertag"
if __name__ == "__main__":
    # Compare the configured X_TOPIC and Y_TOPIC populations and persist the results.
    pair_analyzer = TopicPairAnalyzer()
    pprint(pair_analyzer.report)
    pair_analyzer.append_results_to_csv()
| 2,565 | 35.140845 | 148 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/ks_test/investigation.py |
import numpy as np
from scipy.stats import kstest, ks_2samp
np.random.seed(2020)
if __name__ == "__main__":
    # Sanity check of the KS machinery on synthetic data: x and y are drawn from
    # the same standard normal, while z is shifted/narrowed -- so the tests should
    # flag z (and only z) as "different".
    # np.random.normal(center, spread, size)
    x = np.random.normal(0, 1, 10)
    y = np.random.normal(0, 1, 10)
    z = np.random.normal(1.1, 0.9, 10) # one of these things is not like the other
    print("X:", x) #> [-0.14235884, 2.05722174, 0.28326194, 1.32981198, -0.15462185, -0.06903086, 0.75518049, 0.82564665, -0.11306921, -2.36783759]
    print("Y:", y) #> [-0.16704943, 0.68539797, 0.02350011, 0.45620128, 0.27049278, -1.43500814, 0.88281715, -0.58008166, -0.5015653 , 0.59095329]
    print("Z:", z) #> [0.44154537, 1.33557992, 0.32978398, 0.93122668, 0.76386234, 0.68422613, 0.36518051, 1.05938903, 1.209195 , 1.9333575 ]
    print("--------------------------")
    print("KS TESTS (AGAINST NORMAL DISTRIBUTION)...")
    print("X:", kstest(x, "norm")) #> KstestResult(statistic=0.33855972126320677, pvalue=0.1590112941560191)
    print("Y:", kstest(y, "norm")) #> KstestResult(statistic=0.18866753744918974, pvalue=0.8060547009565138)
    print("Z:", kstest(z, "norm")) #> KstestResult(statistic=0.6292184031091476, pvalue=0.0002318573221670418)
    print("--------------------------")
    print("TWO-SAMPLE KS TESTS...")
    print("X-Y:", ks_2samp(x, y)) #> KstestResult(statistic=0.3, pvalue=0.7869297884777761)
    print("X-Z:", ks_2samp(x, z)) #> KstestResult(statistic=0.6, pvalue=0.05244755244755244)
    # FIX: label was "Y-Z" (missing the colon the other two labels have)
    print("Y-Z:", ks_2samp(y, z)) #> KstestResult(statistic=0.6, pvalue=0.05244755244755244)
    #breakpoint()
| 1,560 | 46.30303 | 152 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/ks_test/impeachment_topics.py | # todo: allow customization of topics list via CSV file
# Topics whose retweeter populations get analyzed. Entries prefixed with "##"/"###"
# are disabled (several for small sample sizes -- see inline notes); uncomment to
# re-enable. Sections: hashtags (TAGS), search terms (TERMS), user mentions (MENTIONS).
IMPEACHMENT_TOPICS = [
    # TAGS
    "#MAGA",
    # "#IGHearing", -- very small sample size
    "#ImpeachAndConvict",
    "#TrumpImpeachment",
    ##"#IGReport",
    ##"#SenateHearing",
    ##"#FactsMatter",
    ##"#ImpeachmentRally",
    ##"#ImpeachmentEve",
    ##"#ImpeachAndRemove",
    ##"#trumpletter",
    ##"#NotAboveTheLaw",
    ##"#25thAmendmentNow",
    ##"#ShamTrial",
    ##"#GOPCoverup",
    ##"#MitchMcCoverup",
    ##"#AquittedForever",
    ##"#CoverUpGOP",
    ##"#MoscowMitch",
    ##"#CountryOverParty",
    ##
    ### TERMS
    ###'sham',
    ###'hoax',
    ###'witch', -- could be witchhunt or witch-hunt or just witch
    ##'Trump',
    ##'Pelosi',
    ##'Schumer',
    ##'Schiff',
    ##'Nadler',
    ###'Yovanovitch', # low sample size
    ##'Vindman',
    ###'Volker', # low sample size
    ##'Sondland',
    ### 'amigos', # low sample size
    ##'Bolton',
    ##'Zelensk',
    ##'Fiona', # 'Fiona Hill',
    ##'Kent', # 'George Kent',
    ##'Taylor', # 'William Taylor',
    ##
    ### MENTIONS
    ##'@realDonaldTrump',
    ##'@senatemajldr',
    ##'@SpeakerPelosi',
    ##'@SenSchumer',
    ##'@JoeBiden',
    ##'@GOP',
    ##'@TheDemocrats',
    ##'@nytimes',
    ##'@WSJ',
    ##'@CNN',
    ##'@MSNBC',
    ##'@NBCNews',
    ##'@abcnews',
    ##'@thehill',
    ##'@politico',
]
| 1,357 | 20.903226 | 65 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/decorators/number_decorators.py |
def fmt_n(large_number):
    """
    Formats a large number with a thousands separator (rounded to whole units),
    for printing and logging.
    Param large_number (int or float) like 1_000_000_000
    Returns (str) like '1,000,000,000'
    """
    return format(large_number, ",.0f")
def fmt_pct(decimal_number):
    """
    Formats a decimal ratio as a percentage string with two decimal places,
    for printing and logging.
    Param decimal_number (float) like 0.95555555555
    Returns (str) like '95.56%'
    """
    percentage = decimal_number * 100
    return format(percentage, ".2f") + "%"
| 502 | 19.12 | 78 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/decorators/datetime_decorators.py |
from datetime import datetime, timezone
def logstamp():
    """
    Returns the current local time as a string like "2020-01-01 00:00:00",
    for printing and logging.
    """
    now = datetime.now()
    return dt_to_s(now)
#
# DATETIME DECORATORS
#
def dt_to_s(dt):
    """
    Converts datetime object to a date-time string like "2020-01-01 00:00:00"
    Params: dt (datetime) like ... datetime(2020, 7, 26, 10, 29, 49, 828663)
    """
    # f-string format spec delegates to strftime with the same pattern
    return f"{dt:%Y-%m-%d %H:%M:%S}"
def dt_to_date(dt):
    """
    Converts datetime object to a date string like "2014-02-10".
    Params: dt (datetime) like ... datetime(2020, 7, 26, 10, 29, 49, 828663)
    """
    # f-string format spec delegates to strftime with the same pattern
    return f"{dt:%Y-%m-%d}"
def to_ts(dt): # todo: replace references with dt_to_ts
    """
    Converts a (naive, assumed-UTC) datetime object to a UTC timestamp
    (seconds since epoch) like 1595759389.828663. Inverse of to_dt().
    Params: dt (datetime) like ... datetime(2020, 7, 26, 10, 29, 49, 828663)
    """
    utc_dt = dt.replace(tzinfo=timezone.utc)
    return utc_dt.timestamp()
def dt_to_ts(dt):
    """Alias for to_ts(), the preferred name going forward. Params: dt (datetime)"""
    return to_ts(dt)
#
# TIMESTAMP DECORATORS
#
def fmt_date(ts): # todo: rename as ts_to_date
    """
    Converts timestamp (seconds since epoch) to a date string like "2014-02-10".
    Params: ts (float) seconds since epoch (like 1595759389.828663)
    """
    dt = to_dt(ts)
    return dt_to_date(dt)
def to_dt(ts): # todo: rename as ts_to_dt
    """
    Converts UTC timestamp (seconds since epoch) to a naive datetime object like
    datetime(2020, 7, 26, 10, 29, 49, 828663). Inverse of to_ts() function.
    Params: ts (float) seconds since epoch (like 1595759389.828663)
    """
    # datetime.utcfromtimestamp() is deprecated (since Python 3.12); build an
    # aware UTC datetime and strip tzinfo to preserve the original naive return value.
    return datetime.fromtimestamp(ts, tz=timezone.utc).replace(tzinfo=None)
#
# STRING DECORATORS
#
def s_to_dt(s):
    """
    Parses a date-time string into a datetime object.
    Params: s (str) like ... "2020-01-01 00:00:00"
    """
    dt_format = "%Y-%m-%d %H:%M:%S"
    return datetime.strptime(s, dt_format)
def s_to_date(s):
    """Converts a date-time string like "2020-01-01 00:00:00" to a date string like "2020-01-01"."""
    return dt_to_date(s_to_dt(s))
def date_to_dt(date):
    """Parses a date string like "2020-01-01" into a datetime object (at midnight)."""
    return datetime.strptime(date, "%Y-%m-%d")
def date_to_ts(date):
    """
    Converts a date string to a UTC timestamp (seconds since epoch) like 1329609600.0
    Params: date (str) like "2020-01-01"
    """
    return dt_to_ts(date_to_dt(date))
| 2,136 | 23.848837 | 144 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/tweet_collection_v2/bq_migrations.py |
from pprint import pprint
from app import seek_confirmation
from app.decorators.datetime_decorators import dt_to_s
from app.bq_service import BigQueryService
from app.tweet_collection_v2.csv_storage import LocalStorageService
if __name__ == "__main__":
    # Migration script: (re)creates the topics table, seeds it from the local
    # topics CSV, echoes the seeded rows, then migrates the tweets table.
    bq_service = BigQueryService()
    # TOPICS
    bq_service.migrate_topics_table()
    print("--------------------")
    print("SEEDING TOPICS...")
    local_storage = LocalStorageService()
    topics = local_storage.fetch_topic_names()
    bq_service.append_topics(topics)
    for row in bq_service.fetch_topics():
        print(row.topic, "|", dt_to_s(row.created_at))
    # TWEETS
    seek_confirmation()
    # NOTE(review): input() here is only a pause prompt -- its return value is
    # intentionally ignored; any keypress proceeds with the destructive migration.
    if bq_service.destructive:
        input(f"THIS WILL DESTROY THE TWEETS TABLE ON '{bq_service.dataset_address.upper()}'. ARE YOU REALLY SURE YOU WANT TO DO THIS?")
    bq_service.migrate_tweets_table()
| 881 | 25.727273 | 136 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/tweet_collection_v2/tweet_parser.py |
from app.decorators.datetime_decorators import dt_to_s
def parse_status(status):
    """
    Param status (tweepy.models.Status)
    Converts a nested status structure into a flat row of non-normalized status and user attributes.
    Returns (dict) one row per tweet; the retweeted_* fields stay None for original tweets.
    """
    retweeted_status_id = None
    retweeted_user_id = None
    retweeted_user_screen_name = None
    retweet = getattr(status, "retweeted_status", None)
    if retweet:
        retweeted_status_id = retweet.id_str
        retweeted_user_id = retweet.user.id
        retweeted_user_screen_name = retweet.user.screen_name
    user = status.user
    return {
        "status_id": status.id_str,
        "status_text": parse_string(parse_full_text(status)),
        "truncated": status.truncated,
        "retweeted_status_id": retweeted_status_id,
        "retweeted_user_id": retweeted_user_id,
        "retweeted_user_screen_name": retweeted_user_screen_name,
        "reply_status_id": status.in_reply_to_status_id_str,
        "reply_user_id": status.in_reply_to_user_id_str,
        "is_quote": status.is_quote_status,
        "geo": status.geo,
        "created_at": dt_to_s(status.created_at),
        "user_id": user.id_str,
        "user_name": user.name,
        "user_screen_name": user.screen_name,
        "user_description": parse_string(user.description),
        "user_location": user.location,
        "user_verified": user.verified,
        "user_created_at": dt_to_s(user.created_at),
    }
def parse_string(my_str):
    """
    Removes line-breaks for cleaner CSV storage. Handles string or null value.
    Returns string, or the original value when it has no string methods.
    Param my_str (str)
    """
    try:
        for line_break in ("\n", "\r"):
            my_str = my_str.replace(line_break, " ")
        return my_str.strip()
    except AttributeError:
        # my_str is None (or otherwise not string-like); hand it back as-is
        return my_str
def parse_full_text(status):
    """
    Param status (tweepy.models.Status)
    Returns the fullest text available on the status, preferring full_text,
    then extended_tweet["full_text"], then plain text.
    h/t: https://github.com/tweepy/tweepy/issues/974#issuecomment-383846209
    """
    try:
        return status.full_text
    except AttributeError:
        pass
    try:
        return status.extended_tweet["full_text"]
    except AttributeError:
        return status.text
| 2,316 | 31.180556 | 107 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/tweet_collection_v2/stream_listener.py |
import os
from pprint import pprint
from time import sleep
from dotenv import load_dotenv
from tweepy.streaming import StreamListener
from tweepy import Stream
from urllib3.exceptions import ProtocolError
from app import seek_confirmation
from app.twitter_service import TwitterService
from app.bq_service import BigQueryService, generate_timestamp
from app.tweet_collection_v2.csv_storage import LocalStorageService
from app.tweet_collection_v2.tweet_parser import parse_status
load_dotenv()
STORAGE_ENV = os.getenv("STORAGE_ENV", default="local") # "local" OR "remote"
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="20"))
class TopicResetEvent(Exception):
    """Raised (deliberately, not as an error) to knock the listener out of
    stream.filter() so the surrounding loop restarts with refreshed topics."""
    pass
class TweetCollector(StreamListener):
    """Tweepy stream listener that filters incoming English tweets and stores
    parsed rows in batches, either to local CSV files or to BigQuery
    (selected by STORAGE_ENV)."""
    def __init__(self, twitter_service=None, storage_env=STORAGE_ENV, bq_service=None, csv_service=None, batch_size=BATCH_SIZE):
        """
        Params:
            twitter_service (TwitterService) optional, constructed on demand
            storage_env (str) "local" (CSV) or "remote" (BigQuery)
            bq_service (BigQueryService) used when storage_env == "remote"
            csv_service (LocalStorageService) used when storage_env == "local"
            batch_size (int) number of parsed tweets buffered before each write
        Raises: ValueError when storage_env is not "local" or "remote"
        """
        self.twitter_service = twitter_service or TwitterService()
        self.api = self.twitter_service.api
        self.auth = self.api.auth
        self.parse_status = parse_status
        self.storage_env = storage_env
        if self.storage_env == "local":
            self.storage_service = csv_service or LocalStorageService()
        elif self.storage_env == "remote":
            self.storage_service = bq_service or BigQueryService()
        else:
            raise ValueError("Expecting the STORAGE_ENV to be 'local' or 'remote'. Please try again...")
        self.batch_size = batch_size
        self.batch = [] # buffer of parsed tweet rows awaiting storage
        self.counter = 0 # tweets collected since the last batch write
        print("-------------------------------")
        print("STREAM LISTENER...")
        print("  STORAGE ENV:", self.storage_env.upper())
        print("  STORAGE SERVICE:", type(self.storage_service))
        print("  BATCH SIZE:", self.batch_size)
        print("--------------------------------")
    def set_topics(self):
        """Loads the current topic list from the storage service onto self.topics."""
        self.topics = self.storage_service.fetch_topic_names()
        print("SET TOPICS:", self.topics)
    def reset_topics(self):
        """Refreshes self.topics, then raises TopicResetEvent so the outer loop
        restarts stream.filter() with the new topic list."""
        self.set_topics()
        raise TopicResetEvent("Let's trigger the listener to re-start in a kind of hacky way :-D")
    #
    # LISTEN FOR TWEETS AND COLLECT THEM
    #
    def on_connect(self):
        print("LISTENER IS CONNECTED!")
    def on_status(self, status):
        """Param status (tweepy.models.Status)"""
        if self.is_collectable(status):
            self.counter +=1
            print("----------------")
            print(f"DETECTED AN INCOMING TWEET! ({self.counter} -- {status.id_str})")
            self.collect_in_batches(status)
    @staticmethod
    def is_collectable(status):
        """Param status (tweepy.models.Status)
        Currently only filters on language; the commented-out clauses show
        previously considered restrictions (verified users, non-replies, etc.)."""
        return (status.lang == "en"
            #and status.user.verified
            #and status.in_reply_to_status_id == None
            #and status.in_reply_to_user_id == None
            #and status.in_reply_to_screen_name == None
            #and status.is_quote_status == False
            #and status.retweeted == False
            #and not hasattr(status, "retweeted_status")
        )
    def collect_in_batches(self, status):
        """
        Param status (tweepy.models.Status)
        Moving this logic out of on_status in hopes of preventing ProtocolErrors
        Storing in batches to reduce API calls, and in hopes of preventing ProtocolErrors
        """
        self.batch.append(self.parse_status(status))
        if len(self.batch) >= self.batch_size:
            self.store_and_clear_batch()
    def store_and_clear_batch(self):
        """Flushes the buffered rows to the storage service and resets the buffer."""
        print("STORING BATCH OF", len(self.batch), "TWEETS...")
        self.storage_service.append_tweets(self.batch)
        print("CLEARING BATCH...")
        self.batch = []
        self.counter = 0
    #
    # HANDLE ERRORS
    #
    def on_exception(self, exception):
        # has encountered errors:
        # + urllib3.exceptions.ProtocolError: ('Connection broken: IncompleteRead(0 bytes read)'
        # + urllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool
        print("EXCEPTION:", type(exception))
        print(exception)
    def on_error(self, status_code):
        print("ERROR:", status_code)
    def on_limit(self, track):
        """Param: track (int) starts low and subsequently increases"""
        print("RATE LIMITING", track)
        sleep_seconds = self.backoff_strategy(track)
        print("SLEEPING FOR:", sleep_seconds, "SECONDS...")
        sleep(sleep_seconds)
    @staticmethod
    def backoff_strategy(i):
        """
        Param: i (int) increasing rate limit number from the twitter api
        Returns: number of seconds to sleep for
        """
        return (int(i) + 1) ** 2 # raise to the power of two
    def on_timeout(self):
        print("TIMEOUT!")
        return True # don't kill the stream!
    def on_warning(self, notice):
        print("DISCONNECTION WARNING:", type(notice))
        print(notice)
    def on_disconnect(self, notice):
        print("DISCONNECT:", type(notice))
        print(notice)
if __name__ == "__main__":
    listener = TweetCollector()
    seek_confirmation()
    listener.set_topics()
    stream = Stream(listener.auth, listener)
    print("STREAM", type(stream))
    # Keep the stream alive forever: both expected interruptions (network
    # ProtocolError, and the listener's deliberate TopicResetEvent) are caught
    # and the filter is restarted with the listener's current topic list.
    while True:
        try:
            stream.filter(track=listener.topics)
        except ProtocolError:
            print("--------------------------------")
            print("RESTARTING AFTER PROTOCOL ERROR!")
            continue
        except TopicResetEvent as event:
            print("--------------------------------")
            print("RESTARTING AFTER TOPICS REFRESH!")
            continue
    # this never gets reached
| 5,598 | 32.526946 | 128 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/tweet_collection_v2/csv_storage.py |
import os
from dotenv import load_dotenv
from pandas import DataFrame, read_csv, concat
from app import DATA_DIR, seek_confirmation
load_dotenv()
EVENT_NAME = os.getenv("EVENT_NAME", default="impeachment")
class LocalStorageService:
    """
    Must have same methods and params as the remote version - see append_topics() and append_tweets() in BigQueryService
    """
    def __init__(self, local_dirpath=None, event_name=EVENT_NAME):
        """
        Params:
            local_dirpath (str) directory holding topics.csv / tweets.csv;
                defaults to DATA_DIR/tweet_collection_v2/<event_name>
            event_name (str) collection event name, used to pick the default directory
        """
        self.event_name = event_name
        self.local_dirpath = local_dirpath or os.path.join(DATA_DIR, "tweet_collection_v2", self.event_name)
        self.topics_csv_filepath = os.path.join(self.local_dirpath, "topics.csv")
        self.tweets_csv_filepath = os.path.join(self.local_dirpath, "tweets.csv")
        print("--------------------")
        print("LOCAL CSV STORAGE...")
        print("  DIR:", os.path.abspath(self.local_dirpath))
        print("  TOPICS CSV:", os.path.abspath(self.topics_csv_filepath))
        print("  TWEETS CSV:", os.path.abspath(self.tweets_csv_filepath))
        #seek_confirmation()
        #if not os.path.isdir(local_dirpath):
        #    os.makedirs(local_dirpath)
        # there should already be a topics.csv existing there...
    def fetch_topic_names(self):
        """Returns a list of topic strings"""
        topics_df = read_csv(self.topics_csv_filepath)
        return topics_df["topic"].tolist()
    def append_topics(self, topics):
        """
        Param: topics (list of str) like ['topic1', 'topic 2']
        Merges with any existing topics.csv, de-duplicating on "topic"
        (keep="first" preserves the previously stored rows).
        """
        new_df = DataFrame(topics, columns=["topic"])
        csv_filepath = self.topics_csv_filepath
        if os.path.isfile(csv_filepath):
            existing_df = read_csv(csv_filepath)
            merged_df = concat([existing_df, new_df])
            merged_df.drop_duplicates(subset=["topic"], inplace=True, keep="first")
            merged_df.to_csv(csv_filepath, index=False)
        else:
            new_df.to_csv(csv_filepath, index=False)
    def append_tweets(self, tweets):
        """
        Param: tweets (list of dict)
        Appends rows to tweets.csv (no de-duplication); the header is written
        only when the file is first created.
        """
        new_df = DataFrame(tweets, columns=list(tweets[0].keys()))
        csv_filepath = self.tweets_csv_filepath
        if os.path.isfile(csv_filepath):
            new_df.to_csv(csv_filepath, mode="a", header=False, index=False)
        else:
            new_df.to_csv(csv_filepath, index=False)
| 2,379 | 34.522388 | 120 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/botcode_v2/classifier.py |
import os
import datetime
import time
from functools import lru_cache
from dotenv import load_dotenv
import numpy as np
from pandas import DataFrame
import matplotlib.pyplot as plt
from conftest import compile_mock_rt_graph
from app import APP_ENV
from app.decorators.number_decorators import fmt_n, fmt_pct
from app.friend_graphs.graph_analyzer import GraphAnalyzer
from app.botcode_v2.network_classifier_helper import getLinkDataRestrained as get_link_data_restrained # TODO: deprecate
from app.botcode_v2.network_classifier_helper import psi as link_energy
from app.botcode_v2.network_classifier_helper import computeH as compute_energy_graph
from app.botcode_v2.network_classifier_helper import compute_bot_probabilities
load_dotenv()
DRY_RUN = (os.getenv("DRY_RUN", default="true") == "true")
MU = float(os.getenv("MU", default="1"))
ALPHA_PERCENTILE = float(os.getenv("ALPHA_PERCENTILE", default="0.999"))
LAMBDA_00 = float(os.getenv("LAMBDA_00", default="0.61")) # TODO: interpretation of what this means
LAMBDA_11 = float(os.getenv("LAMBDA_11", default="0.83")) # TODO: interpretation of what this means
class NetworkClassifier:
    def __init__(self, rt_graph, weight_attr="weight", mu=MU, alpha_percentile=ALPHA_PERCENTILE, lambda_00=LAMBDA_00, lambda_11=LAMBDA_11):
        """
        Takes all nodes in a retweet graph and assigns each user a score from 0 (human) to 1 (bot).
        Then writes the results to CSV file.
        Params:
            rt_graph -- retweet graph (presumably a networkx DiGraph; it must support
                in_degree()/out_degree() with a weight kwarg -- TODO confirm)
            weight_attr (str) name of the edge attribute holding retweet weights
            mu / alpha_percentile / lambda_00 / lambda_11 (float) tuning params
                for the link energy function (see env-var defaults above)
        """
        self.rt_graph = rt_graph
        self.weight_attr = weight_attr
        # PARAMS FOR THE LINK ENERGY FUNCTION...
        self.mu = mu
        self.alpha_percentile = alpha_percentile
        self.lambda_00 = lambda_00
        self.lambda_11 = lambda_11
        self.epsilon = 10**(-3) #> 0.001
        #self.lambda_01 = 1
        #self.lambda_10 = self.lambda_00 + self.lambda_11 - self.lambda_01 + self.epsilon
        # ARTIFACTS OF THE BOT CLASSIFICATION PROCESS...
        # (populated by compile_energy_graph(); None until then)
        self.energy_graph = None
        self.bot_ids = None
        self.user_data = None
    @property
    @lru_cache(maxsize=None)
    def links(self):
        """TODO: deprecate. Cached link data derived from the retweet graph
        (via getLinkDataRestrained, aliased as get_link_data_restrained)."""
        print("-----------------")
        print("LINKS...")
        return get_link_data_restrained(self.rt_graph, weight_attr=self.weight_attr)
    @property
    @lru_cache(maxsize=None)
    def in_degrees(self):
        """Weighted in-degree view of the retweet graph (cached per instance)."""
        return self.rt_graph.in_degree(weight=self.weight_attr)
    @property
    @lru_cache(maxsize=None)
    def out_degrees(self):
        """Weighted out-degree view of the retweet graph (cached per instance)."""
        return self.rt_graph.out_degree(weight=self.weight_attr)
    @property
    @lru_cache(maxsize=None)
    def alpha(self):
        """Params for the link_energy function: [mu, alpha_out, alpha_in], where the
        alphas are the alpha_percentile cutoffs of the weighted out-/in-degree
        distributions (printed values show one example run)."""
        in_degrees_list = [v for _,v in self.in_degrees]
        out_degrees_list = [v for _,v in self.out_degrees]
        print("MAX IN:", fmt_n(max(in_degrees_list))) #> 76,617
        print("MAX OUT:", fmt_n(max(out_degrees_list))) #> 5,608
        alpha_in = np.quantile(in_degrees_list, self.alpha_percentile)
        alpha_out = np.quantile(out_degrees_list, self.alpha_percentile)
        print("ALPHA IN:", fmt_n(alpha_in)) #> 2,252
        print("ALPHA OUT:", fmt_n(alpha_out)) #> 1,339
        return [self.mu, alpha_out, alpha_in]
@property
@lru_cache(maxsize=None)
def link_energies(self):
"""TODO: refactor by looping through the edges in the RT graph instead....
link[0] is the edge[0]
link[1] is the edge[1]
link[4] is the weight attr value
"""
print("-----------------")
print("ENERGIES...")
return [(
link[0],
link[1],
link_energy(
link[0], link[1], link[4],
self.in_degrees, self.out_degrees,
self.alpha, self.lambda_00, self.lambda_11, self.epsilon
)
) for link in self.links]
@property
@lru_cache(maxsize=None)
def prior_probabilities(self):
return dict.fromkeys(list(self.rt_graph.nodes), 0.5) # set all screen names to 0.5
def compile_energy_graph(self):
print("COMPILING ENERGY GRAPH...")
self.energy_graph, self.bot_ids, self.user_data = compute_energy_graph(self.rt_graph, self.prior_probabilities, self.link_energies, self.out_degrees, self.in_degrees)
#self.human_names = list(set(self.rt_graph.nodes()) - set(self.bot_ids))
print("-----------------")
print("ENERGY GRAPH:", type(self.energy_graph))
print("NODE COUNT:", fmt_n(self.energy_graph.number_of_nodes()))
print(f"BOT COUNT: {fmt_n(len(self.bot_ids))} ({fmt_pct(len(self.bot_ids) / self.energy_graph.number_of_nodes())})")
print("USER DATA:", fmt_n(len(self.user_data.keys())))
@property
@lru_cache(maxsize=None)
def bot_probabilities(self):
if not self.energy_graph and not self.bot_ids:
self.compile_energy_graph()
return compute_bot_probabilities(self.rt_graph, self.energy_graph, self.bot_ids)
@property
@lru_cache(maxsize=None)
def bot_probabilities_df(self):
df = DataFrame(list(self.bot_probabilities.items()), columns=["user_id", "bot_probability"])
df.index.name = "row_id"
df.index = df.index + 1
print("--------------------------")
print("CLASSIFICATION COMPLETE!")
print(df.head())
print("... < 50% (NOT BOTS):", fmt_n(len(df[df["bot_probability"] < 0.5])))
print("... = 50% (NOT BOTS):", fmt_n(len(df[df["bot_probability"] == 0.5])))
print("... > 50% (MAYBE BOTS):", fmt_n(len(df[df["bot_probability"] > 0.5])))
print("... > 90% (LIKELY BOTS):", fmt_n(len(df[df["bot_probability"] > 0.9])))
return df
def generate_bot_probabilities_histogram(self, img_filepath=None, show_img=True, title="Bot Probability Scores (excludes 0.5)"):
probabilities = self.bot_probabilities_df["bot_probability"]
num_bins = round(len(probabilities) / 10)
counts, bin_edges = np.histogram(probabilities, bins=num_bins) # ,normed=True #> "VisibleDeprecationWarning: Passing `normed=True` on non-uniform bins has always been broken"...
cdf = np.cumsum(counts)
plt.plot(bin_edges[1:], cdf / cdf[-1])
plt.grid()
plt.xlabel("Bot probability")
plt.ylabel("CDF")
plt.hist(probabilities[probabilities < 0.5])
plt.hist(probabilities[probabilities > 0.5])
plt.grid()
plt.xlabel("Bot probability")
plt.ylabel("Frequency")
plt.title(title)
if img_filepath:
plt.savefig(img_filepath)
if show_img:
plt.show()
if __name__ == "__main__":
    manager = GraphAnalyzer()
    #
    # LOAD RT GRAPH
    #
    if DRY_RUN:
        # use a small in-memory mock graph, for local testing
        rt_graph = compile_mock_rt_graph()
        print("RT GRAPH:", type(rt_graph))
        print(" NODES:", fmt_n(rt_graph.number_of_nodes()))
        print(" EDGES:", fmt_n(rt_graph.number_of_edges()))
    else:
        rt_graph = manager.graph
        manager.report()
        if APP_ENV == "development":
            if input("CONTINUE? (Y/N): ").upper() != "Y":
                print("EXITING...")
                exit()
    #
    # PERFORM BOT CLASSIFICATION
    #
    # BUGFIX: the class defined above is NetworkClassifier (it was previously
    # referenced here as the undefined name "Classifier", raising a NameError)
    classifier = NetworkClassifier(rt_graph, weight_attr="rt_count")
    df = classifier.bot_probabilities_df
    #
    # SAVE ARTIFACTS
    #
    artifacts_dir = os.path.join(manager.local_dirpath, "botcode_v2")
    if not os.path.isdir(artifacts_dir):
        os.mkdir(artifacts_dir)
    if DRY_RUN:
        csv_filepath = os.path.join(artifacts_dir, "mock_probabilities.csv")
        img_filepath = os.path.join(artifacts_dir, "mock_probabilities_histogram.png")
    else:
        # include the lambda params in the filenames, so runs with different params don't clobber each other:
        csv_filepath = os.path.join(artifacts_dir, f"bot_probabilities_{classifier.lambda_00}_{classifier.lambda_11}.csv")
        img_filepath = os.path.join(artifacts_dir, f"bot_probabilities_{classifier.lambda_00}_{classifier.lambda_11}_histogram.png")
    print("----------------")
    print("SAVING CSV FILE...")
    print(csv_filepath)
    df.to_csv(csv_filepath)
    print("----------------")
    print("SAVING HISTOGRAM...")
    print(img_filepath)
    classifier.generate_bot_probabilities_histogram(img_filepath=img_filepath, show_img=(APP_ENV=="development"))
| 8,235 | 35.767857 | 185 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/botcode_v2/network_classifier_helper.py |
#
# A NEAR REPLICA OF BOTCODE VERSION 2 (SEE THE "START" DIR)
#
import math
from collections import defaultdict
from operator import itemgetter
import time
from datetime import datetime
import numpy as np
import networkx as nx
##########################################################################
####################### BUILD RETWEET NX-(SUB)GRAPH FROM DICTIONNARY #####
##########################################################################
'''
Takes as input a csv file of retweet relationships and builds
a NetworkX object, in order to apply prebuilt mincut algorithms
'''
def buildRTGraph(graph, subNodes, lowerBound=0):
    '''
    Builds a weighted directed NetworkX graph of retweet relationships.

    INPUTS:
    ## graph (dict)
    maps each retweeting user id to the list of user ids they retweeted
    ## subNodes (list of ints)
    user ids to restrict the graph to (only edges among these users are kept)
    ## lowerBound (int)
    minimum retweet count required for an edge to be added (sparsify graph)

    Returns an nx.DiGraph with one weighted edge (retweeter -> retweeted) per relationship.
    '''
    rt_graph = nx.DiGraph()
    # only consider retweeters that appear in both the data and the requested subgraph:
    retweeter_ids = list(np.unique(np.intersect1d(subNodes, list(graph.keys()))))
    for counter, retweeter in enumerate(retweeter_ids, start=1):
        print("at user n" + str(counter) + " on " + str(len(graph)))
        # tally how many times this user retweeted each other user:
        retweeted_ids, retweet_counts = np.unique(graph[retweeter], return_counts=True)
        weights = dict(zip(retweeted_ids, retweet_counts))
        for retweeted in np.unique(np.intersect1d(retweeted_ids, subNodes)):
            weight = weights[retweeted]
            if retweeted == retweeter or weight < lowerBound:
                continue # skip self-loops and relationships below the threshold
            rt_graph.add_node(retweeter)
            rt_graph.add_node(retweeted)
            rt_graph.add_edge(retweeter, retweeted, weight=weight)
    return rt_graph
############################################################################
####################### BUILD/CUT ENERGY GRAPH #############################
############################################################################
'''
Takes as input the RT graph and builds the energy graph.
Then cuts the energy graph to classify
'''
def computeH(G, piBot, edgelist_data, graph_out, graph_in):
    # Builds the energy graph H from the retweet graph, then min-cuts it to classify nodes.
    # Node 1 serves as the source side ("human") and node 0 as the sink side ("bot").
    # NOTE(review): this assumes 0 and 1 never appear as real user ids in G -- confirm upstream.
    H = nx.DiGraph()
    '''
    INPUTS:
    ## G (ntwkX graph)
    the Retweet Graph from buildRTGraph
    ## piBot (dict of floats)
    a dictionnary with prior on bot probabilities. Keys are users_ids, values are prior bot scores.
    ## edgelist_data (list of tuples)
    information about edges to build energy graph.
    This list comes in part from the getLinkDataRestrained method
    ## graph_out (dict of ints)
    a graph that stores out degrees of accounts in retweet graph
    ## graph_in (dict of ints)
    a graph that stores in degrees of accounts in retweet graph
    '''
    # Per-node records. phi_0 / phi_1 are the unary energies of labeling the node
    # human / bot, derived from the prior (the 10**(-20) term avoids log(0)).
    user_data = {i: {
        'user_id': i,
        'out': graph_out[i],
        'in': graph_in[i],
        'old_prob': piBot[i],
        'phi_0': max(0, -np.log(float(10**(-20) + (1 - piBot[i])))),
        'phi_1': max(0, -np.log(float(10**(-20) + piBot[i]))),
        'prob': 0,
        'clustering': 0
    } for i in G.nodes()}
    # Seed H with zero-capacity edges: both directions between each linked pair,
    # plus an edge from every user to the sink (0) and from the source (1) to every user.
    set_1 = [(el[0], el[1]) for el in edgelist_data]
    set_2 = [(el[1], el[0]) for el in edgelist_data]
    set_3 = [(el, 0) for el in user_data]
    set_4 = [(1, el) for el in user_data]
    H.add_edges_from(set_1 + set_2 + set_3 + set_4, capacity=0)
    # Fold each link's pairwise energies [psi_00, psi_01, psi_10, psi_11]
    # into edge capacities on the node-pair, sink, and source edges.
    for i in edgelist_data:
        val_00 = i[2][0]
        val_01 = i[2][1]
        val_10 = i[2][2]
        val_11 = i[2][3]
        # edges between nodes
        H[i[0]][i[1]]['capacity'] += 0.5 * (val_01 + val_10 - val_00 - val_11)
        H[i[1]][i[0]]['capacity'] += 0.5 * (val_01 + val_10 - val_00 - val_11)
        # edges to sink (bot energy)
        H[i[0]][0]['capacity'] += 0.5 * val_11 + 0.25 * (val_10 - val_01)
        H[i[1]][0]['capacity'] += 0.5 * val_11 + 0.25 * (val_01 - val_10)
        # edges from source (human energy)
        H[1][i[0]]['capacity'] += 0.5 * val_00 + 0.25 * (val_01 - val_10)
        H[1][i[1]]['capacity'] += 0.5 * val_00 + 0.25 * (val_10 - val_01)
        # min-cut requires non-negative capacities; stop early (with a warning) if violated:
        if(H[1][i[0]]['capacity'] < 0):
            print("Neg capacity")
            break
        if(H[i[1]][0]['capacity'] < 0):
            print("Neg capacity")
            break
        if(H[1][i[1]]['capacity'] < 0):
            print("Neg capacity")
            break
        if(H[i[0]][0]['capacity'] < 0):
            print("Neg capacity")
            break
    # Add each node's unary (prior-derived) energies to its source / sink edges.
    for i in user_data.keys():
        H[1][i]['capacity'] += user_data[i]['phi_0']
        if(H[1][i]['capacity'] < 0):
            print("Neg capacity")
            break
        H[i][0]['capacity'] += user_data[i]['phi_1']
        if(H[i][0]['capacity'] < 0):
            print("Neg capacity")
            break
    cut_value, mc = nx.minimum_cut(H, 1, 0)
    # mc = [nodes dont cut source edge (bots), nodes dont cut sink edge
    # (humans)]
    Bots = list(mc[0])
    if 0 in Bots: # wrong cut set because nodes have sink edge (humans)
        print("Double check")
        Bots = list(mc[1])
    Bots.remove(1) # drop the artificial source node from the bot list
    return H, Bots, user_data
def compute_bot_probabilities(rt_graph, energy_graph, bot_names):
    """
    Assigns each node in the retweet graph a bot probability in [0, 1],
    based on the capacities of its edges in the (already cut) energy graph.

    Params:
        rt_graph (networkx.DiGraph) the original retweet graph
        energy_graph (networkx.DiGraph) the energy graph produced by computeH
        bot_names (list) nodes classified as bots by the min-cut

    Returns a dict mapping each node to its bot probability.
    """
    bot_probabilities = {}
    for counter, node in enumerate(rt_graph.nodes()):
        if counter % 1000 == 0:
            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "|", "NODE:", counter)
        # neighbors in the energy graph, excluding the artificial source (1) / sink (0) nodes:
        neighbors = list(np.unique([i for i in nx.all_neighbors(energy_graph, node) if i not in [0, 1]]))
        bot_neighbors = list(np.unique(np.intersect1d(neighbors, bot_names)))
        human_neighbors = list(set(neighbors) - set(bot_neighbors))
        energy_gap = sum(energy_graph[node][h]['capacity'] for h in human_neighbors)
        energy_gap -= sum(energy_graph[node][b]['capacity'] for b in bot_neighbors)
        energy_gap += energy_graph[node][0]['capacity'] - energy_graph[1][node]['capacity']
        if energy_gap > 12:
            # the sigmoid underflows toward zero here, so skip the exp call
            bot_probabilities[node] = 0
        else:
            bot_probabilities[node] = 1.0 / (1 + np.exp(energy_gap))
    return bot_probabilities
###############################################################################
####################### COMPUTE EDGES INFORMATION #############################
###############################################################################
'''
Takes as input the RT graph and retrieves information on edges
to further build H.
'''
def getLinkDataRestrained(G, weight_attr="weight"):
    '''
    Collects per-edge link records from the retweet graph, pairing each edge
    with its reciprocal edge (if any).

    INPUTS:
    ## G (ntwkX graph)
    the Retweet Graph from buildRTGraph
    ## weight_attr (str)
    name of the edge attribute storing retweet counts

    Returns a list of [source, target, True, has_reciprocal, weight, reciprocal_weight].
    '''
    weight_by_edge = dict(((source, target), attrs[weight_attr]) for source, target, attrs in G.edges(data=True))
    link_data = []
    for (source, target), weight in weight_by_edge.items():
        has_reciprocal = (target, source) in weight_by_edge
        reciprocal_weight = weight_by_edge[(target, source)] if has_reciprocal else 0
        link_data.append([source, target, True, has_reciprocal, weight, reciprocal_weight])
    return link_data
##########################################################################
####################### POTENTIAL FUNCTION ###############################
##########################################################################
'''
Compute joint energy potential between two users
'''
# INPUTS:
# u1 (int) ID of user u1
# u2 (int) ID of user u2
# wlr (int) number of retweets from u1 to u2
# out_graph (dict of ints) a graph that stores out degrees of accounts in retweet graph
# in_graph (dict of ints) a graph that stores in degrees of accounts in retweet graph
# alpha (list of floats)
# a list containing hyperparams (mu, alpha_out, alpha_in)
# lambda00 = ratio of psi_00 to psi_01
# lambda11 = ratio of psi_11 to psi_01
## epsilon (int)
# exponent such that lambda01=lambda11+lambda00-1+epsilon
def psi(u1, u2, wlr, in_graph, out_graph, alpha, lambda00, lambda11, epsilon):
    """
    Computes the joint energy potentials between two users, for each of the
    four label combinations (human/human, human/bot, bot/human, bot/bot).

    Params:
        u1, u2 : ids of the retweeting / retweeted users
        wlr (int) number of retweets from u1 to u2
        in_graph / out_graph : lookups of weighted in / out degree per user id
        alpha (list of floats) hyperparams [mu, alpha_out, alpha_in]
        lambda00 / lambda11 (float) ratios of psi_00 / psi_11 to psi_01
        epsilon (float) offset such that lambda10 = lambda00 + lambda11 - 1 + epsilon

    Returns [psi_00, psi_01, psi_10, psi_11].
    """
    retweets_made = out_graph[u1] # outdegree of u1 (number of retweets it did)
    retweets_received = in_graph[u2] # indegree of u2 (number of retweets it received)
    if retweets_made == 0 or retweets_received == 0:
        print("Relationship problem: " + str(u1) + " --> " + str(u2))
    mu, alpha_out, alpha_in = alpha[0], alpha[1], alpha[2]
    exponent = alpha_out / float(retweets_made) - 1 + alpha_in / float(retweets_received) - 1
    if exponent < 10:
        base_energy = wlr * mu / (1 + np.exp(exponent))
    else:
        base_energy = 0 # the sigmoid underflows toward zero for large exponents
    lambda01 = 1
    lambda10 = lambda00 + lambda11 - 1 + epsilon
    return [lambda00 * base_energy, lambda01 * base_energy, lambda10 * base_energy, lambda11 * base_energy]
| 8,570 | 33.011905 | 121 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/botcode_v2/investigation.py |
import os
import datetime
import time
from dotenv import load_dotenv
import numpy as np
from pandas import DataFrame
import matplotlib.pyplot as plt
#from sklearn import metrics
#from scipy.sparse import csc_matrix
from conftest import compile_mock_rt_graph
from app.decorators.number_decorators import fmt_n, fmt_pct
from app.friend_graphs.graph_analyzer import GraphAnalyzer
from app.botcode_v2.network_classifier_helper import getLinkDataRestrained as get_link_data_restrained
from app.botcode_v2.network_classifier_helper import psi as link_energy
from app.botcode_v2.network_classifier_helper import computeH as compile_energy_graph
from app.botcode_v2.network_classifier_helper import compute_bot_probabilities
load_dotenv()  # load environment variables from the local .env file, if present
DRY_RUN = (os.getenv("DRY_RUN", default="true") == "true")  # when true, classify a small mock retweet graph instead of the real one
if __name__ == "__main__":
    #
    # LOAD GRAPH (GIVEN JOB ID)
    #
    print("----------------")
    print("DRY RUN:", DRY_RUN)
    manager = GraphAnalyzer()
    if DRY_RUN:
        rt_graph = compile_mock_rt_graph()
        print("RT GRAPH:", type(rt_graph))
        print(" NODES:", fmt_n(rt_graph.number_of_nodes()))
        print(" EDGES:", fmt_n(rt_graph.number_of_edges()))
    else:
        rt_graph = manager.graph
        manager.report()
    print("----------------")
    # weighted degrees: in-degree counts retweets received; out-degree counts retweets made
    in_degrees = rt_graph.in_degree(weight="rt_count")
    out_degrees = rt_graph.out_degree(weight="rt_count")
    in_degrees_list = [x[1] for x in in_degrees]
    out_degrees_list = [x[1] for x in out_degrees]
    print("MAX IN:", fmt_n(max(in_degrees_list))) #> 76,617
    print("MAX OUT:", fmt_n(max(out_degrees_list))) #> 5,608
    # hyperparams for the link energy function (see psi in network_classifier_helper):
    mu = 1
    percentile = 0.999
    alpha_in = np.quantile(in_degrees_list, percentile)
    alpha_out = np.quantile(out_degrees_list, percentile)
    print("ALPHA IN:", fmt_n(alpha_in)) #> 2,252
    print("ALPHA OUT:", fmt_n(alpha_out)) #> 1,339
    alpha = [mu, alpha_out, alpha_in]
    epsilon = 10**(-3) #> 0.001
    #lambda01 = 1
    lambda00 = 0.61 # using this as a link energy param
    lambda11 = 0.83 # using this as a link energy param
    #lambda10 = lambda00 + lambda11 - lambda01 + epsilon
    #
    # CREATE ENERGY GRAPH
    #
    print("----------------")
    links = get_link_data_restrained(rt_graph, weight_attr="rt_count") # this step is unnecessary?
    print("LINKS:", fmt_n(len(links)))
    energies = [(
        link[0],
        link[1],
        link_energy(link[0], link[1], link[4], in_degrees, out_degrees, alpha, lambda00, lambda11, epsilon) # just loop through edges and pass the edges and "rt_count" data here?
    ) for link in links]
    print("ENERGIES:", fmt_n(len(energies)))
    print("----------------")
    prior_probabilities = dict.fromkeys(list(rt_graph.nodes), 0.5) # set all screen names to 0.5
    energy_graph, bot_names, user_data = compile_energy_graph(rt_graph, prior_probabilities, energies, out_degrees, in_degrees)
    print("ENERGY GRAPH:", type(energy_graph))
    print("NODES:", fmt_n(energy_graph.number_of_nodes()))
    print(f"BOTS: {fmt_n(len(bot_names))} ({fmt_pct(len(bot_names) / energy_graph.number_of_nodes())})")
    #
    # BOT CLASSIFICATION
    #
    print("----------------")
    print("BOT CLASSIFICATION...")
    bot_probabilities = compute_bot_probabilities(rt_graph, energy_graph, bot_names)
    print("CLASSIFICATION COMPLETE!")
    #
    # WRITE TO FILE
    #
    df = DataFrame(list(bot_probabilities.items()), columns=["screen_name", "bot_probability"])
    print(df.head())
    # BUGFIX: ensure the output directory exists before writing
    # (to_csv / savefig don't create missing directories)
    preds_dirpath = os.path.join(manager.local_dirpath, "preds2")
    if not os.path.isdir(preds_dirpath):
        os.makedirs(preds_dirpath)
    csv_filepath = os.path.join(preds_dirpath, "bot_probabilities.csv")
    print("WRITING TO FILE...") # (moved before the write, so the message is accurate)
    print(csv_filepath)
    df.to_csv(csv_filepath)
    #
    # HISTOGRAM
    #
    print("LESS THAN 50%:", len(df[df.bot_probability < 0.5]))
    print("EQUAL TO 50%:", len(df[df.bot_probability == 0.5]))
    print("GREATER THAN 50%:", len(df[df.bot_probability > 0.5])) # BUGFIX: fixed "GREATHER" typo
    data = df.bot_probability
    # guard against bins=0, which np.histogram rejects (possible on tiny mock graphs):
    num_bins = max(round(len(data) / 10), 1)
    counts, bin_edges = np.histogram(data, bins=num_bins) # ,normed=True #> "VisibleDeprecationWarning: Passing `normed=True` on non-uniform bins has always been broken"...
    cdf = np.cumsum(counts)
    plt.plot(bin_edges[1:], cdf / cdf[-1])
    plt.grid()
    plt.xlabel("Bot probability")
    plt.ylabel("CDF")
    plt.hist(df.bot_probability[df.bot_probability < 0.5])
    plt.hist(df.bot_probability[df.bot_probability > 0.5])
    plt.grid()
    plt.xlabel("Bot probability")
    plt.ylabel("Frequency")
    plt.title("No 0.5 probability users")
    img_filepath = os.path.join(preds_dirpath, "bot_probabilities_histogram.png")
    plt.savefig(img_filepath)
    plt.show()
| 4,726 | 29.496774 | 178 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/checkpoint_scorer.py |
import os
from functools import lru_cache
#from pprint import pprint
import gc
from dotenv import load_dotenv
from app import server_sleep, seek_confirmation
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService, generate_timestamp, split_into_batches
from app.toxicity.model_manager import ModelManager, CHECKPOINT_NAME
load_dotenv()  # load environment variables from the local .env file, if present
LIMIT = int(os.getenv("LIMIT", default="25_000")) # number of records to fetch from bq at a time
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="1_000")) # number of texts to score at a time (ideal is 1K, see README)
class ToxicityScorer:
    """
    Fetches not-yet-scored status texts from BigQuery, scores them with the
    checkpoint-based toxicity model, and inserts the scores back into a
    checkpoint-specific BigQuery table.
    """

    def __init__(self, limit=LIMIT, batch_size=BATCH_SIZE, bq_service=None, model_manager=None):
        """
        Params:
            limit (int) max number of texts to fetch from BigQuery per run
            batch_size (int) number of texts to score / insert at a time
            bq_service (BigQueryService) optional, for dependency injection
            model_manager (ModelManager) optional, for dependency injection
        """
        self.limit = limit
        self.batch_size = batch_size
        self.bq_service = bq_service or BigQueryService()
        self.mgr = model_manager or ModelManager()
        print("----------------")
        print("TOXICITY SCORER...")
        print(" MODEL CHECKPOINT:", self.mgr.checkpoint_name.upper(), self.mgr.checkpoint_url)
        print(" SCORES TABLE NAME:", self.scores_table_name)
        print(" LIMIT:", fmt_n(self.limit))
        print(" BATCH SIZE:", fmt_n(self.batch_size))
        self.predict = self.mgr.predict_scores # method alias
        seek_confirmation()

    def perform(self):
        """Streams un-scored texts and scores them in batches (including the final partial batch)."""
        self.mgr.load_model_state()
        print("----------------")
        print(f"FETCHING TEXTS...")
        print(f"SCORING TEXTS IN BATCHES...")
        batch = []
        counter = 0
        for row in self.fetch_texts():
            batch.append(row)
            if len(batch) >= self.batch_size:
                counter = self._process_and_log(batch, counter)
                batch = []
        # process final (potentially incomplete) batch
        if batch:
            counter = self._process_and_log(batch, counter)
            batch = []

    def _process_and_log(self, batch, counter):
        """Scores and saves one batch, prints progress, and returns the updated counter."""
        counter += len(batch)
        print(" ", generate_timestamp(), "|", fmt_n(counter))
        self.process_batch(batch)
        return counter

    def fetch_texts(self):
        """Returns an iterator of texts that don't have scores yet (anti-join on the scores table)."""
        sql = f"""
            SELECT DISTINCT
                txt.status_text_id
                ,txt.status_text
            FROM `{self.bq_service.dataset_address}.status_texts` txt
            LEFT JOIN `{self.scores_table_name}` scores ON scores.status_text_id = txt.status_text_id
            WHERE scores.status_text_id IS NULL
        """
        if self.limit:
            sql += f" LIMIT {int(self.limit)} "
        return self.bq_service.execute_query(sql) # API call

    def process_batch(self, batch):
        """Scores one batch of rows (each with status_text_id / status_text) and saves the results."""
        texts = []
        text_ids = []
        for text_row in batch:
            texts.append(text_row["status_text"])
            text_ids.append(text_row["status_text_id"])
        results = self.predict(texts)
        score_rows = []
        for text_id, result in zip(text_ids, results):
            score_row = result.round(8).tolist() # round scores, to reduce storage requirements
            score_row.insert(0, text_id) # adds the text_id to the front of the list (proper column order -- see table definition)
            score_rows.append(score_row)
        self.save_scores(score_rows)

    def save_scores(self, values):
        """Params : values (list of lists corresponding with the proper column order)"""
        return self.bq_service.client.insert_rows(self.scores_table, values) # API call

    @property
    @lru_cache(maxsize=None)
    def scores_table(self):
        """The BigQuery table object scores get inserted into."""
        return self.bq_service.client.get_table(self.scores_table_name) # API call

    @property
    @lru_cache(maxsize=None)
    def scores_table_name(self):
        """Fully-qualified name of the checkpoint-specific scores table."""
        model_name = self.mgr.checkpoint_name.lower().replace("-","_").replace(";","") # using this model name in queries, so be super safe about SQL injection, although its not a concern right now
        return f"{self.bq_service.dataset_address}.toxicity_scores_{model_name}_ckpt"

    def count_scores(self):
        """Returns the number of distinct texts already scored."""
        sql = f"""
            SELECT count(DISTINCT status_text_id) as scored_text_count
            FROM `{self.scores_table_name}`
        """
        results = self.bq_service.execute_query(sql) # API call
        return list(results)[0]["scored_text_count"]
if __name__ == "__main__":
    # Score one round of texts, report counts before and after, then free memory
    # and sleep so the host can restart the process for the next round.
    scorer = ToxicityScorer()
    print("----------------")
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))
    scorer.perform()
    print("----------------")
    print("JOB COMPLETE!")
    print("----------------")
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))
    del scorer # release model / service references before the long sleep
    gc.collect()
    server_sleep(seconds=5*60) # give the server a break before restarting
| 4,743 | 31.272109 | 197 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/model_manager.py |
#
# adapted from: https://github.com/unitaryai/detoxify/blob/master/detoxify/detoxify.py
#
# using the pre-trained toxicity models provided via Detoxify checkpoints, but...
# 1) let's try different / lighter torch requirement approaches (to enable installation on heroku) - see requirements.txt file
# 2) let's also try to return the raw scores (to save processing time)
#
# references:
# https://github.com/unitaryai/detoxify/blob/master/detoxify/detoxify.py
# https://pytorch.org/docs/stable/hub.html
# https://pytorch.org/docs/stable/hub.html#torch.hub.load_state_dict_from_url
# https://pytorch.org/docs/stable/generated/torch.no_grad.html
#
import os
from pprint import pprint
from functools import lru_cache
from dotenv import load_dotenv
import torch
import transformers
from pandas import DataFrame
load_dotenv()  # load environment variables from the local .env file, if present
CHECKPOINT_NAME = os.getenv("CHECKPOINT_NAME", default="original") # "original" or "unbiased" (see README)
# Released Detoxify checkpoint download URLs, keyed by the CHECKPOINT_NAME options above:
CHECKPOINT_URLS = {
    "original": "https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_original-c1212f89.ckpt",
    "unbiased": "https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_bias-4e693588.ckpt",
    #"multilingual": "https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_multilingual-bbddc277.ckpt",
    #"original-small": "https://github.com/unitaryai/detoxify/releases/download/v0.1.2/original-albert-0e1d6498.ckpt",
    #"unbiased-small": "https://github.com/unitaryai/detoxify/releases/download/v0.1.2/unbiased-albert-c8519128.ckpt"
}
class ModelManager:
    """
    Loads a pre-trained Detoxify toxicity model from a released checkpoint
    and exposes prediction helpers (raw scores, records, or a DataFrame).
    """

    def __init__(self, checkpoint_name=None):
        """
        Params:
            checkpoint_name (str) one of CHECKPOINT_URLS' keys ("original" or "unbiased");
                defaults to the CHECKPOINT_NAME env var
        """
        self.checkpoint_name = checkpoint_name or CHECKPOINT_NAME
        self.checkpoint_url = CHECKPOINT_URLS[self.checkpoint_name]
        # checkpoint metadata (populated by load_model_state):
        self.model_state = None
        self.state_dict = None
        self.config = None
        self.tokenizer_name = None
        self.model_name = None
        self.model_type = None
        self.num_classes = None
        self.class_names = None

    def load_model_state(self):
        """Loads pre-trained model from saved checkpoint metadata (no-op if already loaded)."""
        if not self.model_state:
            print("---------------------------")
            print("LOADING MODEL STATE...")
            # see: https://pytorch.org/docs/stable/hub.html#torch.hub.load_state_dict_from_url
            self.model_state = torch.hub.load_state_dict_from_url(self.checkpoint_url, map_location="cpu")
            self.state_dict = self.model_state["state_dict"]
            self.config = self.model_state["config"]
            self.tokenizer_name = self.config["arch"]["args"]["tokenizer_name"] #> BertTokenizer
            self.model_name = self.config["arch"]["args"]["model_name"] #> BertForSequenceClassification
            self.model_type = self.config["arch"]["args"]["model_type"] #> bert-base-uncased
            self.num_classes = self.config["arch"]["args"]["num_classes"] #> 6
            self.class_names = self.config["dataset"]["args"]["classes"] #> ['toxicity', 'severe_toxicity', 'obscene', 'threat', 'insult', 'identity_hate']
            print("---------------------------")
            print("MODEL TYPE:", self.model_type)
            print("MODEL NAME:", self.model_name)
            print("TOKENIZER NAME:", self.tokenizer_name)
            print(f"CLASS NAMES ({self.num_classes}):", self.class_names)

    @property
    @lru_cache(maxsize=None)
    def model(self):
        """The pre-trained classification model (loads checkpoint state lazily on first access)."""
        # BUGFIX: the previous guard ("not self.model_state and self.model_name and ...")
        # could never be true before loading, because the other attributes start as None --
        # so lazy loading never happened and from_pretrained received None values.
        if self.model_state is None:
            self.load_model_state()
        # see: https://huggingface.co/transformers/main_classes/model.html#transformers.PreTrainedModel.from_pretrained
        return getattr(transformers, self.model_name).from_pretrained(
            pretrained_model_name_or_path=None,
            config=self.model_type,
            num_labels=self.num_classes,
            state_dict=self.state_dict,
            _fast_init=False
        )

    @property
    @lru_cache(maxsize=None)
    def tokenizer(self):
        """The matching tokenizer (loads checkpoint state lazily on first access)."""
        # BUGFIX: same inverted lazy-load guard as in the model property (see above)
        if self.model_state is None:
            self.load_model_state()
        return getattr(transformers, self.tokenizer_name).from_pretrained(self.model_type)

    @torch.no_grad()
    def predict_scores(self, texts):
        """Returns the raw scores, without formatting (for those desiring a faster experience)."""
        self.model.eval()
        inputs = self.tokenizer(texts, return_tensors="pt", truncation=True, padding=True).to(self.model.device)
        out = self.model(**inputs)[0]
        scores = torch.sigmoid(out).cpu().detach().numpy()
        return scores

    def predict_records(self, texts):
        """Optional, if you want the scores returned as a list of dict, with the texts in there as well."""
        records = []
        for i, score_row in enumerate(self.predict_scores(texts)):
            record = {}
            record["text"] = texts[i]
            for class_index, class_name in enumerate(self.class_names):
                record[class_name] = float(score_row[class_index])
            records.append(record)
        return records

    def predict_df(self, texts):
        """Optional, if you want the scores returned as a dataframe."""
        return DataFrame(self.predict_records(texts))
if __name__ == '__main__':
    # Smoke test: load the checkpoint model and score two sample tweet texts,
    # demonstrating the raw-scores and records prediction interfaces.
    texts = [
        "RT @realDonaldTrump: Crazy Nancy Pelosi should spend more time in her decaying city and less time on the Impeachment Hoax! https://t.co/eno…",
        "RT @SpeakerPelosi: The House cannot choose our impeachment managers until we know what sort of trial the Senate will conduct. President Tr…",
    ]
    mgr = ModelManager()
    mgr.load_model_state() # downloads (or reads cached) checkpoint before first prediction
    print("------------")
    print("MODEL:", type(mgr.model))
    print("TOKENIZER:", type(mgr.tokenizer))
    scores = mgr.predict_scores(texts) # raw scores: one row per text, one column per class
    print("------------")
    print("SCORES:", type(scores), scores.shape)
    print(scores[0])
    records = mgr.predict_records(texts) # list of dicts, with the text included
    print("------------")
    print("RECORDS:", type(records), len(records))
    print(records[0])
| 6,115 | 39.773333 | 155 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/scorer_async.py |
import os
from threading import current_thread, BoundedSemaphore
from concurrent.futures import ThreadPoolExecutor, as_completed # see: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
from dotenv import load_dotenv
from app import server_sleep
from app.decorators.number_decorators import fmt_n
from app.bq_service import generate_timestamp, split_into_batches
from app.toxicity.scorer import ToxicityScorer
load_dotenv()
MAX_THREADS = int(os.getenv("MAX_THREADS", default=3)) # the max number of threads to use, for concurrent processing
class ToxicityScorerAsync(ToxicityScorer):
    """Variant of ToxicityScorer that fans batches out across a bounded thread pool."""

    def process_batch_async(self, batch):
        """Scores one batch, logging which worker thread handled it."""
        print("PROCESSING BATCH OF TEXTS...", generate_timestamp(), " | ", len(batch), " | ", current_thread().name)
        self.process_batch(batch)

    def perform_async(self, max_threads=MAX_THREADS):
        """
        Fetches all texts up-front, splits them into batches, and scores the
        batches concurrently.

        Params: max_threads (int) max number of concurrent worker threads
        """
        print("----------------")
        print(f"FETCHING TEXTS...")
        text_rows = list(self.fetch_texts())
        print(f"ASSEMBLING BATCHES...")
        work_batches = list(split_into_batches(text_rows, batch_size=self.batch_size))
        print(f"SCORING TEXTS IN BATCHES...")
        with ThreadPoolExecutor(max_workers=max_threads, thread_name_prefix="THREAD") as executor:
            pending = [executor.submit(self.process_batch_async, work_batch) for work_batch in work_batches]
            print("BATCHES WILL PROCESS:", len(pending))
            for completed in as_completed(pending):
                completed.result() # surfaces any exception raised inside the worker
        print("----------------")
        print("ASYNC PERFORMANCE COMPLETE...")
if __name__ == "__main__":
    # Score one round of texts concurrently, reporting counts before and after,
    # then sleep so the host can restart the process for the next round.
    scorer = ToxicityScorerAsync()
    print("----------------")
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))
    scorer.perform_async()
    print("----------------")
    print("JOB COMPLETE!")
    print("----------------")
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))
    server_sleep(seconds=10*60) # give the server a break before restarting
| 2,081 | 32.580645 | 166 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/investigate_models.py |
from pprint import pprint
from detoxify import Detoxify
from pandas import DataFrame
if __name__ == '__main__':
    # Side-by-side comparison of the two Detoxify models on sample tweet texts,
    # printing one small score table per text.
    texts = [
        "RT @realDonaldTrump: I was very surprised & disappointed that Senator Joe Manchin of West Virginia voted against me on the Democrat’s total…",
        "RT @realDonaldTrump: Crazy Nancy Pelosi should spend more time in her decaying city and less time on the Impeachment Hoax! https://t.co/eno…",
        "RT @SpeakerPelosi: The House cannot choose our impeachment managers until we know what sort of trial the Senate will conduct. President Tr…",
        "RT @RepAdamSchiff: Lt. Col. Vindman did his job. As a soldier in Iraq, he received a Purple Heart. Then he displayed another rare form o…"
    ]
    # original: bert-base-uncased / Toxic Comment Classification Challenge
    original = Detoxify("original")
    # unbiased: roberta-base / Unintended Bias in Toxicity Classification
    unbiased = Detoxify("unbiased")
    for text in texts:
        print("----------------")
        print(f"TEXT: '{text}'")
        original_results = original.predict(text)
        #original_results["text"] = text
        original_results["model"] = "original" # tag each result row with the model that produced it
        unbiased_results = unbiased.predict(text)
        #unbiased_results["text"] = text
        unbiased_results["model"] = "unbiased"
        print(f"SCORES:")
        records = [original_results, unbiased_results]
        df = DataFrame(records, columns=["toxicity", "severe_toxicity", "obscene", "threat", "insult", "identity_hate", "model"])
        print(df.round(8).head())
| 1,583 | 38.6 | 155 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/scorer.py |
import os
from functools import lru_cache
#from pprint import pprint
from dotenv import load_dotenv
from detoxify import Detoxify
from pandas import DataFrame
from app import server_sleep
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService, generate_timestamp, split_into_batches
load_dotenv()  # load environment variables from the local .env file, if present
MODEL_NAME = os.getenv("MODEL_NAME", default="original") # "original" or "unbiased" (see README)
LIMIT = int(os.getenv("LIMIT", default="25_000")) # number of records to fetch from bq at a time
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="1_000")) # number of texts to score at a time (ideal is 1K, see README)
class ToxicityScorer:
    """
    Fetches not-yet-scored status texts from BigQuery, scores them with a Detoxify
    toxicity model, and inserts the scores back into a model-specific BigQuery table.
    """

    def __init__(self, model_name=MODEL_NAME, limit=LIMIT, batch_size=BATCH_SIZE, bq_service=None):
        """
        Params:
            model_name (str) which Detoxify model to use ("original" or "unbiased")
            limit (int) max number of texts to fetch from BigQuery per run
            batch_size (int) number of texts to score / insert at a time
            bq_service (BigQueryService) optional, for dependency injection
        """
        self.model_name = model_name.lower().replace(";","") # using this model name in queries, so be super safe about SQL injection, although its not a concern right now
        self.limit = limit
        self.batch_size = batch_size
        self.bq_service = bq_service or BigQueryService()
        print("----------------")
        print("TOXICITY SCORER...")
        print(" MODEL:", self.model_name.upper())
        print(" LIMIT:", fmt_n(self.limit))
        print(" BATCH SIZE:", fmt_n(self.batch_size))

    @property
    @lru_cache(maxsize=None)
    def scores_table_name(self):
        """Fully-qualified name of the model-specific scores table."""
        return f"{self.bq_service.dataset_address}.toxicity_scores_{self.model_name}"

    def count_scores(self):
        """Returns the number of distinct texts already scored."""
        sql = f"""
            SELECT count(DISTINCT status_text_id) as text_count
            FROM `{self.scores_table_name}`
        """
        results = self.bq_service.execute_query(sql) # API call
        return list(results)[0]["text_count"]

    def fetch_texts(self):
        """Returns an iterator of texts that don't have scores yet (anti-join on the scores table)."""
        sql = f"""
            SELECT DISTINCT
                txt.status_text_id
                ,txt.status_text
            FROM `{self.bq_service.dataset_address}.status_texts` txt
            LEFT JOIN `{self.scores_table_name}` scores ON scores.status_text_id = txt.status_text_id
            WHERE scores.status_text_id IS NULL
        """
        if self.limit:
            sql += f" LIMIT {int(self.limit)} "
        return self.bq_service.execute_query(sql) # API call

    @property
    @lru_cache(maxsize=None)
    def model(self):
        """The Detoxify model (loaded once, on first access)."""
        return Detoxify(self.model_name)

    @property
    @lru_cache(maxsize=None)
    def scores_table_colnames(self):
        """Returns the column names in the proper order (text id first, then one column per class)."""
        return ["status_text_id"] + self.model.class_names

    def save_scores(self, values):
        """Params : values (list of lists corresponding with the proper column order)"""
        return self.bq_service.client.insert_rows(self.scores_table, values) # API call

    @property
    @lru_cache(maxsize=None)
    def scores_table(self):
        """The BigQuery table object scores get inserted into."""
        return self.bq_service.client.get_table(self.scores_table_name) # API call

    def process_batch(self, batch):
        """Scores one batch of rows (each with status_text_id / status_text) and saves the results."""
        scores = self.model.predict([row["status_text"] for row in batch])
        scores["status_text_id"] = [row["status_text_id"] for row in batch]
        scores_df = DataFrame(scores)
        # reorder columns for BQ (so they save properly):
        scores_df = scores_df.reindex(self.scores_table_colnames, axis="columns")
        # round scores, to reduce storage requirements:
        for scores_col in self.model.class_names:
            scores_df[scores_col] = scores_df[scores_col].round(8)
        self.save_scores(scores_df.to_dict(orient="split")["data"])

    def perform(self):
        """Fetches all texts up-front, splits them into batches, and scores each batch."""
        print("----------------")
        print(f"FETCHING TEXTS...")
        rows = list(self.fetch_texts())
        print(f"ASSEMBLING BATCHES...")
        batches = list(split_into_batches(rows, batch_size=self.batch_size))
        print(f"SCORING TEXTS IN BATCHES...")
        counter = 0
        for index, batch in enumerate(batches):
            counter += len(batch)
            print(" ", generate_timestamp(), f"BATCH {index+1}", f"| {fmt_n(counter)}")
            self.process_batch(batch)

    def perform_better(self):
        """Streams texts and scores a batch as soon as it fills (lower memory footprint than perform)."""
        print("----------------")
        print(f"FETCHING TEXTS...")
        print(f"SCORING TEXTS IN BATCHES...")
        batch = []
        counter = 0
        for row in self.fetch_texts():
            batch.append(row)
            if len(batch) >= self.batch_size:
                counter += len(batch)
                print(" ", generate_timestamp(), "|", fmt_n(counter))
                self.process_batch(batch)
                batch = []
        # BUGFIX: flush the final (potentially incomplete) batch, which was previously
        # dropped -- leaving up to (batch_size - 1) fetched texts unscored every run
        if batch:
            counter += len(batch)
            print(" ", generate_timestamp(), "|", fmt_n(counter))
            self.process_batch(batch)
if __name__ == "__main__":

    # construct the scorer from env vars (MODEL_NAME, LIMIT, BATCH_SIZE)
    scorer = ToxicityScorer()

    print("----------------")
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))

    #scorer.perform()
    scorer.perform_better()  # streaming variant (lower memory footprint than perform)
    #scorer.perform_better_timed()
    #duration_seconds = scorer.perform_better_timed()
    #items_per_second = round(scorer.limit / duration_seconds, 2)
    #print(f"PROCESSED {fmt_n(scorer.limit)} ITEMS IN {duration_seconds} SECONDS ({items_per_second} ITEMS / SECOND)")

    print("----------------")
    print("JOB COMPLETE!")
    print("----------------")
    # print the count again so progress is visible in the logs
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))

    server_sleep(seconds=10*60) # give the server a break before restarting
| 5,428 | 35.436242 | 171 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/investigate_benchmarks.py |
from time import perf_counter # see: https://stackoverflow.com/questions/25785243/understanding-time-perf-counter-and-time-process-time
from functools import wraps
from detoxify import Detoxify
def performance_timer(func):
    """Decorator: times one call to the wrapped function and returns the
    elapsed wall-clock seconds, rounded to 2 decimal places.

    NOTE: the wrapped function's own return value is discarded — use this
    only for side-effect / benchmarking work.
    """
    @wraps(func)
    def timed(*args, **kwds):
        began = perf_counter()
        func(*args, **kwds)
        elapsed = perf_counter() - began
        return round(elapsed, 2)
    return timed
# batch sizes to benchmark (larger sizes are commented out to keep runs short)
LIMITS = [
    1, 10, 100,
    500, 750,
    1_000,
    1_250, 1_500,
    #2_500, 5_000, 7_500,
    #10_000 #, 100_000, #1_000_000
]
@performance_timer
def scoring_duration(model, texts):
    """Returns the seconds the model takes to score `texts` (the decorator
    replaces the return value with the elapsed time; the scores are discarded)."""
    model.predict(texts)
if __name__ == '__main__':

    print("---------------------")
    model_name = "original"
    model = Detoxify(model_name)  # loads (and, if needed, downloads) the pretrained model
    print("MODEL:", model_name.upper())
    print(model.class_names)

    # score the same dummy text N times at each size to measure throughput
    for limit in LIMITS:
        print("---------------------")
        texts = ["some example text - what a jerk" for _ in range(0, limit)]
        duration_seconds = scoring_duration(model, texts)
        items_per_second = round(limit / duration_seconds, 2)
        print(f"PROCESSED {limit} ITEMS IN {duration_seconds} SECONDS ({items_per_second} items / second)")

    print("---------------------")
| 1,494 | 25.696429 | 135 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/checkpoint_scorer_async.py |
import os
from threading import current_thread #, BoundedSemaphore
from concurrent.futures import ThreadPoolExecutor, as_completed # see: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
import gc
from dotenv import load_dotenv
from app import server_sleep
from app.decorators.number_decorators import fmt_n
from app.bq_service import generate_timestamp, split_into_batches
from app.toxicity.checkpoint_scorer import ToxicityScorer
load_dotenv()  # read env-var overrides from a local .env file

MAX_THREADS = int(os.getenv("MAX_THREADS", default=10)) # the max number of threads to use, for concurrent processing
class ToxicityScorerAsync(ToxicityScorer):
    """Concurrent variant of the checkpoint ToxicityScorer: fans the batches
    out across a thread pool instead of scoring them sequentially."""

    def process_batch_async(self, batch):
        # announce which worker thread is handling this batch, then delegate to the base implementation
        print("PROCESSING BATCH OF TEXTS...", generate_timestamp(), " | ", len(batch), " | ", current_thread().name)
        self.process_batch(batch)

    def perform_async(self, max_threads=MAX_THREADS):
        # NOTE(review): `mgr` comes from the checkpoint ToxicityScorer base class —
        # presumably pre-loads the model state before the threads fork; confirm there.
        self.mgr.load_model_state()

        print("----------------")
        print("FETCHING TEXTS...")
        fetched_rows = list(self.fetch_texts())

        print("ASSEMBLING BATCHES...")
        work_items = list(split_into_batches(fetched_rows, batch_size=self.batch_size))

        print("SCORING TEXTS IN BATCHES...")
        with ThreadPoolExecutor(max_workers=max_threads, thread_name_prefix="THREAD") as executor:
            pending = [executor.submit(self.process_batch_async, item) for item in work_items]
            print("BATCHES WILL PROCESS:", len(pending))
            for completed in as_completed(pending):
                completed.result()  # surface any exception raised inside the worker

        print("----------------")
        print("ASYNC PERFORMANCE COMPLETE...")
if __name__ == "__main__":

    scorer = ToxicityScorerAsync()

    print("----------------")
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))

    scorer.perform_async()

    print("----------------")
    print("JOB COMPLETE!")
    print("----------------")
    # print the count again so progress is visible in the logs
    print("SCORES COUNT:", fmt_n(scorer.count_scores()))

    # release the scorer (and its model) before the long sleep, to free memory
    del scorer
    gc.collect()

    server_sleep(seconds=5*60) # give the server a break before restarting
| 2,172 | 31.924242 | 166 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/graph_storage.py |
import os
import json
import pickle
from sys import getsizeof
from memory_profiler import profile #, memory_usage
from pprint import pprint
from pandas import DataFrame
from networkx import write_gpickle, read_gpickle
from dotenv import load_dotenv
from conftest import compile_mock_rt_graph
from app import DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.gcs_service import GoogleCloudStorageService
from conftest import compile_mock_rt_graph
load_dotenv()  # read env-var overrides from a local .env file

DIRPATH = os.getenv("DIRPATH", default="graphs/mock_graph")  # storage subdir for the graph artifacts
DRY_RUN = (os.getenv("DRY_RUN", default="false") == "true")  # when true, use a mock graph instead of real data
WIFI_ENABLED = (os.getenv("WIFI_ENABLED", default="true") == "true")  # when false, skip all GCS uploads/downloads
class GraphStorage:
    """Saves and loads artifacts from the networkx graph compilation process,
    using local storage and/or Google Cloud Storage (GCS).

    Local files live under DATA_DIR/<dirpath>; remote copies live under
    "storage/data/<dirpath>" in the GCS bucket.

    TODO: bot probability stuff only applies to bot retweet graphs, and should
    probably be moved into a child graph storage class.
    """

    def __init__(self, dirpath=None, gcs_service=None):
        """
        Params:
            dirpath (str) like "graphs/my_graph/123"
            gcs_service (GoogleCloudStorageService) optional, for dependency injection
        """
        self.gcs_service = gcs_service or GoogleCloudStorageService()
        self.dirpath = dirpath or DIRPATH
        self.gcs_dirpath = os.path.join("storage", "data", self.dirpath)
        self.local_dirpath = os.path.join(DATA_DIR, self.dirpath) # TODO: to make compatible on windows, split the dirpath on "/" and re-join using os.sep

        print("-------------------------")
        print("GRAPH STORAGE...")
        print("  DIRPATH:", self.dirpath)
        print("  GCS DIRPATH:", self.gcs_dirpath)
        print("  LOCAL DIRPATH:", os.path.abspath(self.local_dirpath))
        print("  WIFI ENABLED:", WIFI_ENABLED)

        seek_confirmation()

        if not os.path.exists(self.local_dirpath):
            os.makedirs(self.local_dirpath)

        self.results = None
        self.graph = None  # None means "not loaded yet" (see report / memory_report)

    @property
    def metadata(self):
        return {
            "dirpath": self.dirpath,
            #"local_dirpath": os.path.abspath(self.local_dirpath),
            #"gcs_dirpath": self.gcs_dirpath,
            "gcs_service": self.gcs_service.metadata,
            "wifi_enabled": WIFI_ENABLED
        }

    #
    # LOCAL STORAGE
    #

    @property
    def local_metadata_filepath(self):
        return os.path.join(self.local_dirpath, "metadata.json")

    @property
    def local_results_filepath(self):
        return os.path.join(self.local_dirpath, "results.csv")

    @property
    def local_graph_filepath(self):
        return os.path.join(self.local_dirpath, "graph.gpickle")

    @property
    def local_bot_probabilities_filepath(self):
        return os.path.join(self.local_dirpath, "bot_probabilities.csv")

    @property
    def local_bot_probabilities_histogram_filepath(self):
        return os.path.join(self.local_dirpath, "bot_probabilities_histogram.png")

    def write_metadata_to_file(self):
        print(logstamp(), "WRITING METADATA...")
        with open(self.local_metadata_filepath, "w") as f:
            json.dump(self.metadata, f)

    def write_results_to_file(self):
        print(logstamp(), "WRITING RESULTS...")
        df = DataFrame(self.results)
        df.index.name = "row_id"
        df.index = df.index + 1  # make the row ids 1-indexed
        df.to_csv(self.local_results_filepath)

    def write_graph_to_file(self):
        print(logstamp(), "WRITING GRAPH...")
        write_gpickle(self.graph, self.local_graph_filepath)

    def read_graph_from_file(self):
        print(logstamp(), "READING GRAPH...")
        return read_gpickle(self.local_graph_filepath)

    #
    # REMOTE STORAGE
    #

    def upload_file(self, local_filepath, remote_filepath):
        print(logstamp(), "UPLOADING FILE...", os.path.abspath(local_filepath))
        blob = self.gcs_service.upload(local_filepath, remote_filepath)
        print(logstamp(), blob) #> <Blob: impeachment-analysis-2020, storage/data/2020-05-26-0002/metadata.json, 1590465770194318>

    def download_file(self, remote_filepath, local_filepath):
        print(logstamp(), "DOWNLOADING FILE...", remote_filepath)
        self.gcs_service.download(remote_filepath, local_filepath)

    @property
    def gcs_metadata_filepath(self):
        return os.path.join(self.gcs_dirpath, "metadata.json")

    @property
    def gcs_results_filepath(self):
        return os.path.join(self.gcs_dirpath, "results.csv")

    @property
    def gcs_graph_filepath(self):
        return os.path.join(self.gcs_dirpath, "graph.gpickle")

    @property
    def gcs_bot_probabilities_filepath(self):
        return os.path.join(self.gcs_dirpath, "bot_probabilities.csv")

    @property
    def gcs_bot_probabilities_histogram_filepath(self):
        return os.path.join(self.gcs_dirpath, "bot_probabilities_histogram.png")

    def upload_metadata(self):
        self.upload_file(self.local_metadata_filepath, self.gcs_metadata_filepath)

    def upload_results(self):
        self.upload_file(self.local_results_filepath, self.gcs_results_filepath)

    def upload_graph(self):
        self.upload_file(self.local_graph_filepath, self.gcs_graph_filepath)

    def upload_bot_probabilities(self):
        self.upload_file(self.local_bot_probabilities_filepath, self.gcs_bot_probabilities_filepath)

    def upload_bot_probabilities_histogram(self):
        self.upload_file(self.local_bot_probabilities_histogram_filepath, self.gcs_bot_probabilities_histogram_filepath)

    def download_graph(self):
        self.download_file(self.gcs_graph_filepath, self.local_graph_filepath)

    def download_bot_probabilities(self):
        self.download_file(self.gcs_bot_probabilities_filepath, self.local_bot_probabilities_filepath)

    def download_bot_probabilities_histogram(self):
        self.download_file(self.gcs_bot_probabilities_histogram_filepath, self.local_bot_probabilities_histogram_filepath)

    #
    # CONVENIENCE METHODS
    # (write locally, and upload only when wifi is enabled)
    #

    def save_metadata(self):
        self.write_metadata_to_file()
        if WIFI_ENABLED:
            self.upload_metadata()

    def save_results(self):
        self.write_results_to_file()
        if WIFI_ENABLED:
            self.upload_results()

    def save_graph(self):
        self.write_graph_to_file()
        if WIFI_ENABLED:
            self.upload_graph()

    #
    # GRAPH LOADING AND ANALYSIS
    #

    @profile
    def load_graph(self):
        """Assumes the graph already exists and is saved locally or remotely"""
        if not os.path.isfile(self.local_graph_filepath):
            self.download_graph()
        return self.read_graph_from_file()

    @property
    def node_count(self):
        return self.graph.number_of_nodes()

    @property
    def edge_count(self):
        return self.graph.number_of_edges()

    def report(self):
        """Loads the graph (if not already loaded) and prints its dimensions."""
        # BUGFIX: check for None explicitly. networkx graphs are falsy when
        # they have no nodes, so `if not self.graph` would needlessly re-load
        # an already-loaded empty graph.
        if self.graph is None:
            self.graph = self.load_graph()
        print("-------------------")
        print(type(self.graph))
        print("  NODES:", fmt_n(self.node_count))
        print("  EDGES:", fmt_n(self.edge_count))
        print("-------------------")

    @property
    def memory_report(self):
        """Loads the graph (if necessary) and reports its dimensions and file size (in bytes)."""
        if self.graph is None:  # BUGFIX: explicit None check (see report)
            self.graph = self.load_graph()
        #memory_load = memory_usage(self.read_graph_from_file, interval=.2, timeout=1)
        file_size = os.path.getsize(self.local_graph_filepath) # in bytes
        print("-------------------")
        print(type(self.graph))
        print("  NODES:", fmt_n(self.node_count))
        print("  EDGES:", fmt_n(self.edge_count))
        print("  FILE SIZE:", fmt_n(file_size))
        print("-------------------")
        return {"nodes": self.node_count, "edges": self.edge_count, "file_size": file_size}
if __name__ == "__main__":

    storage = GraphStorage()

    if DRY_RUN:
        # fabricate a graph, report on it, and persist it locally
        storage.graph = compile_mock_rt_graph()
        storage.report()
        storage.write_graph_to_file()

    # demonstrate a round-trip: clear the in-memory graph, then load it back.
    # BUGFIX: assign the loaded graph — previously the return value of
    # load_graph() was discarded, forcing report() to load the file a second time.
    storage.graph = None
    storage.graph = storage.load_graph()
    storage.report()
| 8,149 | 31.213439 | 154 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/retweet_grapher.py |
import os
from datetime import datetime
import time
from memory_profiler import profile
from dotenv import load_dotenv
from networkx import DiGraph
from conftest import compile_mock_rt_graph
from app import APP_ENV, DATA_DIR, SERVER_NAME, SERVER_DASHBOARD_URL, seek_confirmation
from app.decorators.number_decorators import fmt_n
from app.decorators.datetime_decorators import dt_to_s, logstamp
from app.bq_service import BigQueryService
from app.retweet_graphs_v2.graph_storage import GraphStorage
from app.retweet_graphs_v2.job import Job
#from app.email_service import send_email
load_dotenv()  # read env-var overrides from a local .env file

TOPIC = os.getenv("TOPIC") # default is None
USERS_LIMIT = os.getenv("USERS_LIMIT") # default is None
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="25000")) # it doesn't refer to the size of the batches fetched from BQ but rather the interval at which to take a reporting snapshot, which gets compiled and written to CSV. set this to a very large number like 25000 to keep memory costs down, if that's a concern for you.
TWEETS_START_AT = os.getenv("TWEETS_START_AT") # default is None
TWEETS_END_AT = os.getenv("TWEETS_END_AT") # default is None
DRY_RUN = (os.getenv("DRY_RUN", default="false") == "true")  # when true, skip BigQuery and use mock data
class RetweetGrapher(GraphStorage, Job):
    """Compiles a directed retweet graph (retweeter -> retweeted user, edge
    weight = retweet count) from BigQuery, taking periodic progress snapshots.

    Inherits artifact persistence from GraphStorage and timing/counting from Job.
    """

    def __init__(self, topic=TOPIC, tweets_start_at=TWEETS_START_AT, tweets_end_at=TWEETS_END_AT,
                        users_limit=USERS_LIMIT, batch_size=BATCH_SIZE,
                        storage_dirpath=None, bq_service=None):
        """
        Params:
            topic (str) optional topic filter for the tweets
            tweets_start_at / tweets_end_at (str) optional time window for the tweets
            users_limit (int-like) optional max number of edge rows to process
            batch_size (int) snapshot interval (rows between progress snapshots)
            storage_dirpath (str) optional storage subdir, passed to GraphStorage
            bq_service (BigQueryService) optional, for dependency injection
        """
        # NOTE: init order matters — Job first (counters/timers), then
        # GraphStorage (prints config and may prompt for confirmation)
        Job.__init__(self)
        GraphStorage.__init__(self, dirpath=storage_dirpath)
        self.bq_service = bq_service or BigQueryService()
        self.fetch_edges = self.bq_service.fetch_retweet_edges_in_batches_v2 # just being less verbose. feels like javascript

        # CONVERSATION PARAMS (OPTIONAL)
        self.topic = topic
        self.tweets_start_at = tweets_start_at
        self.tweets_end_at = tweets_end_at

        # PROCESSING PARAMS
        self.users_limit = users_limit
        if self.users_limit:
            self.users_limit = int(self.users_limit)  # env vars arrive as strings
        self.batch_size = int(batch_size)

        print("-------------------------")
        print("RETWEET GRAPHER...")
        print("  USERS LIMIT:", self.users_limit)
        print("  BATCH SIZE:", self.batch_size)
        print("  DRY RUN:", DRY_RUN)
        print("-------------------------")
        print("CONVERSATION PARAMS...")
        print("  TOPIC:", self.topic)
        print("  TWEETS START:", self.tweets_start_at)
        print("  TWEETS END:", self.tweets_end_at)

        seek_confirmation()

    @property
    def metadata(self):
        """Run parameters, persisted alongside the graph artifacts (see save_metadata)."""
        return {
            "app_env": APP_ENV,
            "storage_dirpath": self.dirpath,
            "bq_service": self.bq_service.metadata,
            "topic": self.topic,
            "tweets_start_at": str(self.tweets_start_at),
            "tweets_end_at": str(self.tweets_end_at),
            "users_limit": self.users_limit,
            "batch_size": self.batch_size
        }

    @profile
    def perform(self):
        """Builds the retweet graph edge by edge, snapshotting progress every
        batch_size rows, and stopping early at users_limit (if set)."""
        self.results = []
        self.graph = DiGraph()

        for row in self.fetch_edges(topic=self.topic, start_at=self.tweets_start_at, end_at=self.tweets_end_at):
            self.graph.add_edge(row["user_id"], row["retweeted_user_id"], weight=row["retweet_count"])
            self.counter += 1
            if self.counter % self.batch_size == 0:
                self.results.append(self.running_results)  # take a progress snapshot
            if self.users_limit and self.counter >= self.users_limit:
                break

    @property
    def running_results(self):
        """Returns (and prints) a progress snapshot: timestamp, rows processed, node / edge counts."""
        rr = {"ts": logstamp(),
              "counter": self.counter,
              "nodes": self.graph.number_of_nodes(),
              "edges": self.graph.number_of_edges()
        }
        print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["nodes"]), "|", fmt_n(rr["edges"]))
        return rr
if __name__ == "__main__":

    grapher = RetweetGrapher()
    grapher.save_metadata()

    grapher.start()

    if DRY_RUN:
        # skip BigQuery entirely: fabricate progress snapshots and a mock graph
        grapher.counter = 7500
        grapher.results = [
            {"ts": "2020-01-01 10:00:00", "counter": 2500, "nodes": 100_000, "edges": 150_000},
            {"ts": "2020-01-01 10:00:00", "counter": 5000, "nodes": 200_000, "edges": 400_000},
            {"ts": "2020-01-01 10:00:00", "counter": 7500, "nodes": 300_000, "edges": 900_000}
        ]
        grapher.graph = compile_mock_rt_graph()
    else:
        grapher.perform()

    grapher.end()
    grapher.report()

    # persist the snapshots CSV and the graph pickle (and upload, if wifi enabled)
    grapher.save_results()
    grapher.save_graph()
| 4,571 | 35.285714 | 322 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/job.py |
import time
from app.decorators.number_decorators import fmt_n
class Job():
    """Mixin that tracks wall-clock timing and an item counter for a batch job."""

    def __init__(self):
        # timing bookkeeping (perf_counter values, not wall-clock datetimes)
        self.start_at = None
        self.end_at = None
        self.duration_seconds = None
        # number of items processed so far (incremented by the subclass)
        self.counter = 0

    def start(self):
        """Marks the job as started and records the starting clock."""
        print("-----------------")
        print("JOB STARTING!")
        # todo: let's use a real datetime string and add it to the metadata
        self.start_at = time.perf_counter()

    def end(self):
        """Marks the job as finished and reports overall throughput."""
        print("-----------------")
        print("JOB COMPLETE!")
        # todo: let's use a real datetime string and add it to the metadata
        finished_at = time.perf_counter()
        self.end_at = finished_at
        self.duration_seconds = round(finished_at - self.start_at, 2)
        print(f"PROCESSED {fmt_n(self.counter)} ITEMS IN {fmt_n(self.duration_seconds)} SECONDS")
if __name__ == "__main__":

    # demo: time a fake job that "processes" 100 items in ~3 seconds
    job = Job()
    job.start()
    time.sleep(3)
    job.counter = 100
    job.end()
| 935 | 26.529412 | 111 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/prep/lookup_user_ids.py |
import os
import json
from tweepy.error import TweepError
from pandas import DataFrame
from dotenv import load_dotenv
from app import DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.bq_service import BigQueryService
from app.twitter_service import TwitterService
load_dotenv()  # read env-var overrides from a local .env file

BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=100)) # the max number of processed users to store in BQ at once (with a single insert API call). must be less than 10,000 to avoid error.
if __name__ == "__main__":
    # stdlib; safely parses tweepy's repr-style error reason (script-scope import)
    from ast import literal_eval

    bq_service = BigQueryService()
    twitter_service = TwitterService()

    rows = list(bq_service.fetch_idless_screen_names())
    row_count = len(rows)
    print("-------------------------")
    print(f"FETCHED {row_count} SCREEN NAMES")
    print("BATCH SIZE:", BATCH_SIZE)
    print("-------------------------")
    seek_confirmation()

    bq_service.migrate_user_id_lookups_table()

    batch = []
    for index, row in enumerate(rows):
        counter = index + 1

        try:
            user_id = twitter_service.get_user_id(row.screen_name)
            message = None
        except TweepError as err:
            #> [{'code': 50, 'message': 'User not found.'}]
            #> [{'code': 63, 'message': 'User has been suspended.'}]
            user_id = None
            # err.reason is a Python-repr string of a list of dicts, so parse it
            # with literal_eval. (BUGFIX: the previous quote-swapping + json.loads
            # approach broke whenever a message contained an apostrophe.)
            try:
                message = literal_eval(err.reason)[0]["message"]
            except (ValueError, SyntaxError, IndexError, KeyError, TypeError):
                message = err.reason  # fall back to the raw reason text

        lookup = {"lookup_at": logstamp(), "counter": counter, "screen_name": row.screen_name.upper(), "user_id": user_id, "message": message}
        print(lookup)
        batch.append(lookup)

        if (len(batch) >= BATCH_SIZE) or (counter >= row_count): # if the batch is full or the row is last
            print("SAVING BATCH...", len(batch))
            bq_service.upload_user_id_lookups(batch)
            batch = [] # clear the batch

    print("-------------")
    print("LOOKUPS COMPLETE!")
| 2,114 | 32.046875 | 187 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.