repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/prep/migrate_user_details_v2.py |
from app.bq_service import BigQueryService
if __name__ == "__main__":
    # One-off job: run the BigQuery migration that populates the v2 user details table.
    service = BigQueryService()
    service.migrate_populate_user_details_table_v2()
    print("MIGRATION SUCCESSFUL!")
| 202 | 15.916667 | 55 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/prep/assign_user_ids.py | from pprint import pprint
from app import seek_confirmation # DATA_DIR
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService
if __name__ == "__main__":
    # Assign sequential surrogate user ids to screen names that don't have one,
    # continuing upward from the current maximum id, then upload the assignments.
    bq_service = BigQueryService()

    print("-------------------------")
    screen_names = [row.screen_name for row in bq_service.fetch_idless_screen_names_postlookup()]
    print("FETCHED", fmt_n(len(screen_names)), "SCREEN NAMES...") # 2,224

    max_user_id = bq_service.fetch_max_user_id_postlookup()
    print("-------------------------")
    print("MAX ID:")
    print(max_user_id)

    print("-------------------------")
    print("ASSIGNING IDS...")
    # offsets start at 1 so the first new id is max_user_id + 1
    assignments = [
        {"screen_name": sn, "user_id": max_user_id + offset}
        for offset, sn in enumerate(screen_names, start=1)
    ]
    print(assignments[0]["user_id"])
    print("...")
    print(assignments[-1]["user_id"])

    print("-------------------------")
    seek_confirmation()
    bq_service.migrate_user_id_assignments_table()
    bq_service.upload_user_id_assignments(assignments)
    print("UPLOAD COMPLETE!")
| 1,192 | 30.394737 | 97 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py |
from app.bq_service import BigQueryService
if __name__ == "__main__":
    # One-off job: (re)create the daily bot probabilities table in BigQuery.
    service = BigQueryService()
    service.migrate_daily_bot_probabilities_table()
    print("MIGRATION SUCCESSFUL!")
| 201 | 15.833333 | 54 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/prep/migrate_user_screen_names.py |
from app.bq_service import BigQueryService
if __name__ == "__main__":
    # One-off job: run the migration that populates the user screen names table.
    service = BigQueryService()
    service.migrate_populate_user_screen_names_table()
    print("MIGRATION SUCCESSFUL!")
| 204 | 16.083333 | 57 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/prep/migrate_retweets_v2.py |
from app.decorators.datetime_decorators import logstamp
from app.bq_service import BigQueryService
if __name__ == "__main__":
    # One-off job: populate the v2 retweets table, logging timestamps around the run.
    service = BigQueryService()
    print(logstamp())
    service.migrate_populate_retweets_table_v2()
    print(logstamp())
    print("MIGRATION SUCCESSFUL!")
| 298 | 18.933333 | 55 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/k_days/reporter.py |
import os
from pandas import DataFrame
from app import DATA_DIR
from app.retweet_graphs_v2.graph_storage import GraphStorage
from app.retweet_graphs_v2.k_days.generator import DateRangeGenerator
if __name__ == "__main__":
    # Collect a memory report for each k-day graph period and export them all as one CSV.
    gen = DateRangeGenerator()

    rows = []
    for date_range in gen.date_ranges:
        storage = GraphStorage(dirpath=f"retweet_graphs_v2/k_days/{gen.k_days}/{date_range.start_date}")
        try:
            # merge the storage's memory stats with identifiers for this period
            rows.append({**storage.memory_report, **{"k_days": gen.k_days, "start_date": date_range.start_date}})
        except Exception as err:
            print("OOPS", date_range.start_date, err)

    df = DataFrame(rows)
    print(df.head())

    csv_filepath = os.path.join(DATA_DIR, "retweet_graphs_v2", "k_days", str(gen.k_days), "graph_reports.csv")
    print("WRITING TO CSV...", os.path.abspath(csv_filepath))
    df.to_csv(csv_filepath)
| 962 | 32.206897 | 125 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/k_days/classifier.py |
import os
#import time
import gc
from dotenv import load_dotenv
from app import APP_ENV, server_sleep
from app.retweet_graphs_v2.graph_storage import GraphStorage
from app.retweet_graphs_v2.k_days.generator import DateRangeGenerator
from app.botcode_v2.classifier import NetworkClassifier as BotClassifier
from app.bq_service import BigQueryService
load_dotenv()
SKIP_EXISTING = (os.getenv("SKIP_EXISTING", default="true") == "true")
if __name__ == "__main__":
    # For each k-day period: load the saved retweet graph, score bot probabilities,
    # and persist results (CSV + histogram to GCS, high-scoring rows to BigQuery),
    # freeing graph memory between periods.
    bq_service = BigQueryService()
    gen = DateRangeGenerator()

    for date_range in gen.date_ranges:
        storage_dirpath = f"retweet_graphs_v2/k_days/{gen.k_days}/{date_range.start_date}"
        storage = GraphStorage(dirpath=storage_dirpath)

        if SKIP_EXISTING and storage.gcs_service.file_exists(storage.gcs_bot_probabilities_histogram_filepath):
            # would check for CSV file but it seems checking for larger file sometimes leads to
            # urllib3.exceptions.ProtocolError: ('Connection aborted.', OSError(0, 'Error')),
            # so check the smaller histogram file instead
            print("FOUND EXISTING BOT PROBABILITIES. SKIPPING...")
            continue # skip to next date range

        print("PROCEEDING WITH CLASSIFICAITON...")
        storage.report() # loads graph and provides size info

        clf = BotClassifier(storage.graph, weight_attr="weight")

        # UPLOAD COMPLETE CSV TO GOOGLE CLOUD STORAGE
        clf.bot_probabilities_df.to_csv(storage.local_bot_probabilities_filepath)
        storage.upload_bot_probabilities()

        # UPLOAD COMPLETE HISTOGRAM TO GOOGLE CLOUD STORAGE
        clf.generate_bot_probabilities_histogram(
            img_filepath=storage.local_bot_probabilities_histogram_filepath,
            show_img=(APP_ENV=="development"),
            title=f"Bot Probability Scores for Period '{date_range.start_date}' (excludes 0.5)"
        )
        storage.upload_bot_probabilities_histogram()

        # UPLOAD SELECTED ROWS TO BIG QUERY (IF POSSIBLE, OTHERWISE CAN ADD FROM GCS LATER)
        try:
            # only rows scored above the 0.5 neutral threshold are pushed to BQ
            bots_df = clf.bot_probabilities_df[clf.bot_probabilities_df["bot_probability"] > 0.5]
            records = [{**{"start_date": date_range.start_date}, **record} for record in bots_df.to_dict("records")]
            print("UPLOADING", len(records), "BOT SCORES TO BQ...")
            bq_service.upload_daily_bot_probabilities(records)
            del bots_df
            del records
        except Exception as err:
            print("OOPS", err)

        # drop the graph and classifier and force a GC pass before the next period
        del storage
        del clf
        gc.collect()
        print("\n\n\n\n")

    print("JOB COMPLETE!")
    server_sleep()
| 2,646 | 36.814286 | 116 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/k_days/grapher.py |
from app import server_sleep
from app.bq_service import BigQueryService
from app.retweet_graphs_v2.retweet_grapher import RetweetGrapher
from app.retweet_graphs_v2.k_days.generator import DateRangeGenerator
if __name__ == "__main__":
    # Build and persist one retweet graph per k-day date range, then idle the server.
    gen = DateRangeGenerator()
    bq_service = BigQueryService()

    for date_range in gen.date_ranges:
        dirpath = f"retweet_graphs_v2/k_days/{gen.k_days}/{date_range.start_date}"
        grapher = RetweetGrapher(
            storage_dirpath=dirpath,
            bq_service=bq_service,
            tweets_start_at=date_range.start_at,
            tweets_end_at=date_range.end_at,
        )
        grapher.save_metadata()
        grapher.start()
        grapher.perform()
        grapher.end()
        grapher.report()
        grapher.save_results()
        grapher.save_graph()

        del grapher # clearing graph from memory
        print("\n\n\n\n")

    print("JOB COMPLETE!")
    server_sleep()
| 930 | 26.382353 | 90 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/k_days/download_classifications.py |
import os
from app.retweet_graphs_v2.graph_storage import GraphStorage
from app.retweet_graphs_v2.k_days.generator import DateRangeGenerator
from app.botcode_v2.classifier import NetworkClassifier as BotClassifier
if __name__ == "__main__":
    # Download any bot-probability artifacts (CSV, histogram) that are missing locally.
    gen = DateRangeGenerator()

    for date_range in gen.date_ranges:
        print("----------")
        print("DATE:", date_range.start_date)
        storage = GraphStorage(dirpath=f"retweet_graphs_v2/k_days/{gen.k_days}/{date_range.start_date}")
        try:
            if not os.path.isfile(storage.local_bot_probabilities_filepath):
                storage.download_bot_probabilities()
            if not os.path.isfile(storage.local_bot_probabilities_histogram_filepath):
                storage.download_bot_probabilities_histogram()
        except Exception as err:
            print("OOPS", date_range.start_date, err)

    print("DOWNLOADED ALL!")
| 950 | 31.793103 | 90 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs_v2/k_days/generator.py |
import os
from datetime import datetime, timedelta
from pprint import pprint
from dotenv import load_dotenv
from app import seek_confirmation
from app.decorators.datetime_decorators import dt_to_date
load_dotenv()
START_DATE = os.getenv("START_DATE", default="2020-01-01") # the first period will start on this day
K_DAYS = int(os.getenv("K_DAYS", default="3")) # the length of each time period in days
N_PERIODS = int(os.getenv("N_PERIODS", default="5")) # the number of periods to construct
class DateRangeGenerator:
    """Generates and prints a list of consecutive DateRange periods."""

    def __init__(self, start_date=START_DATE, k_days=K_DAYS, n_periods=N_PERIODS):
        """
        Params:
            start_date (str) the first period start date, like "2020-01-01"
            k_days (int) number of days in each period
            n_periods (int) number of periods
        """
        self.start_date = start_date
        self.k_days = int(k_days)
        self.n_periods = int(n_periods)

        print("-------------------------")
        print("DATE RANGE GENERATOR...")
        print("  START DATE:", self.start_date)
        print("  K DAYS:", self.k_days)
        print("  N PERIODS:", self.n_periods)

        print("-------------------------")
        print("DATE RANGES...")
        self.date_ranges = self.get_date_ranges(start_date=self.start_date, k_days=self.k_days, n_periods=self.n_periods)
        pprint(self.date_ranges)

        seek_confirmation()

    @staticmethod
    def get_date_ranges(start_date, k_days, n_periods):
        """
        Params:
            start_date (str) date string like "2020-01-01"
            k_days (int) number of days in each period
            n_periods (int) number of periods
        """
        one_second = timedelta(seconds=1)
        cursor = datetime.strptime(start_date, "%Y-%m-%d")
        ranges = []
        for _ in range(n_periods):
            # each period ends one second before the next one starts
            period_end_at = cursor + timedelta(days=k_days) - one_second
            ranges.append(DateRange(cursor, period_end_at))
            cursor = period_end_at + one_second
        return ranges
class DateRange:
    """A closed time period bounded by two datetimes."""

    def __init__(self, start_at, end_at):
        """Params: start_at, end_at (datetime) like datetime(2020, 1, 31) """
        self.start_at = start_at
        self.end_at = end_at

    def __repr__(self):
        # fix: quote both values consistently (end_at was previously unquoted)
        return f"<DateRange start_at='{self.start_at}' end_at='{self.end_at}'>"

    @property
    def metadata(self):
        """Start/end date strings as a dict, for JSON metadata files."""
        return {"start_date": self.start_date, "end_date": self.end_date}

    @property
    def start_date(self):
        """Date string like '2020-01-01' for the period start."""
        return dt_to_date(self.start_at)

    @property
    def end_date(self):
        """Date string like '2020-01-03' for the period end."""
        return dt_to_date(self.end_at)
if __name__ == "__main__":
    # Smoke test: builds ranges from env-derived defaults (prints them, asks to confirm).
    gen = DateRangeGenerator()
| 2,759 | 31.857143 | 121 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs/bq_weekly_graph_loader.py |
from app.retweet_graphs.bq_weekly_grapher import BigQueryWeeklyRetweetGrapher
if __name__ == "__main__":
    # Load the previously-saved weekly retweet graph and print its size.
    storage = BigQueryWeeklyRetweetGrapher.init_storage_service()
    graph = storage.load_graph() # will print a memory profile...
    storage.report(graph) # will print graph size
| 317 | 25.5 | 77 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs/base_grapher.py |
import os
from datetime import datetime
import time
from dotenv import load_dotenv
from networkx import DiGraph
from app import APP_ENV, DATA_DIR, SERVER_NAME, SERVER_DASHBOARD_URL
from app.decorators.number_decorators import fmt_n
from app.retweet_graphs.graph_storage_service import GraphStorageService
from app.email_service import send_email
load_dotenv()
USERS_LIMIT = os.getenv("USERS_LIMIT") # default is None
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="2500"))
class BaseGrapher():
    """
    Skeleton for graph-compilation jobs. Subclasses override perform() to build
    the graph; start()/end() bracket the run with timing and an item counter,
    and the save_*() methods delegate persistence to the storage service.
    """

    def __init__(self, users_limit=USERS_LIMIT, batch_size=BATCH_SIZE, storage_service=None):
        self.users_limit = users_limit
        if self.users_limit:
            # env var arrives as a string (or None); normalize to int only when present
            self.users_limit = int(self.users_limit)
        self.batch_size = batch_size
        self.storage_service = storage_service or GraphStorageService()
        print("-----------------")
        print("BASE GRAPHER...")
        print("  USERS LIMIT:", self.users_limit)
        print("  BATCH SIZE:", self.batch_size)
        # lifecycle state, populated by start() / perform() / end()
        self.start_at = None
        self.counter = None
        self.results = None
        #self.edges = None
        self.graph = None
        self.end_at = None
        self.duration_seconds = None

    @property
    def metadata(self):
        # job configuration persisted alongside the graph artifacts
        return {"app_env": APP_ENV, "users_limit": self.users_limit, "batch_size": self.batch_size}

    def start(self):
        print("-----------------")
        print("JOB STARTING!")
        self.start_at = time.perf_counter() # todo: let's use a real datetime string and add it to the metadata
        self.counter = 0 # represents the number of items processed

    def perform(self):
        """To be overridden by child class. Only the graph is required."""
        self.results = []
        #self.edges = []
        self.graph = DiGraph()

    def end(self):
        print("-----------------")
        print("JOB COMPLETE!")
        self.end_at = time.perf_counter() # todo: let's use a real datetime string and add it to the metadata
        self.duration_seconds = round(self.end_at - self.start_at, 2)
        print(f"PROCESSED {fmt_n(self.counter)} ITEMS IN {fmt_n(self.duration_seconds)} SECONDS")

    def save_metadata(self):
        # write locally, then mirror to Google Cloud Storage
        self.storage_service.write_metadata_to_file(self.metadata)
        self.storage_service.upload_metadata()

    def save_results(self):
        self.storage_service.write_results_to_file(self.results)
        self.storage_service.upload_results()

    #def save_edges(self):
    #    self.storage_service.write_edges_to_file(self.edges)
    #    self.storage_service.upload_edges()

    def save_graph(self):
        self.storage_service.write_graph_to_file(self.graph)
        self.storage_service.upload_graph()

    def report(self):
        # prints node/edge counts for the compiled graph
        self.storage_service.report(self.graph)

    def send_completion_email(self, subject="[Tweet Analyzer] Graph Complete!"):
        # production-only reminder to shut the server down once the job is done
        if APP_ENV == "production":
            html = f"""
            <h3>Nice!</h3>
            <p>Server '{SERVER_NAME}' has completed its work.</p>
            <p>So please shut it off so it can get some rest.</p>
            <p>
                <a href='{SERVER_DASHBOARD_URL}'>{SERVER_DASHBOARD_URL}</a>
            </p>
            <p>Thanks!</p>
            """
            response = send_email(subject, html)
            return response

    def sleep(self):
        if APP_ENV == "production":
            print("SLEEPING...")
            time.sleep(6 * 60 * 60) # six hours, more than enough time to stop the server
if __name__ == "__main__":
    # Exercise the (no-op) base pipeline end to end.
    job = BaseGrapher()
    job.start()
    job.perform()
    job.end()
    job.report()
| 3,651 | 31.607143 | 111 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs/graph_storage_service.py |
import os
import json
import pickle
from memory_profiler import profile
from pandas import DataFrame
from networkx import write_gpickle, read_gpickle
from app import APP_ENV, DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.gcs_service import GoogleCloudStorageService
from conftest import compile_mock_rt_graph
class GraphStorageService:
    """
    Saves and loads artifacts from the networkx graph compilation process to
    local storage, and optionally to Google Cloud Storage.
    """

    def __init__(self, local_dirpath=None, gcs_dirpath=None, gcs_service=None):
        """
        Params:
            local_dirpath (str) like "/Users/USERNAME/path/to/repo/data/graphs/2020-08-02-1818"
            gcs_dirpath (str) like "storage/data/graphs/2020-08-02-1818"
        """
        self.gcs_service = gcs_service or GoogleCloudStorageService()
        self.gcs_dirpath = gcs_dirpath or os.path.join("storage", "data", "graphs", "example")
        self.local_dirpath = local_dirpath or os.path.join(DATA_DIR, "graphs", "example")

        print("----------------------")
        print("GRAPH STORAGE...")
        print("  GCS DIR:", self.gcs_dirpath)
        print("  LOCAL DIR:", self.local_dirpath)
        print("----------------------")
        seek_confirmation()

        if not os.path.exists(self.local_dirpath):
            os.makedirs(self.local_dirpath)

    #
    # LOCAL STORAGE
    #

    @property
    def local_metadata_filepath(self):
        return os.path.join(self.local_dirpath, "metadata.json")

    @property
    def local_results_filepath(self):
        return os.path.join(self.local_dirpath, "results.csv")

    @property
    def local_graph_filepath(self):
        return os.path.join(self.local_dirpath, "graph.gpickle")

    def write_metadata_to_file(self, metadata):
        """
        Params: metadata (dict)
        """
        print(logstamp(), "WRITING METADATA...")
        with open(self.local_metadata_filepath, "w") as f:
            json.dump(metadata, f)

    def write_results_to_file(self, results):
        """
        Params: results (list of dict)
        """
        print(logstamp(), "WRITING RESULTS...")
        df = DataFrame(results)
        df.index.name = "row_id"
        df.index = df.index + 1 # one-indexed row ids in the CSV
        df.to_csv(self.local_results_filepath)

    def write_graph_to_file(self, graph):
        """
        Params: graph (DiGraph)
        """
        print(logstamp(), "WRITING GRAPH...")
        write_gpickle(graph, self.local_graph_filepath)

    def read_graph_from_file(self, graph_filepath=None):
        """
        Params: graph_filepath (str) optional path to a gpickle file; defaults to
                this service's local graph filepath.
                Fix: this parameter was previously accepted but ignored.
        """
        print(logstamp(), "READING GRAPH...")
        return read_gpickle(graph_filepath or self.local_graph_filepath)

    #
    # REMOTE STORAGE
    #

    @property
    def gcs_metadata_filepath(self):
        return os.path.join(self.gcs_dirpath, "metadata.json")

    @property
    def gcs_results_filepath(self):
        return os.path.join(self.gcs_dirpath, "results.csv")

    @property
    def gcs_graph_filepath(self):
        return os.path.join(self.gcs_dirpath, "graph.gpickle")

    def upload_metadata(self):
        print(logstamp(), "UPLOADING JOB METADATA...", self.gcs_metadata_filepath)
        blob = self.gcs_service.upload(self.local_metadata_filepath, self.gcs_metadata_filepath)
        print(logstamp(), blob) #> <Blob: impeachment-analysis-2020, storage/data/2020-05-26-0002/metadata.json, 1590465770194318>

    def upload_results(self):
        print(logstamp(), "UPLOADING RESULTS...", self.gcs_results_filepath)
        blob = self.gcs_service.upload(self.local_results_filepath, self.gcs_results_filepath)
        print(logstamp(), blob)

    def upload_graph(self):
        print(logstamp(), "UPLOADING GRAPH...", self.gcs_graph_filepath)
        blob = self.gcs_service.upload(self.local_graph_filepath, self.gcs_graph_filepath)
        print(logstamp(), blob)

    def download_graph(self):
        print(logstamp(), "DOWNLOADING GRAPH...", self.gcs_graph_filepath)
        self.gcs_service.download(self.gcs_graph_filepath, self.local_graph_filepath)

    #
    # GRAPH LOADING AND ANALYSIS
    #

    @profile
    def load_graph(self):
        """Assumes the graph already exists and is saved locally or remotely"""
        if not os.path.isfile(self.local_graph_filepath):
            self.download_graph()
        return self.read_graph_from_file()

    def report(self, graph):
        """
        Params: graph (DiGraph)
        """
        print("-------------------")
        print(type(graph))
        print("  NODES:", fmt_n(graph.number_of_nodes()))
        print("  EDGES:", fmt_n(graph.number_of_edges()))
        print("-------------------")
if __name__ == "__main__":
    # Demo run: write/upload sample metadata and results, then a mock graph.
    storage = GraphStorageService()

    storage.write_metadata_to_file({"app_env": APP_ENV, "config": {"a": True, "b": 2500}})
    storage.upload_metadata()

    sample_results = [
        {"ts": "2020-01-01 10:00:00", "counter": 2500, "nodes": 100_000, "edges": 150_000},
        {"ts": "2020-01-01 10:00:00", "counter": 5000, "nodes": 200_000, "edges": 400_000},
        {"ts": "2020-01-01 10:00:00", "counter": 7500, "nodes": 300_000, "edges": 900_000},
    ]
    storage.write_results_to_file(sample_results)
    storage.upload_results()

    mock_graph = compile_mock_rt_graph()
    storage.report(mock_graph)
    storage.write_graph_to_file(mock_graph)
    storage.upload_graph()
| 6,245 | 32.945652 | 135 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs/bq_retweet_grapher.py |
import os
from networkx import DiGraph
from memory_profiler import profile
from dotenv import load_dotenv
from app.decorators.datetime_decorators import logstamp
from app.decorators.number_decorators import fmt_n
from app.friend_graphs.bq_grapher import BigQueryGrapher
load_dotenv()
USERS_LIMIT = int(os.getenv("USERS_LIMIT", default="1000")) # forces us to have a limit, unlike the base grapher version
TOPIC = os.getenv("TOPIC", default="impeach")
START_AT = os.getenv("START_AT", default="2020-01-01") # On 1/15, The House of Representatives names seven impeachment managers and votes to transmit articles of impeachment to the Senate
END_AT = os.getenv("END_AT", default="2020-01-30")
class BigQueryRetweetGrapher(BigQueryGrapher):
    """
    Compiles a directed retweet graph from BigQuery for a given topic and time
    window. Relies on the parent BigQueryGrapher for service wiring and the
    start()/end() counter lifecycle (defined outside this file).
    """

    def __init__(self, users_limit=USERS_LIMIT, topic=TOPIC, convo_start_at=START_AT, convo_end_at=END_AT, bq_service=None, gcs_service=None):
        super().__init__(bq_service=bq_service, gcs_service=gcs_service)
        self.users_limit = users_limit
        self.topic = topic
        self.convo_start_at = convo_start_at
        self.convo_end_at = convo_end_at
        print("---------------------------------------")
        print("RETWEET GRAPHER...")
        print("---------------------------------------")
        print("CONVERSATION FILTERS...")
        print(f"  USERS LIMIT: {self.users_limit}")
        print(f"  TOPIC: '{self.topic.upper()}' ")
        print(f"  BETWEEN: '{self.convo_start_at}' AND '{self.convo_end_at}'")

    @property
    def metadata(self):
        # parent metadata plus the conversation filters used for this graph
        return {**super().metadata, **{
            "retweeters":True,
            "conversation": {
                "users_limit": self.users_limit,
                "topic": self.topic,
                "start_at": self.convo_start_at,
                "end_at": self.convo_end_at,
            }
        }} # merges dicts

    @profile
    def perform(self):
        """Streams retweet counts from BigQuery into a DiGraph, then saves all artifacts."""
        self.write_metadata_to_file()
        self.upload_metadata()
        self.start()
        self.graph = DiGraph()
        self.running_results = []
        for row in self.bq_service.fetch_retweet_counts_in_batches(topic=self.topic, start_at=self.convo_start_at, end_at=self.convo_end_at):
            # see: https://networkx.github.io/documentation/stable/reference/classes/generated/networkx.DiGraph.add_edge.html#networkx.DiGraph.add_edge
            self.graph.add_edge(row["user_screen_name"], row["retweet_user_screen_name"], rt_count=row["retweet_count"])
            self.counter += 1
            if self.counter % self.batch_size == 0:
                # periodic progress snapshot: timestamp, items processed, graph size
                rr = {"ts": logstamp(), "counter": self.counter, "nodes": len(self.graph.nodes), "edges": len(self.graph.edges)}
                print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["nodes"]), "|", fmt_n(rr["edges"]))
                self.running_results.append(rr)
        self.end()
        self.report()
        self.write_results_to_file()
        self.upload_results()
        self.write_graph_to_file()
        self.upload_graph()
if __name__ == "__main__":
    # cautiously_initialized() is defined on a parent class (prompts before running).
    grapher = BigQueryRetweetGrapher.cautiously_initialized()
    grapher.perform()
    grapher.sleep()
| 3,131 | 37.666667 | 187 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs/bq_weekly_graph_bot_classifier.py | import os
from conftest import compile_mock_rt_graph
from app import APP_ENV, seek_confirmation
from app.retweet_graphs.bq_weekly_grapher import BigQueryWeeklyRetweetGrapher
from app.botcode_v2.classifier import NetworkClassifier as BotClassifier, DRY_RUN
if __name__ == "__main__":
    # Score bot probabilities for the weekly retweet graph (or a mock graph during
    # a dry run) and save CSV + histogram artifacts locally and to GCS.
    storage_service = BigQueryWeeklyRetweetGrapher.init_storage_service()

    # LOAD RT GRAPH
    rt_graph = compile_mock_rt_graph() if DRY_RUN else storage_service.load_graph()
    storage_service.report(rt_graph)
    seek_confirmation()

    # PERFORM BOT CLASSIFICATION
    classifier = BotClassifier(rt_graph)
    df = classifier.bot_probabilities_df

    # SAVE ARTIFACTS
    local_artifacts_dir = os.path.join(storage_service.local_dirpath, "botcode_v2")
    if not os.path.isdir(local_artifacts_dir):
        os.mkdir(local_artifacts_dir)
    remote_artifacts_dir = os.path.join(storage_service.gcs_dirpath, "botcode_v2")

    # dry runs use fixed mock filenames; real runs embed the classifier lambdas
    if DRY_RUN:
        csv_name = "mock_probabilities.csv"
        img_name = "mock_probabilities_histogram.png"
    else:
        stem = f"bot_probabilities_{classifier.lambda_00}_{classifier.lambda_11}"
        csv_name = f"{stem}.csv"
        img_name = f"{stem}_histogram.png"
    local_csv_filepath = os.path.join(local_artifacts_dir, csv_name)
    local_img_filepath = os.path.join(local_artifacts_dir, img_name)
    remote_csv_filepath = os.path.join(remote_artifacts_dir, csv_name)
    remote_img_filepath = os.path.join(remote_artifacts_dir, img_name)

    print("----------------")
    print("SAVING CSV FILE...")
    print(local_csv_filepath)
    df.to_csv(local_csv_filepath)
    storage_service.gcs_service.upload(local_csv_filepath, remote_csv_filepath)

    print("----------------")
    print("SAVING HISTOGRAM...")
    print(local_img_filepath)
    classifier.generate_bot_probabilities_histogram(img_filepath=local_img_filepath, show_img=(APP_ENV=="development"))
    storage_service.gcs_service.upload(local_img_filepath, remote_img_filepath)
| 2,424 | 43.090909 | 146 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs/bq_base_grapher.py |
# fix: the base_grapher import was missing the "app." package prefix, unlike
# every other module in this package (e.g. app.retweet_graphs.graph_storage_service)
from app.retweet_graphs.base_grapher import BaseGrapher, USERS_LIMIT, BATCH_SIZE
from app.bq_service import BigQueryService
class BigQueryBaseGrapher(BaseGrapher):
    """Base grapher wired up with a BigQuery service client."""

    def __init__(self, users_limit=USERS_LIMIT, batch_size=BATCH_SIZE, storage_service=None, bq_service=None):
        super().__init__(users_limit=users_limit, batch_size=batch_size, storage_service=storage_service)
        self.bq_service = bq_service or BigQueryService()

    @property
    def metadata(self):
        """Parent metadata merged with the BigQuery service's own metadata."""
        return {**super().metadata, "bq_service": self.bq_service.metadata}
if __name__ == "__main__":
    # Run the (no-op) base pipeline end to end with a real BigQuery client.
    job = BigQueryBaseGrapher()
    job.start()
    job.perform()
    job.end()
    job.report()
| 704 | 29.652174 | 110 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/retweet_graphs/bq_weekly_grapher.py |
import os

from dotenv import load_dotenv
from networkx import DiGraph
from memory_profiler import profile

from app import DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import dt_to_s, logstamp, dt_to_date
from app.decorators.number_decorators import fmt_n
from app.bq_service import BigQueryService
# fix: this import was missing the "app." package prefix used everywhere else
from app.retweet_graphs.bq_base_grapher import BigQueryBaseGrapher
from app.retweet_graphs.graph_storage_service import GraphStorageService

load_dotenv()

WEEK_ID = os.getenv("WEEK_ID")
class RetweetWeek:
    """
    A decorator for the rows returned by the fetch_retweet_weeks() query.

    Param row (google.cloud.bigquery.table.Row)
    """

    def __init__(self, row):
        self.row = row

    @property
    def week_id(self):
        """Year-week identifier like '2019-52' or '2020-01' (week zero-padded)."""
        return f"{self.row.year}-{str(self.row.week).zfill(2)}"

    @property
    def details(self):
        """One-line human-readable summary of this week's stats."""
        parts = [
            f"ID: {self.week_id} | ",
            f"FROM: '{dt_to_date(self.row.min_created)}' ",
            f"TO: '{dt_to_date(self.row.max_created)}' | ",
            f"DAYS: {fmt_n(self.row.day_count)} | ",
            f"USERS: {fmt_n(self.row.user_count)} | " + f"RETWEETS: {fmt_n(self.row.retweet_count)}",
        ]
        return "".join(parts)
class BigQueryWeeklyRetweetGrapher(BigQueryBaseGrapher):
    """
    Compiles a directed retweet graph for a single week of tweets (chosen via
    the WEEK_ID env var or interactively), weighting edges by retweet counts.
    """

    def __init__(self, bq_service=None, week_id=WEEK_ID):
        bq_service = bq_service or BigQueryService()
        self.week_id = week_id
        print("--------------------")
        print("FETCHING WEEKS...")
        self.weeks = [RetweetWeek(row) for row in list(bq_service.fetch_retweet_weeks())]
        for week in self.weeks:
            print("  ", week.details)
        print("--------------------")
        print("SELECTING A WEEK...")
        if not self.week_id:
            self.week_id = input("PLEASE SELECT A WEEK (E.G. '2019-52', '2020-01', ETC.): ") # assumes you know what you're doing when setting WEEK_ID on production! once you run this once you'll see what all the week ids are.
        try:
            # first (only) week whose id matches the selection
            self.week = [wk for wk in self.weeks if wk.week_id == self.week_id][0]
            print("  ", self.week.details)
        except IndexError as err:
            print("OOPS - PLEASE CHECK WEEK ID AND TRY AGAIN...")
            exit()
        self.tweets_start_at = self.week.row.min_created
        self.tweets_end_at = self.week.row.max_created
        seek_confirmation()
        storage_service = self.init_storage_service(self.week_id)
        super().__init__(bq_service=bq_service, storage_service=storage_service)

    @classmethod
    def init_storage_service(cls, week_id=WEEK_ID):
        """
        We need to be able to call this without initializing the instance.
        Allows us to load graphs after they've already been saved.
        """
        if not week_id: raise ValueError("EXPECTING A WEEK ID!")
        return GraphStorageService(
            local_dirpath = os.path.join(DATA_DIR, "graphs", "weekly", week_id),
            gcs_dirpath = os.path.join("storage", "data", "graphs", "weekly", week_id)
        )

    @property
    def metadata(self):
        # parent metadata plus the weekly tweet window for this graph
        return {**super().metadata, **{
            "retweet_graph": {
                "topic": None,
                "week_id": self.week_id,
                "tweets_start_at": dt_to_s(self.tweets_start_at),
                "tweets_end_at": dt_to_s(self.tweets_end_at),
            }
        }}

    @profile
    def perform(self):
        """Streams the week's retweet counts into a DiGraph, logging periodic progress."""
        self.save_metadata()
        self.start()
        self.results = []
        self.graph = DiGraph()
        for row in self.bq_service.fetch_retweet_counts_in_batches(start_at=dt_to_s(self.tweets_start_at), end_at=dt_to_s(self.tweets_end_at)):
            self.graph.add_edge(
                row["user_screen_name"], # todo: user_id
                row["retweet_user_screen_name"], # todo: retweet_user_id
                weight=row["retweet_count"]
            )
            self.counter += 1
            if self.counter % self.batch_size == 0:
                # periodic progress snapshot: timestamp, items processed, graph size
                rr = {
                    "ts": logstamp(),
                    "counter": self.counter,
                    "nodes": self.graph.number_of_nodes(),
                    "edges": self.graph.number_of_edges()
                }
                print(rr["ts"], "|", fmt_n(rr["counter"]), "|", fmt_n(rr["nodes"]), "|", fmt_n(rr["edges"]))
                self.results.append(rr)
            # gets us an approximate users limit but reached a fraction of the time (perhaps more performant when there are millions of rows)
            if self.users_limit and self.counter >= self.users_limit:
                break
        self.end()
        self.report()
        self.save_results()
        self.save_graph()
if __name__ == "__main__":
    # Build the weekly graph, email a completion notice, then idle the server.
    grapher = BigQueryWeeklyRetweetGrapher()
    grapher.perform()
    grapher.send_completion_email(subject=f"[Tweet Analysis] Retweet Graph Complete! (WK {grapher.week_id})")
    grapher.sleep()
| 5,019 | 33.62069 | 226 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/botometer/sampler.py |
import os
from functools import cached_property
from botometer import Botometer
from dotenv import load_dotenv
from app import seek_confirmation, server_sleep
from app.bq_service import BigQueryService, generate_timestamp
from app.twitter_service import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET
load_dotenv()
RAPID_API_KEY = os.getenv("RAPID_API_KEY")
LIMIT = os.getenv("LIMIT", default="5") # keep less than 1_000 per day
class BotometerScoreSampler:
"""Gets botometer scores for a random sample of users.
Random sample is drawn evenly from both classes, if possble. The sample size will be twice the limit.
Cross references previously-looked-up scores to prevent duplicate lookups.
May return less than the desired sample if there are user's not able to be looked up.
"""
def __init__(self, bq=None, limit=LIMIT):
self.bq = bq or BigQueryService()
self.dataset_address = self.bq.dataset_address.replace(";", "") # super safe about sql injection
self.limit = int(limit)
self.bom = Botometer(wait_on_ratelimit=True, rapidapi_key=RAPID_API_KEY,
consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
access_token=ACCESS_KEY,
access_token_secret=ACCESS_SECRET
)
print("-------------")
print("BOTOMETER SCORE SAMPLER...")
print(" BQ:", self.dataset_address.upper())
print(" LIMIT:", self.limit)
print(" BOTOMETER:", type(self.bom), self.bom.api_url, f"v{self.bom.api_version}")
seek_confirmation()
    @property
    def bots_sql(self):
        # Query: a random sample of LIMIT distinct user ids labeled as bots.
        return f"""
            SELECT DISTINCT user_id
            FROM `{self.dataset_address}.user_details_v20210806_slim`
            WHERE is_bot=True -- BOTS
            ORDER BY rand() -- RANDOM SAMPLE
            LIMIT {self.limit}
        """
    @property
    def humans_sql(self):
        # Query: a random sample of LIMIT distinct user ids labeled as humans.
        return f"""
            SELECT DISTINCT user_id
            FROM `{self.dataset_address}.user_details_v20210806_slim`
            WHERE is_bot=FALSE -- HUMANS
            ORDER BY rand() -- RANDOM SAMPLE
            LIMIT {self.limit}
        """
    @cached_property
    def bots_df(self):
        # DataFrame of sampled bot user ids (memoized; queries BigQuery once).
        return self.bq.query_to_df(self.bots_sql)
    @cached_property
    def humans_df(self):
        # DataFrame of sampled human user ids (memoized; queries BigQuery once).
        return self.bq.query_to_df(self.humans_sql)
    @cached_property
    def bot_ids(self):
        # Sampled bot user ids as a plain list.
        return self.bots_df["user_id"].tolist()
    @cached_property
    def human_ids(self):
        # Sampled human user ids as a plain list.
        return self.humans_df["user_id"].tolist()
def parse_scores(self, user_id, result) -> list:
"""convert raw botometer response structure to one or more normalized database records"""
# print(user_id)
# print(result)
lookup_at = generate_timestamp()
records = []
if "error" in result:
#> {'error': 'TweepError: Not authorized.'}
#> {'error': "TweepError: [{'code': 32, 'message': 'Could not authenticate you.'}]"}
error_message = result["error"] #> str
print(" ...", error_message)
records.append({
"user_id": user_id,
"lookup_at": lookup_at,
"error_message": error_message,
"score_type": None,
"cap": None,
"astroturf": None,
"fake_follower": None,
"financial": None,
"other": None,
"overall": None,
"self_declared": None,
"spammer": None,
})
else:
try:
#> {
#> 'cap': {'english': 0.8021481695167405, 'universal': 0.8148417533461276}
#> 'raw_scores': {
#> 'english': {'astroturf': 0.02,
#> 'fake_follower': 0.75,
#> 'financial': 0.03,
#> 'other': 0.49,
#> 'overall': 0.75,
#> 'self_declared': 0.2,
#> 'spammer': 0.29},
#> 'universal': {'astroturf': 0.02,
#> 'fake_follower': 0.78,
#> 'financial': 0.05,
#> 'other': 0.45,
#> 'overall': 0.78,
#> 'self_declared': 0.18,
#> 'spammer': 0.25}}
#>}
cap_types = sorted(list(result["cap"].keys()))
score_types = sorted(list(result["raw_scores"].keys()))
if cap_types != score_types:
raise AttributeError("OOPS unexpected response structure")
#> ["english", "universal"]
for score_type in score_types:
raw_scores = result["raw_scores"][score_type]
cap_score = result["cap"][score_type]
records.append({
"user_id": user_id,
"lookup_at": lookup_at,
"error_message": None,
"score_type": score_type,
"cap": cap_score,
"astroturf": raw_scores.get("astroturf"),
"fake_follower": raw_scores.get("fake_follower"),
"financial": raw_scores.get("financial"),
"other": raw_scores.get("other"),
"overall": raw_scores.get("overall"),
"self_declared": raw_scores.get("self_declared"),
"spammer": raw_scores.get("spammer"),
})
except Exception as err:
print("OOPS", err)
return records
@cached_property
def scores_table(self):
return self.bq.client.get_table(f"{self.dataset_address}.botometer_scores") # an API call (caches results for subsequent inserts)
def save_scores(self, scores):
"""upload a batch of scores to bigquery"""
self.bq.insert_records_in_batches(self.scores_table, scores)
def perform(self):
print("-------------------")
print("FETCHING SAMPLE...")
print(" BOT IDS:", len(self.bot_ids))
print(" HUMAN IDS:", len(self.human_ids)) # self.human_ids[0], self.human_ids[-1]
user_ids = self.bot_ids + self.human_ids
print(" USERS TOTAL:", len(user_ids))
print("-------------------")
print("PERFORMING BOTOMETER LOOKUPS...")
records = []
for user_id, result in self.bom.check_accounts_in(user_ids):
print(" USER ID:", user_id)
user_scores = self.parse_scores(user_id, result)
records += user_scores
print("-------------------")
if any(records):
print(f"SAVING SCORES ({len(records)}) ...")
self.save_scores(records)
if __name__ == "__main__":
    # Entry point: run one full sampling pass, then sleep for a day so a
    # long-running worker process doesn't immediately re-trigger the job.
    job = BotometerScoreSampler()
    job.perform()
    server_sleep(seconds=24*60*60)
| 7,292 | 35.10396 | 137 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/follower_network/helper_follower_network_crawler.py | #Use this code to download tweets that contain a given keyword
# -*- coding: UTF-8 -*-
from twython import Twython
from datetime import datetime, timedelta
import numpy as np
from helper_twitter_api import *
import sqlite3
from operator import itemgetter
import os
import csv
import urllib.request, urllib.parse, urllib.error,urllib.request,urllib.error,urllib.parse,json,re,datetime,sys,http.cookiejar
from operator import itemgetter
import time
import sys
import networkx as nx
#from ioHELPER import *
def _open_following_page(cookie_jar, url):
    """GET one page of mobile.twitter.com's 'following' listing; return it as a list of lines."""
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
    # The original built this header list in the paging loop but never attached
    # it to the opener; attaching it here keeps every request consistent.
    opener.addheaders = [
        ('Host', "twitter.com"),
        ('User-Agent', "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"),
        ('Accept', "application/json, text/javascript, */*; q=0.01"),
        ('Accept-Language', "de,en-US;q=0.7,en;q=0.3"),
        ('X-Requested-With', "XMLHttpRequest"),
        ('Referer', url),
        ('Connection', "keep-alive")
    ]
    response = opener.open(url)
    return response.read().decode().split('\n')


def get_friends(toquery_sn, filename, thr_friends):
    """Scrape the accounts each user in ``toquery_sn`` follows via the mobile
    twitter site and append one CSV line per user to ``filename``
    (format: user,friend1,friend2,...).

    Paging stops for a user once more than ``thr_friends`` friends were
    collected. Protected/suspended users are tracked locally and skipped.
    """
    cookieJar = http.cookiejar.CookieJar()
    start = time.time()
    # BUGFIX: the original appended to the undefined names INACCESSIBLE and
    # toquery (NameError); track inaccessible users in a local list instead.
    inaccessible = []
    n = len(toquery_sn)
    for count, user in enumerate(toquery_sn, start=1):
        keepgoing = True
        accessible = True
        ufriends = []
        # BUGFIX: cnext must exist even when the first request or parse fails,
        # otherwise the paging condition below raised a NameError.
        cnext = []
        url = 'https://mobile.twitter.com/' + user + '/following'
        ## Try querying first page of friends screen_names
        try:
            res = _open_following_page(cookieJar, url)
            ### Parse the response to uncover the latest number of friends of that user
            try:
                nfriends = [int(''.join(i.split('"count">')[1].split('</span>')[0].split('.'))) for i in res if '<td class="info"><span class="count">' in i][0]
            except Exception as e:  # protected or suspended user
                print('Error occcured: ', e)
                accessible = False
                inaccessible.append(user)
            if accessible:
                print('\tUser %s of %s: %s has %s following, we will only get %s' % (count, n, user, nfriends, thr_friends))
                ufriends += [i.split('/follow/')[1].split('"')[0] for i in res if '/i/guest/follow/' in i]
                cnext = [i.split('cursor=')[1].split('"')[0] for i in res if user + '/following?cursor=' in i]
        except Exception as e:
            # Only urllib HTTPError carries .code; use getattr so other
            # exception types don't raise AttributeError here.
            if accessible and getattr(e, 'code', None) == 404:
                keepgoing = False
        ### Keep iterating through cursor pages as long as we find results
        if len(cnext) > 0 and keepgoing and accessible:
            cursor = cnext[0]
            while True:
                url = 'https://mobile.twitter.com/' + user + '/following?cursor=' + cursor
                res = _open_following_page(cookieJar, url)
                ufriends += [i.split('/follow/')[1].split('"')[0] for i in res if '/i/guest/follow/' in i]
                cnext = [i.split('cursor=')[1].split('"')[0] for i in res if user + '/following?cursor=' in i]
                if len(cnext) > 0:
                    cursor = cnext[0]
                    if len(ufriends) > thr_friends:
                        print("\tGot %s following of %s users. Stop collecting" % (user, len(ufriends)))
                        break
                else:
                    break
        ### Exited loop; append a CSV line: the user, then everyone they follow.
        if keepgoing:
            with open(filename, 'a') as fr:
                line = user + ','
                if len(ufriends) > 0:
                    line = user + ',' + ','.join(ufriends)
                fr.write(line)
                fr.write('\n')
    print('Finished crawling following network. Took ', time.time() - start)
#Convert friends graph of a target user into a networkx object
def write_friends_graph_networkx(filename_target, filename_friends_graph):
    """Build the friends (following) network around one target user and pickle it.

    filename_target: CSV line "target,friend1,friend2,..." for the target user.
    filename_friends_graph: crawler output, one "follower,friend1,..." line per user.
    Writes "friends_network_<target>.pickle" (directed) and the undirected copy,
    and returns the directed networkx graph.
    """
    Edges = []
    Nodes = {}
    # Use a context manager so the file handle is not leaked (the original
    # called open(...).read() without ever closing the file).
    with open(filename_target) as f:
        x = f.read()[0:-1]  # drop the trailing newline
    users = x.split(',')
    target = users[0]
    target_friends = users[1:]
    Nodes[target] = 1
    for friend in target_friends:
        Edges.append((friend, target))
        Nodes[friend] = 1
    nv = len(Nodes)
    ne = 0
    with open(filename_friends_graph) as fp:
        for line in fp:
            users = line.strip('\n').split(",")
            follower = users[0]
            friends = users[1:]
            # Keep only edges whose source is the target or one of its friends.
            for friend in friends:
                if friend in Nodes:
                    ne += 1
                    Edges.append((friend, follower))
    print("%s friends network has %s nodes and %s edges" % (target, nv, ne))
    Gdir = nx.DiGraph()
    Gdir.add_edges_from(Edges)  # nodes are added implicitly with each edge
    G = Gdir.to_undirected()
    nx.write_gpickle(G, "friends_network_%s_undirected.pickle" % target)
    nx.write_gpickle(Gdir, "friends_network_%s.pickle" % target)
    print("Wrote friends network for %s to networkx object in pickle file" % target)
    return Gdir
#Convert friends graph of a set of users tweeting about a topic into a networkx object
def write_friends_graph_tweets_networkx(Screen_names, filename_friends_graph, filename_gpickle):
    """Build the directed 'following' graph among ``Screen_names`` and pickle it.

    filename_friends_graph: crawler output, one "follower,friend1,..." line per user.
    An edge friend -> follower is added whenever the friend is also one of the
    tracked screen names. The directed graph is written to ``filename_gpickle``
    and returned.
    """
    # Membership dict so the inner check is O(1).
    Nodes = {user: 1 for user in Screen_names}
    Edges = []
    ne = 0
    # Context manager instead of a bare open() so the handle is always closed.
    with open(filename_friends_graph) as fp:
        for line in fp:
            users = line.strip('\n').split(",")
            follower = users[0]
            friends = users[1:]
            for friend in friends:
                if friend in Nodes:
                    ne += 1
                    Edges.append((friend, follower))
    print("Following network has %s nodes and %s edges" % (len(Nodes), ne))
    Gdir = nx.DiGraph()
    Gdir.add_edges_from(Edges)  # nodes are added implicitly with each edge
    nx.write_gpickle(Gdir, filename_gpickle)
    print("Wrote following network to networkx object in file %s" % filename_gpickle)
    return Gdir
| 8,352 | 35.317391 | 159 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/follower_network/follower_network_collector.py | # -*- coding: utf-8 -*-
"""follower_network_collector.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1T0ED71rbhiNF8HG-769aBqA0zZAJodcd
"""
#This notebook builds a follower network for a set of users
#first import the helper functions
from helper_follower_network_crawler import *
import sqlite3
import pandas as pd
import random
#COLLECT FOLLOWING NETWORK
#Get the following for a set of screen names stored in a tweet database,
#then convert the crawl into a networkx graph object.
max_following = 2000 #maximum number of following to get per user(10 minutes for 1000 users)
max_users = int(1e4) #maximum number of users you will build the following network for
FilenameTweetDatabase = "ReopenAmerica.db"
FilenameFollowingNetwork = "followingnetwork_ReopenAmerica.csv"
print("Tweets will be loaded from database file %s"%FilenameTweetDatabase)
print("Follower network saved to file %s"%FilenameFollowingNetwork)
FilenameFollowingNetworkGpickle = FilenameFollowingNetwork.replace(".csv",".gpickle")
print("Follower network graph object saved to file %s"%FilenameFollowingNetworkGpickle)
print("We will collect maximum %s users, maximum %s following per user"%(max_users,max_following))

#get the list of unique screen_names from the tweet database
conn = sqlite3.connect("%s"%FilenameTweetDatabase)
df_screen_name = pd.read_sql_query("SELECT screen_name FROM tweet", conn)
conn.close()  # release the sqlite handle as soon as the query is done
ScreenNamesAll = list(set([row[0] for row in df_screen_name.values.tolist()])) #remove duplicate users
nusers = len(ScreenNamesAll) #total number of unique users in tweet database

#only keep max_users
if max_users < nusers:
    # BUGFIX: sample from ScreenNamesAll; the original passed the
    # not-yet-defined name ScreenNames and raised a NameError.
    ScreenNames = random.sample(ScreenNamesAll, max_users) #take random sample from list
else:
    ScreenNames = ScreenNamesAll
print("%s users in entire tweet database\nWe will build the following network for %s of them"%(nusers,len(ScreenNames)))
print("\nHere are some of your screen names:%s"%ScreenNames[0:10])

#Collect the following of each person in ScreenNames and append them to the CSV file.
#start_index lets you resume where the crawler left off if it crashes (0 = fresh run).
#Collection rate ~ 100 screen names per minute
start_index = 0
temp = ScreenNames[start_index:]
# NOTE(review): temp is already sliced from start_index, so temp[start_index]
# double-offsets when start_index > 0; harmless for the default of 0.
print("starting at %s"%temp[start_index])
get_friends(temp,FilenameFollowingNetwork,max_following)

#Convert the crawled following network into a networkx object so we can analyze it
G = write_friends_graph_tweets_networkx(ScreenNames,FilenameFollowingNetwork,FilenameFollowingNetworkGpickle)
| 3,008 | 39.662162 | 120 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/bot_communities/midac_bot_community_analysis_libya.py | # -*- coding: utf-8 -*-
"""MIDAC Bot Community Analysis Libya.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1rrTv4JkYoQx0VVVtq8leYAqh6aX1VAe8
# Bot Community Analysis
Use this notebook to analyze communities in bot retweet network
Data = Bot profiles and community membership, bot tweets
Analysis steps
1) Look at popular retweeted users, arabic profiles, and account creation dates within in bot retweet community.
3) Cluster bots by creation date. Look at popular retweeted users and arabic profiles in each created_at community
"""
from datetime import datetime, timedelta
import numpy as np
import networkx as nx
from networkx.algorithms import community
import sqlite3,sys,os,string
import pandas as pd
import matplotlib.pyplot as plt
from os import path
from helper_retweet_network import *
#from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
#import arabic_reshaper
#from bidi.algorithm import get_display
"""## Load Data
Input:
1) fname_bots_db = file of database with bot tweets
2) fname_Gretweet = file where we saved the bot retweet network
3) fname_Gsim = file where we saved retweet similarity network of bots
Output:
1) df_profiles = dataframe with both profiles and created_at as a datetime object
2) df_communities = dataframe with bot profiles and communities
3) Gretweet = retweet network including bots and who they retweet
4) Gsim = similarity graph of bot accounts based on Jacard index of similarity network
"""
# Input/output paths for the Libya bot dataset.
path_data = "Libya//"
fname_bots_db = path_data+"Libya_bot_forensics.db"
fname_Gretweet = path_data + "Gretweet.gpickle"
fname_Gsim = path_data + "Gsim.gpickle"
fname_bots_updated_csv = path_data+"Libya_bot_forensics_community.csv"
# Load bot tweets, bot profiles, community labels, and the two saved graphs.
conn = sqlite3.connect("%s"%fname_bots_db)
df_tweets = pd.read_sql_query("SELECT * FROM tweet", conn)
df_profiles = pd.read_sql_query("SELECT * FROM user_profile", conn)
df_communities = pd.read_csv(fname_bots_updated_csv)
Gretweet = nx.read_gpickle(fname_Gretweet)
Gsim = nx.read_gpickle(fname_Gsim)
fmt = '%Y-%m-%d %H:%M:%S'
#convert created_at to a datetime object (profiles)
Tdatetime = []
for s in df_profiles.created_at:
    date_time_obj = datetime.strptime(s, fmt)
    Tdatetime.append(date_time_obj)
Tdatetime = np.array(Tdatetime)
df_profiles["created_at_datetime"] = Tdatetime
# Same conversion for the community dataframe; note Tdatetime is reused
# (overwritten) here, so t0 below reflects df_communities, not df_profiles.
Tdatetime = []
for s in df_communities.created_at:
    date_time_obj = datetime.strptime(s, fmt)
    Tdatetime.append(date_time_obj)
Tdatetime = np.array(Tdatetime)
df_communities["created_at_datetime"] = Tdatetime
t0 = min(Tdatetime)  # earliest account-creation time
ncomm = max(df_communities.Community)+1  # community labels run 0..ncomm-1
print("%s bots\n%s bot tweets\n%s bot communities"%(len(df_profiles),
                                                    len(df_tweets),
                                                    ncomm))
"""## Function to detect Arabic characters"""
## functions to detect if a string has arabic characters
def isarabic_char(ch):
    """Return True if ``ch`` lies in one of the Arabic Unicode blocks, or is
    the regional-indicator letter 'A' (U+1F1E6, the first half of flag emojis
    such as the Saudi flag)."""
    if ('\u0600' <= ch <= '\u06FF' or              # Arabic
        '\u0750' <= ch <= '\u077F' or              # Arabic Supplement
        '\u08A0' <= ch <= '\u08FF' or              # Arabic Extended-A
        '\uFB50' <= ch <= '\uFDFF' or              # Arabic Presentation Forms-A
        '\uFE70' <= ch <= '\uFEFF' or              # Arabic Presentation Forms-B
        '\U00010E60' <= ch <= '\U00010E7F' or      # Rumi Numeral Symbols
        '\U0001EE00' <= ch <= '\U0001EEFF' or      # Arabic Mathematical Symbols
        ch == '\U0001F1E6'):                       # BUGFIX: duplicated test removed
        return True
    else:
        return False

def isarabic_str(str):
    """Return True if any character of the string is Arabic (per isarabic_char)."""
    return any(isarabic_char(ch) for ch in str)
"""## Fraction of Arabic profiles in each community"""
# Fraction of Arabic profiles in each retweet community.
for counter in range(ncomm):
    mask_arab = df_communities.arabic_profile==True
    mask_comm = df_communities.Community==counter
    nc = len(list(df_communities.screen_name[mask_comm]))
    # NOTE(review): nc == 0 for an empty community would divide by zero below.
    nc_arab = len(list(df_communities.screen_name[mask_comm & mask_arab]))
    frac_arab = nc_arab/nc
    print("Community %s has %.2f percent Arab profiles"%(counter,frac_arab))
"""## Top retweeted users in each community
For each community of bots, we form the subgraph containing the bots and everyone they retweet. Then we look at the top retweeted users in this subgraph.
Input
1) display_max = number of retweet sources to display for each community
"""
display_max = 20 #number of nodes to display
for counter in range(ncomm):
    community_screen_names = list(df_communities.screen_name[df_communities.Community==counter])
    # Vsub = the community's bots plus everyone they retweet (their predecessors).
    Vsub = []
    for v in community_screen_names:
        if Gretweet.has_node(v):
            nb = list(Gretweet.predecessors(v))
            Vsub+=nb
            Vsub.append(v)
    print("Retweet community %s with %s users"%(counter,len(community_screen_names)))
    G = Gretweet.subgraph(Vsub)
    # Out-degree in the retweet graph = number of times a user was retweeted here.
    Dout = dict(G.out_degree())
    print("Top out degree")
    Centrality = Dout
    display_top_centrality_nodes(Centrality,display_max)
"""## Top retweeted users in each (retweet,profile language) community
For each retweet community of bots, we separate out those
with Arabic and non-Arabic profies.
We form the subgraph containing the bots and everyone they retweet.
Then we look at the top retweeted users in this subgraph.
Input
1) display_max = number of retweet sources to display for each community
"""
display_max = 10 #number of nodes to display
# Arabic-profile bots per community.
for counter in range(ncomm):
    mask_arab = df_communities.arabic_profile==True
    mask_comm = df_communities.Community==counter
    community_screen_names = list(df_communities.screen_name[mask_comm & mask_arab])
    Vsub = []
    for v in community_screen_names:
        if Gretweet.has_node(v):
            nb = list(Gretweet.predecessors(v))
            Vsub+=nb
            Vsub.append(v)
    print("Arabic profile retweet community %s with %s users"%(counter,len(community_screen_names)))
    G = Gretweet.subgraph(Vsub)
    Dout = dict(G.out_degree())
    print("Top out degree")
    Centrality = Dout
    display_top_centrality_nodes(Centrality,display_max)
# Same analysis restricted to non-Arabic profiles.
for counter in range(ncomm):
    mask_arab = df_communities.arabic_profile==False
    mask_comm = df_communities.Community==counter
    community_screen_names = list(df_communities.screen_name[mask_comm & mask_arab])
    Vsub = []
    for v in community_screen_names:
        if Gretweet.has_node(v):
            nb = list(Gretweet.predecessors(v))
            Vsub+=nb
            Vsub.append(v)
    print("\nNon-Arabic profile retweet community %s with %s users"%(counter,len(community_screen_names)))
    G = Gretweet.subgraph(Vsub)
    Dout = dict(G.out_degree())
    print("Top out degree")
    Centrality = Dout
    display_top_centrality_nodes(Centrality,display_max)
"""## Retweet sources and their bot followers
Print out the bots retweeting a retweet source in each bot community
INPUT:
1) source = screen name of retweet source
OUTPUT:
1) List of bots retweeting source in each community
"""
source = "ghadaoueiss"
display_max = 0 #number of nodes to display (0 disables the per-bot listing)
# successors of `source` in the retweet graph = accounts that retweeted it.
nb = list(Gretweet.successors(source))
print("%s retweeted by %s bots in retweet graph "%(source,len(nb)))
for counter in range(ncomm):
    community_screen_names = list(df_communities.screen_name[df_communities.Community==counter])
    Vsub = list(set(community_screen_names).intersection(nb))
    print("\t%s bots in community %s"%(len(Vsub),counter))
    for cv,v in enumerate(Vsub):
        if (cv+1)>=display_max:break
        print("\t\tBot %s: %s"%(cv,v))
"""## Collect Bots Created in Different Time Windows
Choose a start and stop date. This cell will find all bots in each community created between those dates and save their profiles to a csv file whose name tell us the bot community, start date, and stop date.
Input:
tstart = start date (string)
tstop = stop date (string)
df_communities = dataframe with community info
ncomm = number of communities
"""
tstart = '2019-01-01'
tstop = '2019-06-01'
dtstart = datetime. strptime(tstart,"%Y-%m-%d")
dtstop = datetime. strptime(tstop,"%Y-%m-%d")
# NOTE(review): range(ncomm+1) iterates one label past the highest community;
# the last iteration selects an empty dataframe.
for counter in range(ncomm+1):
    df_comm = df_communities[df_communities.Community==counter]
    print("Community %s with %s accounts"%(counter,len(df_comm)))
    mask0 = (df_communities.Community==counter)
    mask1 = (pd.to_datetime(df_communities.created_at_datetime)>dtstart)
    mask2 = (pd.to_datetime(df_communities.created_at_datetime)<=dtstop)
    # NOTE(review): masks are built on df_communities but applied to the
    # df_comm subset; pandas aligns them by index — confirm this is intended.
    Bots_in_window = df_comm[mask0 & mask1 & mask2]
    print("\t%s bots in community %s created betweet %s to %s"%(len(Bots_in_window),
                                                                counter,tstart,tstop))
    fname = path_data + "Bots_Community_%s_%s_to_%s.csv"%(counter,tstart,tstop)
    Bots_in_window.to_csv(fname)
| 8,689 | 32.041825 | 208 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/bot_communities/SpectralCommunities.py | import networkx as nx
import numpy as np
from sklearn.cluster import SpectralClustering
def spectral_clustering(G, k=2):
    """Partition graph ``G`` into ``k`` communities with spectral clustering.

    The (weighted) adjacency matrix of the undirected version of G is used as
    a precomputed affinity matrix. Returns a list of k node lists, sorted so
    the largest community comes first; ``labels_`` order matches G.nodes().
    """
    A = nx.adjacency_matrix(G.to_undirected())
    clustering = SpectralClustering(n_clusters=k, eigen_solver=None,
                                    affinity='precomputed', n_init=20)
    clustering.fit(A)  # labels are stored on the estimator; the return value was unused
    Comm = [[] for _ in range(k)]
    # labels_ is aligned with the iteration order of G.nodes().
    for idx, node in enumerate(G.nodes()):
        Comm[clustering.labels_[idx]].append(node)
    Comm.sort(reverse=True, key=len)
    return Comm
tweet-analysis-2020 | tweet-analysis-2020-main/start/bot_communities/midac_bot_community_detection_libya.py | # -*- coding: utf-8 -*-
"""MIDAC Bot Community Detection Libya.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/156K2fQM_TNps7WHcdqcMU8gDVbOthIyo
# Bot Community Detection
Use this notebook to detect communities in bot retweet network
Data = Bot profiles, bot tweets
Analysis steps
1) Cluster bots based on retweet network (who they retweet)
2) Look at popular retweeted users, arabic profiles, and account creation dates within in bot retweet community.
3) Cluster bots by creation date. Look at popular retweeted users and arabic profiles
in each created_at community
"""
from datetime import datetime, timedelta
import numpy as np
import networkx as nx
from networkx.algorithms import community
import sqlite3,sys,os,string
import pandas as pd
import matplotlib.pyplot as plt
from os import path
from helper_retweet_network import *
from SpectralCommunities import spectral_clustering
from sklearn.cluster import KMeans
#from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
#import arabic_reshaper
#from bidi.algorithm import get_display
"""## Load Data
1) fname_bots_db = file of database with bot tweets
2) fname_Gretweet = file where we will save bot retweet network
3) fname_Gsim = file where we will save retweet similarity network of bots
4) fname_bots_updated_csv = file where we save the bot profiles with community membership
"""
# Input/output paths for the Libya bot dataset.
path_data = "Libya//"
fname_bots_db = path_data+"Libya_bot_forensics.db"
fname_Gretweet = path_data + "Gretweet.gpickle"
fname_Gsim = path_data + "Gsim.gpickle"
fname_bots_updated_csv = path_data+"Libya_bot_forensics_community.csv"
# Load bot tweets and profiles from the sqlite database.
conn = sqlite3.connect("%s"%fname_bots_db)
df_tweets = pd.read_sql_query("SELECT * FROM tweet", conn)
df_profiles = pd.read_sql_query("SELECT * FROM user_profile", conn)
print("%s bots\n%s bot tweets"%(len(df_profiles),len(df_tweets)))
"""## Create and save retweet network"""
#Gretweet = retweet_network_from_tweets(df_tweets)
#nx.write_gpickle(Gretweet, fname_Gretweet)
"""## Create and save similarity network
The similarity is based on the Jaccard index. For each pair of bots we calculate the Jaccard index based on the sets of people they retweet. If two bots retweet exactly the same users, their Jaccard index is one. If they dont retweet anyone in common, their Jaccard index is zero.
"""
## Calculate Jacard similarity based on following for bots
# NOTE(review): this cell reads Gretweet, but Gretweet is only loaded further
# below (its construction above is commented out) — in notebook form the load
# cell must be run first, otherwise this raises a NameError.
V = list(set(df_profiles.screen_name))
print('%s bot profiles'%len(V))
# All unordered bot pairs that exist in the retweet graph.
ebunch = []
for counter,u in enumerate(V):
    for v in V[counter+1:]:
        if (Gretweet.has_node(v)) and (Gretweet.has_node(u)):
            ebunch.append((u,v))
preds = nx.jaccard_coefficient(Gretweet.to_undirected(),ebunch)
print(len(ebunch), " node pairs to check Jaccard index")
print("Create similarity graph between bots using Jacard index based on retweets")
counter = 0
Gsim = nx.Graph()
ne = 0
# Keep only pairs with positive similarity as weighted edges.
for u, v, s in preds:
    counter+=1
    if s >0:
        Gsim.add_edge(u, v, weight=s)
        ne+=1
    if counter%1e6==0:print(counter,ne, " positive weights")  # progress every 1M pairs
nv = Gsim.number_of_nodes()
ne = Gsim.number_of_edges()
print("Gsim has %s nodes, %s edges"%(nv,ne))
print("Save similarity graph Gsim")
#nx.write_gpickle(Gsim, fname_Gsim)
"""## Create datetime objects from created at times of accounts.
We will look for temporal clusters. Add column created_at_datetime to df_profiles with creation time as a datetime object for analysis purposes.
"""
# Parse created_at strings into datetime objects so we can look for
# temporal clusters of account creation.
fmt = '%Y-%m-%d %H:%M:%S'
Tdatetime = []
for s in df_profiles.created_at:
    date_time_obj = datetime.strptime(s, fmt)
    Tdatetime.append(date_time_obj)
Tdatetime = np.array(Tdatetime)
df_profiles["created_at_datetime"] = Tdatetime
t0 = min(Tdatetime)  # earliest account-creation time
"""# Community detection based on retweet network
1) Load retweet network Gretweet
2) Load bot retweet similarity network Gsim
"""
Gretweet = nx.read_gpickle(fname_Gretweet)
Gsim = nx.read_gpickle(fname_Gsim)
"""## Find communities in bot retweet similarity
1) Choose number of communities to find k
2) Apply spectral clustering to Gsim to find communities
Community = list of communities. Each element is a list of screen_names in a community
"""
## Find spectral communities in the weighted similarity graph.
k = 3 #number of spectal communities
Community = spectral_clustering(Gsim,k)
print("Found %s spectral communities"%len(Community))
for counter,community in enumerate(Community):
    print("Retweet Community %s with %s nodes"%(counter,len(community)))
#add Community column to df_profiles dataframe
# Invert the community lists into a screen_name -> label mapping.
Community_dict ={}
for counter,community in enumerate(Community):
    for v in community:
        Community_dict[v] = counter
# Label -1 marks profiles that were not assigned to any community.
Community_list = []
for v in df_profiles.screen_name:
    if v in Community_dict:
        Community_list.append(Community_dict[v])
    else:
        Community_list.append(-1)
df_profiles['Community'] = Community_list
df_profiles.to_csv(fname_bots_updated_csv)
plt.hist(df_profiles.Community)
plt.grid()
plt.show()
"""## Top retweeted users in each community
For each community of bots, we form the subgraph containing the bots and everyone they retweet. Then we look at the top retweeted users in this subgraph.
"""
display_max = 10 #number of nodes to display
for counter,c in enumerate(Community):
    # Vsub = the community's bots plus everyone they retweet (their predecessors).
    Vsub = []
    for v in c:
        if Gretweet.has_node(v):
            nb = list(Gretweet.predecessors(v))
            Vsub+=nb
            Vsub.append(v)
    print("Retweet community %s with %s users"%(counter,len(c)))
    G = Gretweet.subgraph(Vsub)
    # Out-degree here = number of times a user was retweeted by this community.
    Dout = dict(G.out_degree())
    print("Top out degree")
    Centrality = Dout
    display_top_centrality_nodes(Centrality,display_max)
"""## Arabic language profiles in each bot retweet community
Look at what fraction of user profiles in each bot retweet community contain Arabic characters
"""
# Commented out IPython magic to ensure Python compatibility.
## functions to detect if a string has arabic characters
def isarabic_char(ch):
    """Return True if ``ch`` lies in an Arabic Unicode block, or is the
    regional-indicator 'A' (U+1F1E6) used by flag emojis."""
    if ('\u0600' <= ch <= '\u06FF' or
        '\u0750' <= ch <= '\u077F' or
        '\u08A0' <= ch <= '\u08FF' or
        '\uFB50' <= ch <= '\uFDFF' or
        '\uFE70' <= ch <= '\uFEFF' or
        '\U00010E60' <= ch <= '\U00010E7F' or
        '\U0001EE00' <= ch <= '\U0001EEFF' or
        ch == '\U0001F1E6'):  # BUGFIX: duplicated emoji test removed
        return True
    else:
        return False

def isarabic_str(str):
    """Return True if any character of the string is Arabic (per isarabic_char)."""
    for ch in str:
        if isarabic_char(ch):
            return True
    return False

## Look at fraction of arabic profiles in each community
for counter, c in enumerate(Community):
    # Collect the profile descriptions of every bot in the community.
    for cv, v in enumerate(c):
        if cv == 0:
            profiles = df_profiles.description[df_profiles.screen_name==v]
        else:
            profile = df_profiles.description[df_profiles.screen_name==v]
            profiles = profiles.append(profile)
    n = len(profiles)
    narabic = sum([isarabic_str(p) for p in profiles])
    # BUGFIX: the notebook exporter commented out the format-argument line
    # below (its leading '%' looked like an IPython magic), leaving an
    # unterminated print( call and a SyntaxError. Re-joined here.
    print("Community %s: %s users, %.2f percent arabic profiles"
          % (counter, n, narabic/n))

# Flag every profile and persist the enriched dataframe.
ArabicProfile = []
for profile in df_profiles.description:
    ArabicProfile.append(isarabic_str(profile))
ArabicProfile = np.array(ArabicProfile)
df_profiles["arabic_profile"] = ArabicProfile
df_profiles.to_csv(fname_bots_updated_csv)
"""## Histogram of Creation date in each bot retweet community
Plot distribution of creation dates for bots in each retweet based community
"""
# Histogram of account-creation dates for each retweet community,
# binned roughly by month.
t0 = min(df_profiles.created_at_datetime).to_pydatetime()
tf = max(df_profiles.created_at_datetime).to_pydatetime()
nbins = round((tf-t0).days/31)  # ~one bin per month
color = ["orange","blue","red","green","yellow","pink","cyan"]
for counter,c in enumerate(Community):
    # Creation times of every bot in this community.
    T =[]
    for v in c:
        t = df_profiles.created_at_datetime[df_profiles.screen_name==v].values[0]
        T.append(t)
    T = np.array(T)
    plt.hist(T,nbins,color = color[counter],
             label="Community %s with %s nodes"%(counter,len(c)))
    plt.title("Community %s with %s bots"%(counter,len(c)))
    plt.grid()
    plt.xlabel("Bot creation date")
    plt.ylabel("Number of bots")
    plt.ylim([0,80])
    plt.show()
"""# CDF Plots and Windowed Histograms of Bot creation date grouped by retweet community"""
def cdf_created_at(df, t0, t1, window_days):
    """Bin the account-creation times in ``df.created_at_datetime``.

    Bin edges run from t0 (truncated to midnight) up to t1 in steps of
    ``window_days`` days.

    Returns:
        bins: list of bin-edge dates
        Cum_count: np.array, accounts created on or before each edge
        Hist_count: list, accounts created within window_days before each edge
    """
    # BUGFIX: the end of the range is now the t1 argument. The original read
    # the global ``tf`` (ignoring t1) and then clobbered t1 inside its loop.
    t1_bin = t1.replace(hour=0, minute=0, second=0, microsecond=0)
    t0_bin = t0.replace(hour=0, minute=0, second=0, microsecond=0)
    delta = timedelta(days=window_days)
    # Build the edges t0_bin, t0_bin + delta, ... up to and including t1_bin.
    bins = []
    counter_days = 0
    while True:
        edge = t0_bin + counter_days * delta
        if edge > t1_bin:
            break
        bins.append(edge)
        counter_days += 1
    bins = np.array(bins)
    Cum_count = []
    Hist_count = []
    for t in bins:
        Cum_count.append(len(df[df.created_at_datetime <= t]))
        Hist_count.append(len(df[(df.created_at_datetime <= t) & (df.created_at_datetime > t - delta)]))
    Cum_count = np.array(Cum_count)
    # (The original also computed a normalized CDF here but never returned it;
    # that dead code — which divided by zero on an empty df — is removed.)
    bins = [timestamp.date() for timestamp in bins]
    return bins, Cum_count, Hist_count
# Per-community windowed histogram of account creation dates.
t0 = min(df_profiles.created_at_datetime)
tf = max(df_profiles.created_at_datetime)
nbins = round((tf-t0).days/7)  # ~one bin per week
color = ["orange","blue","red","green"]
window_days = 7
for counter,c in enumerate(Community):
    df_comm = df_profiles[df_profiles.Community==counter]
    bins,CDF,Hist_count = cdf_created_at(df_comm,t0,tf,window_days)
    #plt.subplot(1,2,2)
    #plt.plot(bins,CDF,color = color[counter],
    #         label="Community %s with %s nodes"%(counter,len(c)))
    #plt.title("Community %s with %s bots"%(counter,len(c)))
    #plt.grid()
    #plt.xlabel("Bot creation date")
    #plt.ylabel("Cumulative Fraction of bots")
    #plt.ylim([0,80])
    #plt.subplot(1,2,1)
    plt.plot(bins,Hist_count,color = color[counter],
             label="Community %s with %s nodes"%(counter,len(c)))
    #plt.title("Community %s with %s bots"%(counter,len(c)))
    plt.grid()
    plt.xlabel("Bot creation date")
    plt.ylabel("Number of bots")
    plt.title("Community %s with %s bots, window = %s days"%(counter,len(c),window_days))
    #plt.ylim([0,80])
    plt.show()
## Get accounts for each community created in a given time window
window_days = 7 #length of time window to look in
bot_lim = 10  # only export windows with at least this many new bots
t0 = min(df_profiles.created_at_datetime)
tf = max(df_profiles.created_at_datetime)
bins,CDF,Hist_count = cdf_created_at(df_profiles,t0,tf,window_days)
print("All %s accounts"%(len(df_profiles)))
# First pass over all accounts (reporting disabled; `1` is a no-op placeholder
# kept so the if-body isn't empty).
for counter1,t in enumerate(bins[0:-1]):
    n = Hist_count[counter1]
    t1 = bins[counter1+1]
    if n>=bot_lim:
        #print("\t%s to %s: %s bots made"%(t,t1,n))
        1
# Second pass per community: export the bots created in each burst window.
for counter,c in enumerate(Community):
    df_comm = df_profiles[df_profiles.Community==counter]
    bins,CDF,Hist_count = cdf_created_at(df_comm,t0,tf,window_days)
    print("Community %s with %s accounts"%(counter,len(df_comm)))
    for counter1,t in enumerate(bins[0:-1]):
        n = Hist_count[counter1]
        t1 = bins[counter1+1]
        # bins are plain dates; promote them back to datetimes for comparison.
        dt1 =datetime.combine(t1, datetime.min.time())
        dt = datetime.combine(t, datetime.min.time())
        if n>=bot_lim:
            print("\t%s to %s: %s bots made"%(t,t1,n))
            mask1 = (pd.to_datetime(df_comm.created_at_datetime)>dt)
            mask2 = (pd.to_datetime(df_comm.created_at_datetime)<=dt1)
            Bots_in_window = df_comm[mask1 & mask2]
            fname = path_data + "Bots_Community_%s_%s_to_%s.csv"%(counter,t,t1)
            Bots_in_window.to_csv(fname)
            #for bot in Bots_in_window.screen_name:
"""# Community detection based on account creation time
Apply k-means clustering to created_at.
Add column community_created_at to df_profiles with community membership for each profile
"""
Xdays = np.array([(t-t0).total_seconds()/(3600*24) for t in Tdatetime])
n_clusters = 3
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(Xdays.reshape(-1,1))
df_profiles['community_created_at'] = kmeans.labels_
t0 = min(df_profiles.created_at_datetime).to_pydatetime()
tf = max(df_profiles.created_at_datetime).to_pydatetime()
nbins = round((tf-t0).days/7)
for counter,center1 in enumerate(kmeans.cluster_centers_):
T = df_profiles.created_at_datetime[df_profiles.community_created_at ==counter]
ncom = len(T)
center = t0+timedelta(days=center1[0])
print("center %s: %s, %s accounts"%(counter,center,ncom))
plt.hist(T,bins = nbins)
plt.grid()
plt.xlabel("Creation Date", fontsize = 16)
plt.ylabel("Number of Twitter bots", fontsize = 16)
plt.show()
## create list of screen names of users in communities
CommunityCreatedAt =[]
for c,center in enumerate(kmeans.cluster_centers_):
l = list(df_profiles.screen_name[df_profiles.community_created_at==c])
CommunityCreatedAt.append(l)
#CDF of creation times of bots
tf_bin = tf.replace(hour=0, minute=0, second=0, microsecond=0) # Returns a copy
t0_bin = t0.replace(hour=0, minute=0, second=0, microsecond=0) # Returns a copy
bins = []
ndays = 1
delta = timedelta(days=ndays)
GO = True
counter_days=0
while GO:
t1 = t0_bin + counter_days*delta
if t1 >tf_bin:
GO = False
break
else:
bins.append(t1)
counter_days+=1
bins = np.array(bins)
CDF = []
for t in bins:
cdf = len(df_profiles[df_profiles.created_at_datetime<=t])
CDF.append(cdf)
CDF = np.array(CDF)
CDF_n = CDF/max(CDF)
plt.plot(bins,CDF_n,color = 'orange')
plt.grid()
plt.xlabel("Bot creation date")
plt.ylabel("Fraction of bots")
plt.show()
plt.plot(bins,CDF,color = 'blue')
plt.grid()
plt.xlabel("Bot creation date")
plt.ylabel("Number of bots")
plt.show()
"""## Top retweeted users in each created_at community"""
display_max = 5 #number of nodes to display
for counter,c in enumerate(CommunityCreatedAt):
Vsub = []
for v in c:
if Gretweet.has_node(v):
nb = list(Gretweet.predecessors(v))
Vsub+=nb
print("Created_at community %s with %s users"%(counter,len(c)))
G = Gretweet.subgraph(Vsub)
Dout = dict(G.out_degree())
print("Top out degree")
Centrality = Dout
display_top_centrality_nodes(Centrality,display_max)
"""## Arabic language profiles
Look at what fraction of user profiles in each created_at community contain Arabic characters
"""
# Commented out IPython magic to ensure Python compatibility.
for counter,c in enumerate(CommunityCreatedAt):
for cv,v in enumerate(c):
if cv ==0:
profiles = df_profiles.description[df_profiles.screen_name==v]
else:
profile = df_profiles.description[df_profiles.screen_name==v]
profiles = profiles.append(profile)
n = len(profiles)
narabic = sum([isarabic_str(p) for p in profiles])
print("Created_at Community %s: %s users, %.2f percent arabic profiles"
# %(counter,n,narabic/n))
"""## Compare Arabic to non-Arabic profiles
Calculate which bot profiles have arabic characters and add arabic_profile boolean column to df_profiles
"""
Arabic = []
for p in df_profiles.description:
arabic = isarabic_str(p)
Arabic.append(arabic)
df_profiles["arabic_profile"] = Arabic
narabic = len(df_profiles[df_profiles.arabic_profile])
n = len(df_profiles)
print("%s bots, %s arabic profiles"%(n,narabic))
"""## Histogram of created_at for Arabic and non-arabic profiles"""
nbins = 100
plt.hist(df_profiles.created_at_datetime[df_profiles.arabic_profile==True],
label = "Arabic profiles",color = "orange",bins = nbins)
plt.grid()
plt.title("Arabic profiles",fontsize = 16)
plt.xlabel("Bot creation date",fontsize = 14)
plt.ylabel("Number of bots",fontsize = 14)
plt.show()
plt.hist(df_profiles.created_at_datetime[df_profiles.arabic_profile==False],
label = "Non-Arabic profiles",color = "blue",bins = nbins)
plt.grid()
plt.title("Non-Arabic profiles",fontsize = 16)
plt.xlabel("Bot creation date",fontsize = 14)
plt.ylabel("Number of bots",fontsize = 14)
plt.show()
## Look at number of bots created in each day
t0 = min(df_profiles.created_at_datetime).to_pydatetime()
tf = max(df_profiles.created_at_datetime).to_pydatetime()
ndays = 7 #temporal window to look for bot creations
delta = timedelta(days=ndays)
t1 = t0
Nbots =[]
T = []
while t1<tf:
t1 = t1+delta
nbots = len(df_profiles[(df_profiles.created_at_datetime>=t1) &
(df_profiles.created_at_datetime<t1+delta)])
Nbots.append(nbots)
T.append(t1)
plt.plot(T,Nbots,color = 'orange')
plt.grid()
plt.xlabel("Bot creation date",fontsize = 14)
plt.ylabel("Number of bots",fontsize = 14)
(tf-t0).days
| 16,819 | 31.284069 | 283 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/botcode/networkClassifierHELPER.py | import math
import networkx as nx
from collections import defaultdict
from operator import itemgetter
import numpy as np
import time
from ioHELPER import *
#####################################################################################################
####################### BUILD RETWEET NX-(SUB)GRAPH FROM DICTIONNARY ################################
#####################################################################################################
'''
Takes as input a csv file of retweet relationships and builds
a NetworkX object, in order to apply prebuilt mincut algorithms
'''
def buildRTGraph(graph, subNodes, lowerBound=0):
    """Build a weighted directed retweet graph restricted to *subNodes*.

    Parameters
    ----------
    graph : dict
        Maps a retweeting user id to the list of user ids they retweeted
        (one entry per retweet, so duplicates encode multiplicity).
    subNodes : list of ints
        Only users in this list are kept as graph nodes.
    lowerBound : int
        Edges are added only when the retweet count reaches this threshold.

    Returns
    -------
    networkx.DiGraph with edge attribute ``weight`` = retweet count.
    """
    rt_graph = nx.DiGraph()
    # Restrict to retweeters that are both in the data and in subNodes.
    retweeters = list(np.unique(np.intersect1d(subNodes, list(graph.keys()))))
    for position, source in enumerate(retweeters, start=1):
        print("at user n" + str(position) + " on " + str(len(graph)))
        # Collapse the retweet list into (target -> count) pairs.
        targets, occurrences = np.unique(graph[source], return_counts=True)
        weight_of = dict(zip(targets, occurrences))
        eligible_targets = np.unique(np.intersect1d(targets, subNodes))
        for target in eligible_targets:
            weight = weight_of[target]
            # Skip self-loops and edges below the sparsification threshold.
            if target != source and weight >= lowerBound:
                rt_graph.add_node(source)
                rt_graph.add_node(target)
                rt_graph.add_edge(source, target, weight=weight)
    return rt_graph
############################################################################
####################### BUILD/CUT ENERGY GRAPH #############################
############################################################################
'''
Takes as input the RT graph and builds the energy graph.
Then cuts the energy graph to classify
'''
def computeH(G, piBot ,edgelist_data, graph_out, graph_in):
    """Build the energy (flow) graph H and classify accounts via min-cut.

    Node ``1`` acts as the flow source and node ``0`` as the sink; every
    account node is wired to both. Returns (H, PL, user_data) where PL is
    the list of account ids that end up on the source (1) side of the
    minimum cut, and user_data holds per-account potentials and metadata.
    """
    H=nx.DiGraph()
    '''
    INPUTS:
    ## G (ntwkX graph)
        the Retweet Graph from buildRTGraph
    ## piBot (dict of floats)
        a dictionnary with prior on bot probabilities. Keys are users_ids, values are prior bot scores.
    ## edgelist_data (list of tuples)
        information about edges to build energy graph.
        This list comes in part from the getLinkDataRestrained method
    ## graph_out (dict of ints)
        a graph that stores out degrees of accounts in retweet graph
    ## graph_in (dict of ints)
        a graph that stores in degrees of accounts in retweet graph
    '''
    # Per-account unary potentials from the prior: phi_0 / phi_1 are the
    # negative log-likelihoods of "not bot" / "bot" (1e-20 guards log(0)).
    user_data={i:{
        'user_id':i,
        'out':graph_out[i],
        'in':graph_in[i],
        'old_prob': piBot[i],
        'phi_0': max(0,-np.log(float(10**(-20)+(1-piBot[i])))),
        'phi_1': max(0,-np.log(float(10**(-20)+ piBot[i]))),
        'prob':0,
        'clustering':0
    } for i in G.nodes()}
    # Pre-create every edge with capacity 0: both directions of each account
    # pair, plus account->sink(0) and source(1)->account edges.
    set_1 = [(el[0],el[1]) for el in edgelist_data]
    set_2 = [(el[1],el[0]) for el in edgelist_data]
    set_3 = [(el,0) for el in user_data]
    set_4 = [(1,el) for el in user_data]
    H.add_edges_from(set_1+set_2+set_3+set_4,capacity=0)
    # Distribute the pairwise potentials (psi values) onto capacities.
    for i in edgelist_data:
        val_00 = i[2][0]
        val_01 = i[2][1]
        val_10 = i[2][2]
        val_11 = i[2][3]
        H[i[0]][i[1]]['capacity']+= 0.5*(val_01+val_10-val_00-val_11)
        H[i[1]][i[0]]['capacity'] += 0.5*(val_01+val_10-val_00-val_11)
        H[i[0]][0]['capacity'] += 0.5*val_11+0.25*(val_10-val_01)
        H[i[1]][0]['capacity'] += 0.5*val_11+0.25*(val_01-val_10)
        H[1][i[0]]['capacity'] += 0.5*val_00+0.25*(val_01-val_10)
        H[1][i[1]]['capacity'] += 0.5*val_00+0.25*(val_10-val_01)
        # Sanity checks: min-cut requires non-negative capacities. On a
        # violation this only warns and stops wiring further edges.
        if(H[1][i[0]]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[i[1]][0]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[1][i[1]]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[i[0]][0]['capacity']<0):
            print("Neg capacity")
            break;
    # Add the unary potentials onto the source/sink edges.
    for i in user_data.keys():
        H[1][i]['capacity'] += user_data[i]['phi_0']
        if(H[1][i]['capacity'] <0):
            print("Neg capacity");
            break;
        H[i][0]['capacity'] += user_data[i]['phi_1']
        if(H[i][0]['capacity'] <0):
            print("Neg capacity");
            break;
    # Minimum s-t cut between source (1) and sink (0); mc is the pair of
    # node partitions. PL keeps the partition that contains the source.
    cut_value,mc=nx.minimum_cut(H,1,0)
    PL=list(mc[0]) #the other way around
    if 1 not in PL:
        print("Double check")
        PL=list(mc[1])
    PL.remove(1)
    return H, PL, user_data
###############################################################################
####################### COMPUTE EDGES INFORMATION #############################
###############################################################################
'''
Takes as input the RT graph and retrieves information on edges
to further build H.
'''
def getLinkDataRestrained(G):
    """Extract per-edge retweet information from the retweet graph.

    For every directed edge (src, dst) returns a record
    ``[src, dst, True, reciprocal, weight, reciprocal_weight]`` where
    *reciprocal* says whether the reverse edge (dst, src) also exists and
    *reciprocal_weight* is its weight (0 when absent). Records appear in
    the graph's edge-iteration order.
    """
    weight_of = {(src, dst): attrs['weight'] for src, dst, attrs in G.edges(data=True)}
    records = []
    for src, dst in weight_of:
        has_reverse = (dst, src) in weight_of
        reverse_weight = weight_of[(dst, src)] if has_reverse else 0
        records.append([src, dst, True, has_reverse, weight_of[(src, dst)], reverse_weight])
    return records
##########################################################################
####################### POTENTIAL FUNCTION ###############################
##########################################################################
'''
Compute joint energy potential between two users
'''
def psi(u1, u2, wlr, in_graph, out_graph, alpha, alambda1, alambda2, epsilon):
    """Joint energy potentials for the retweet edge u1 -> u2.

    Returns ``[psi_00, psi_01, psi_10, psi_11]`` where the subscripts are
    the (label of u1, label of u2) combinations. psi_01 decays with the
    endpoints' degrees through a logistic sigmoid scaled by ``wlr`` (the
    retweet count) and ``alpha = [mu, alpha1, alpha2]``; the other three
    values are fixed multiples of psi_01 controlled by alambda1 (lambda11),
    alambda2 (lambda00) and epsilon (the paper's delta).
    """
    # A zero degree makes the sigmoid argument divide by zero right after
    # this warning — preserved from the original behavior.
    if out_graph[u1] == 0 or in_graph[u2] == 0:
        print("Relationship problem: " + str(u1) + " --> " + str(u2))
    sigmoid_arg = alpha[1] / float(out_graph[u1]) - 1 + alpha[2] / float(in_graph[u2]) - 1
    # Guard against overflow in exp(); beyond ~10 the potential is ~0 anyway.
    psi_01 = wlr * alpha[0] / (1 + np.exp(sigmoid_arg)) if sigmoid_arg < 10 else 0
    psi_10 = (alambda2 + alambda1 - 1 + epsilon) * psi_01
    psi_00 = alambda2 * psi_01
    psi_11 = alambda1 * psi_01
    # The derived sink/source capacity contributions must be non-negative
    # for the min-cut; fall back to a symmetric split when they are not.
    sink_cap = 0.5 * psi_11 + 0.25 * (psi_10 - psi_01)
    source_cap = 0.5 * psi_00 + 0.25 * (psi_10 - psi_01)
    if min(sink_cap, source_cap) < 0:
        print('PB EDGE NEGATIVE')
        psi_00 = psi_11 = 0.5 * psi_01
    # Submodularity check: same-label energy should not exceed mixed-label.
    if psi_00 + psi_11 > psi_01 + psi_10:
        print(u1, u2)
        print('psi01', psi_01)
        print('psi11', psi_11)
        print('psi00', psi_00)
        print('psi10', psi_10)
        print("\n")
    return [psi_00, psi_01, psi_10, psi_11]
| 6,747 | 27.837607 | 115 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/botcode/MPI_graphCut.py | #################################################################################
################################# IMPORTS #######################################
#################################################################################
## BASIC
import os
import sys
import math
import datetime
import random
import numpy as np
import networkx as nx
## OWN
from ioHELPER import *
from networkClassifierHELPER import *
## PARALLELIZE JOBS
from mpi4py import MPI
###################################################################################
################################# SETUP MPI #######################################
###################################################################################
nproc = MPI.COMM_WORLD.Get_size() # Size of communicator
rank = MPI.COMM_WORLD.Get_rank() # Ranks in communicator
inode = MPI.Get_processor_name() # Node where this MPI process runs
comm = MPI.COMM_WORLD
#################################################################################################
################################# READ INPUT HYPERPARAMS #######################################
#################################################################################################
# Command-line hyperparameters (see the paper's notation in the comments).
mu = float(sys.argv[1]) ##this is called gamma in the paper
alpha1 = float(sys.argv[2]) ##alpha_1 in the paper
alpha2 = float(sys.argv[3]) ##alpha_2 in the paper
iterations = int(sys.argv[4]) ##number of iterations (=1 cut/classify iteration in paper)
# BUGFIX: was `sys.aragv[5]`, which raised AttributeError before any work ran.
db = sys.argv[5] ##name of the database/event studied (must match the DB_NAME name in RT_grahs/DB_NAME_G0_RT_GRAPH.csv)
mode = sys.argv[6] ##choose the prior to use : no prior, botometer scores, random scores, verified accounts, friends/fol ratio/score
alambda1 = float(sys.argv[7]) ##lamba11 parameter in paper, chosen equal to 0.8
alambda2 = float(sys.argv[8])##lambda00 parameter in paper, chose equal to 0.6
epsilon = 10**(-float(sys.argv[9]))##named delta in paper, should be close to 0 (eg. 0.001) in order for lambda10 to be slightly > to lambda00+lambda11-1.
SEED = int(sys.argv[10])
alpha=[mu,alpha1,alpha2]  # packed for psi(): [mu, alpha1, alpha2]
##Sanity Check
print('Here')
######################################################################################################
################################# READ INPUT RETWEET GRAPH  ##########################################
######################################################################################################
G0=readCSVFile_G('RT_graphs/'+db+'_G0_RT_GRAPH.csv')
##create directories for storing processor wise results
if not os.path.exists('./'+db+'_subGraphs'):
    os.makedirs('./'+db+'_subGraphs')
if not os.path.exists('./network_piBots_'+db):
    os.makedirs('./network_piBots_'+db)
##split all users in batches
# Batch size assigns one slice of users to each of the nproc-1 workers;
# the last worker takes the remainder (see the rank<nproc-2 branch below).
all_users = sorted(list(G0.nodes()))
totUsers = len(all_users)
batch_size = int(totUsers/(nproc-1))
##non master processors compute the local probability of being on either side ot the cut
# NOTE(review): the three lines below recompute exactly the values above —
# redundant, but harmless (kept byte-identical).
all_users = sorted(list(G0.nodes()))
totUsers = len(all_users)
batch_size = int(totUsers/(nproc-1))
# Worker ranks (everything except the last rank, which is the master):
# each iteration they wait for a go-signal, reload the cut (PL) and the
# energy graph H that the master wrote to disk, then compute local bot
# probabilities for their slice of users and send them back.
if(rank !=nproc-1):
    countC = 0
    while(True and countC < iterations):
        local_piBot={}
        # Blocking handshake: master sends True once the cut files exist.
        ready = comm.recv(source=nproc-1)
        PL=[int(i) for i in readCustomFile('./'+db+'_subGraphs/PL_mu_'+str(mu)+'_alpha1_'+str(alpha1)+'_alpha2_'+str(alpha2)+'_lambda1_'+str(alambda1)+'_lambda2_'+str(alambda2)+'_epsilon_'+str(epsilon)+'_mode_'+mode+'.csv')]
        H=readCSVFile_H('./'+db+'_subGraphs/H_mu_'+str(mu)+'_alpha1_'+str(alpha1)+'_alpha2_'+str(alpha2)+'_lambda1_'+str(alambda1)+'_lambda2_'+str(alambda2)+'_epsilon_'+str(epsilon)+'_mode_'+mode+'_'+str(0)+'.csv')
        # All workers except the last take a fixed-size slice; the last
        # takes whatever remains.
        if(rank<nproc-2):
            subUsers = all_users[rank*batch_size:(rank+1)*batch_size]
        else :
            subUsers = all_users[rank*batch_size:]
        print(rank, 'gotya, I have ', len(subUsers), ' accounts to compute')
        count=0
        for node in subUsers:
            # Neighbors in H, excluding the artificial source (1) / sink (0).
            neighbors=list(np.unique([i for i in nx.all_neighbors(H,node) if i not in [0,1]]))
            ebots=list(np.unique(np.intersect1d(neighbors,PL)))
            ehumans=list(set(neighbors)-set(ebots))
            # Net capacity pull toward the human vs bot side of the cut.
            psi_l= sum([H[node][j]['capacity'] for j in ehumans])- sum([H[node][i]['capacity'] for i in ebots])
            psi_l_bis= psi_l + H[node][0]['capacity'] - H[1][node]['capacity'] ##proba to be in 1 = notPL
            # Beyond exp argument ~12 the sigmoid is effectively 0.
            if (psi_l_bis)>12:
                local_piBot[node] = 0
            else:
                local_piBot[node] = 1./(1+np.exp(psi_l_bis)) #Probability in the target (0) class
        print(rank, 'done, there you go ')
        comm.send(local_piBot, dest=nproc-1)
        countC +=1
##master processor first sends cut info, then aggregates results of local pibots from other processors
if(rank==nproc-1):
np.random.seed(SEED)
countP=0
piBot = dict.fromkeys(all_users,0.5)
##base mode = no prior = all accounts set to proba bot=0.5 at beginning
piBot = dict.fromkeys(all_users,0.5)
##different modes for different priors.
##use botometer scores
if(mode=='boto'):
boto = readCustomDic_Pibot('botometer_scores/'+db+'_fullBotometer_piBots.csv')
med = np.median(list(boto.values()))
for i in piBot:
if(i in boto):
piBot[i]=boto[i]
else:
piBot[i]=med
##random priors
elif(mode=='random_unif'):
rand=np.random.uniform(0,1,len(piBot))
piBot=dict(zip(all_users,rand))
elif(mode=='random_gauss'):
rand=np.random.normal(0.5,0.1,len(piBot))
piBot=dict(zip(all_users,rand))
##############################
###### Perform the cut ######
##############################
while(True and countP < iterations):
inDeg = G0.in_degree(weight='weight')
if(type(inDeg)!=dict):
graph_in = dict((x,y) for x, y in inDeg)
else:
graph_in = inDeg
outDeg = G0.out_degree(weight='weight')
if(type(outDeg)!=dict):
graph_out = dict((x,y) for x, y in outDeg)
else :
graph_out = outDeg
print("Starting get link data step")
link_data = getLinkDataRestrained(G0)
for n in G0.nodes():
if n not in graph_in.keys():
graph_in[n]=0
if n not in graph_out.keys():
graph_out[n]=0
edgelist_data =[(i[0], i[1], psi(i[0],i[1],i[4],graph_in, graph_out,alpha,alambda1,alambda2,epsilon)) for i in link_data]
print("tot edgelist", len(edgelist_data))
##ease computations by only keeping edges with non zero weight
edgelist_data = [t for t in edgelist_data if sum(t[2]) > 0]
print("only > 0 edgelist", len(edgelist_data))
H, PL, user_data = computeH(G0, piBot, edgelist_data, graph_out, graph_in)
print(rank, 'completed graph cut, send it to children')
writeCSVFile('./'+db+'_subGraphs/PL_mu_'+str(mu)+'_alpha1_'+str(alpha1)+'_alpha2_'+str(alpha2)+'_lambda1_'+str(alambda1)+'_lambda2_'+str(alambda2)+'_epsilon_'+str(epsilon)+'_mode_'+mode+'.csv',PL)
writeCSVFile_H('./'+db+'_subGraphs/H_mu_'+str(mu)+'_alpha1_'+str(alpha1)+'_alpha2_'+str(alpha2)+'_lambda1_'+str(alambda1)+'_lambda2_'+str(alambda2)+'_epsilon_'+str(epsilon)+'_mode_'+mode+'_'+str(0)+'.csv',H)
##send a flag to children processors to start computing local pibot
for i in range(0,nproc-2):
comm.send(True, dest=i)
comm.send(True, dest=nproc-2)
##receive results from children and aggregate
print(rank, 'ready to receive')
gather=[]
for i in range(0,nproc-1):
r = comm.recv(source=i)
gather.append(r)
print(rank, ' received from ', i)
users = list(user_data.keys())
clustering =dict.fromkeys(users,0)
for user in PL:
user_data[user]['clustering'] = 1
clustering[user] = 1
print(rank, ' received ', len(gather), ' local piBots')
piBot = {}
for d in gather:
for user in d:
piBot[user] = d[user]
##write result of aggregation. Clustering= hard score, piBot= continuous score between 0 and 1 (pick threshold)
writeCSVFile_piBot('./network_piBots_'+db+'/ntwk_piBot_mu_'+str(mu)+'_alpha1_'+str(alpha1)+'_alpha2_'+str(alpha2)+'_lambda1_'+str(alambda1)+'_lambda2_'+str(alambda2)+'_epsilon_'+str(epsilon)+'_mode_'+mode+'_iteration_'+str(countP)+'_SEED_'+str(SEED)+'.csv', piBot)
#writeCSVFile_piBot('./network_piBots_'+db+'/ntwk_clustering_mu_'+str(mu)+'_alpha1_'+str(alpha1)+'_alpha2_'+str(alpha2)+'_lambda1_'+str(alambda1)+'_lambda2_'+str(alambda2)+'_epsilon_'+str(epsilon)+'_mode_'+mode+'_iteration_'+str(countP)+'.csv', clustering)
countP += 1
MPI.Finalize()
| 8,155 | 35.410714 | 266 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/botcode/ioHELPER.py | import numpy as np
from os import listdir
from os.path import isfile, join
import datetime
import networkx as nx
def readCSVFile_urls(path):
    """Read a ';'-separated file into {first_field: second_field}.

    Blank lines are skipped; fields keep their string type.
    """
    with open(path, 'r') as handle:
        raw = handle.read()
    mapping = {}
    for row in raw.split('\n'):
        if row:
            fields = row.split(';')
            mapping[fields[0]] = fields[1]
    return mapping
def readCSVFile_urls_urls(path):
    """Read 'url;[id, id, ...]' rows into {url: [int ids]}.

    The second field is expected to look like a Python list literal; its
    surrounding brackets are stripped before splitting on ', '. Rows that
    fail to parse are silently skipped (bare except kept from original).
    """
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line) > 0):
            try:
                temp = line.split(';')
                res[temp[0]] = [int(i) for i in temp[1][1:-1].split(', ')]
            except:
                continue;
    return res;
def readCSVFile_urls_users(path):
    """Read 'user_id;url' rows into {int user_id: url string}."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line) > 0):
            temp = line.split(';')
            res[int(temp[0])] = temp[1]
    return res;
def readCSVFile_screenNames(path):
    """Read 'user_id;screen_name' rows into {int user_id: screen_name}."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line) > 0):
            temp = line.split(';')
            res[int(temp[0])] = temp[1]
    return res;
def readCustomFile(path):
    """Return every non-empty line of the file at *path*, in order."""
    with open(path, 'r') as handle:
        content = handle.read()
    return [row for row in content.split('\n') if row]
def readCustomDic_Pibot(path):
    """Read 'user_id;score' rows into {int user_id: float score}."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[int(temp[0])] = float(temp[1])
    return res;
def readCustomDic_hashtagsPibot(path):
    """Read 'hashtag;score' rows into {hashtag string: float score}."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[temp[0]] = float(temp[1])
    return res;
def readCustomTemp(path):
    """Read each multi-field ';' row as a list of strings (rows with a
    single field, e.g. blank lines, are dropped)."""
    file = open(path, 'r').read().split('\n')
    res = []
    for line in file:
        temp = line.split(';')
        if len(temp)>1:
            res.append(temp)
    return res;
def readCustomDic(path):
    """Read ';'-separated rows into {first_field: [remaining fields]}.

    FIX: the original had no blank-line guard, so the trailing newline of a
    file produced a spurious ``''`` key mapped to ``[]`` — inconsistent with
    every sibling readCustomDic_* reader, which all skip empty rows. Blank
    lines are now skipped; non-blank rows behave exactly as before.
    """
    res = {}
    with open(path, 'r') as fh:
        for line in fh.read().split('\n'):
            if line:
                fields = line.split(';')
                res[fields[0]] = fields[1:]
    return res
def readCustomDic_graph(path):
    """Read 'user_id;id;id;...' rows into {int user_id: [int ids]}."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[int(temp[0])] = [int(i) for i in temp[1:]]
    return res;
def readCustomDic_folGraph(path):
    """Same row format as readCustomDic_graph, but guards on non-empty
    lines instead of field count (a 'user;' row would raise here)."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line)>0):
            temp = line.split(';')
            res[int(temp[0])] = [int(i) for i in temp[1:]]
    return res;
def readCustomDic_interRTTimes(path):
    """Read 'user_id;t;t;...' rows into {int user_id: [float times]}."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[int(temp[0])] = [float(i) for i in temp[1:]]
    return res;
def readCustomDic_table(path):
    """Read 'user_id;field;field;...' rows into {int user_id: [strings]}."""
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[int(temp[0])] = [str(i) for i in temp[1:]]
    return res;
def readCustomDic_index(path):
    """Read 'user_id;key-->value;key-->value;...;' rows (as produced by
    writeCSVFile_index) into {int user_id: {key: value}}.

    Skips the last line (assumed empty) and the trailing empty field that
    the writer's terminal ';' creates.
    """
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file[:-1]:
        temp = line.split(';')
        user = int(temp[0])
        sub_dic={}
        for i in temp[1:-1]:
            key=i.split("-->")[0]
            value=i.split("-->")[1]
            sub_dic[key] = value
        res[user]=sub_dic
    return res;
def writeCSVFile(path, list):
    """Write each item of *list* on its own line at *path*.

    Each item is also echoed to stdout (legacy behavior preserved).
    Returns 0 on completion.
    """
    sink = open(path, 'w')
    for item in list:
        text = str(item)
        print(text)
        sink.write(text + '\n')
    sink.close()
    return 0
def completeCSVFile(path, list):
    """Append (mode 'a') each item of *list* on its own line, echoing to
    stdout. Returns 0."""
    file = open(path, 'a')
    for i in list:
        line = str(i)
        print(line)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_screenNames(path, dic):
    """Write 'user_id;screen_name' rows from {user_id: screen_name}."""
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        SN = dic[user]
        line += ';' + SN
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_table(path, table):
    """Write a table (list of rows) as ';'-separated lines.

    The first row is treated as a 6-column header; remaining rows are
    stringified and joined. Progress is echoed per data row.
    """
    file = open(path, 'w')
    count=0
    file.write(table[0][0]+';'+table[0][1]+';'+table[0][2]+';'+table[0][3]+';'+table[0][4]+';'+table[0][5])
    file.write('\n')
    for i in table[1:]:
        count+=1
        print("at user n"+str(count))
        data = [str(j) for j in i]
        line = ';'.join(data)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_dic(path, dic):
    """Write 'user;v1;v2;...' rows; a user with an empty list still gets
    a trailing ';' so the row parses to an empty value list."""
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            friends = [str(i) for i in dic[user]]
            line += ';'
            line += ';'.join(friends)
        else:
            line+=';'
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_dic_urls(path, dic):
    """Write 'url;value' rows; entries with an empty value are written as
    the bare key (no ';' — asymmetric with writeCSVFile_dic)."""
    file = open(path, 'w')
    for url in dic.keys():
        line = str(url)
        if(len(dic[url]) > 0):
            line += ';'
            line += dic[url]
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_interRTTimes(path, dic):
    """Write 'user;t1;t2;...' rows from {user: [inter-retweet times]}."""
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            intertimes = [str(i) for i in dic[user]]
            line += ';'
            line += ';'.join(intertimes)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_positions(path, dic):
    """Write 'user;x;y' rows from {user: (x, y)} layout positions."""
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            line += ';'
            x = str(dic[user][0])
            line += x
            line+= ';'
            y = str(dic[user][1])
            line += y
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_datetimes(path, dic):
    """Write 'user;MM/DD/YYYY - HH:MM:SS;...' rows from
    {user: [datetime objects]}."""
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            intertimes = [i.strftime('%m/%d/%Y - %H:%M:%S') for i in dic[user]]
            line += ';'
            line += ';'.join(intertimes)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_piBot(path, dic):
    """Write 'user;score' rows from a {user: bot-probability} dict.

    Returns 0 on completion; rows follow the dict's iteration order.
    """
    with open(path, 'w') as sink:
        for user, score in dic.items():
            sink.write(str(user) + ';' + str(score) + '\n')
    return 0
def writeCSVFile_index(path, dic):
    """Write 'user;key-->value;...;' rows from a nested dict
    {user: {key: value}}. Each row ends with a trailing ';' which the
    matching reader (readCustomDic_index) strips."""
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user) + ';'
        sub_dic = dic[user]
        for key in sub_dic:
            line = line + str(key) + "-->" + str(sub_dic[key]) + ";"
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_G(path, G):
    """Write a weighted directed graph as 'src;dst;weight' rows."""
    file = open(path, 'w')
    for i in G.edges(data=True):
        line = str(i[0]) + ';' + str(i[1]) + ';' + str(i[2]['weight'])
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_Gzero(path, G):
    """Write an unweighted edge list as 'src;dst' rows."""
    file = open(path, 'w')
    for i in G.edges(data=True):
        line = str(i[0]) + ';' + str(i[1])
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_H(path, G):
    """Write an energy/flow graph as 'src;dst;capacity' rows (the edge
    attribute is 'capacity', unlike writeCSVFile_G's 'weight')."""
    file = open(path, 'w')
    for i in G.edges(data=True):
        line = str(i[0]) + ';' + str(i[1]) + ';' + str(i[2]['capacity'])
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
def writeCSVFile_undirG(path, G):
    """Write an undirected user-url bipartite graph as 'user;url' rows.

    For each edge, the string endpoint is treated as the URL and the other
    endpoint as the user id. FIX: the original tested
    ``type(i[0]) == np.str_``, which (a) missed plain Python strings and
    (b) breaks on NumPy >= 2.0 where the ``np.str_`` alias was removed.
    ``isinstance(..., str)`` covers both, since numpy's str_ subclasses str.
    Returns 0 on completion.
    """
    file = open(path, 'w')
    for edge in G.edges(data=True):
        if isinstance(edge[0], str):
            url, user = edge[0], edge[1]
        else:
            url, user = edge[1], edge[0]
        line = str(user) + ';' + str(url)
        file.write(line)
        file.write('\n')
    file.close()
    return 0
def readCSVFile_positions(path):
    """Read 'user;x;y' rows into {int user: np.array([x, y])}."""
    positions = {}
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            positions[int(split[0])]=np.array([float(split[1]),float(split[2])])
    return positions;
def readCSVFile_undirG(path):
    """Read 'src;dst;weight' rows into an undirected weighted nx.Graph
    (node ids parsed as ints)."""
    G = nx.Graph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]),weight = float(split[2]))
    return G;
def readCSVFile_undir_unweighted(path):
    """Read 'src;dst' rows into an undirected unweighted nx.Graph."""
    G = nx.Graph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]))
    return G;
def readCSVFile_G(path):
    """Read 'src;dst;weight' rows into a directed weighted nx.DiGraph
    (inverse of writeCSVFile_G)."""
    G = nx.DiGraph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]),weight = float(split[2]))
    return G;
def readCSVFile_Gzero(path):
    """Read 'src;dst' rows into a directed unweighted nx.DiGraph."""
    G = nx.DiGraph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]))
    return G;
def readCSVFile_H(path):
    """Read 'src;dst;capacity' rows into a directed nx.DiGraph with a
    'capacity' edge attribute (inverse of writeCSVFile_H)."""
    H = nx.DiGraph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            H.add_edge(int(split[0]),int(split[1]), capacity = float(split[2]))
    return H;
# def read_data(dirname):
# fileList = [ f for f in listdir(dirname) if isfile(join(dirname,f)) ]
# dic = {}
# for f in fileList:
| 8,254 | 18.939614 | 104 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/bot_impact_v2/assess_impeachment_analysis.py | # -*- coding: utf-8 -*-
"""Assess Impeachment Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1UZxvODJREDEIg4KuTqqIhCJ3f6psRb91
# Assess Bot Impact on Impeachment Analysis
This code will let you analyze the bot impact that has been calculated for each day during the impeachment
"""
#from assess_helper import *
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import sparse
import sys
import networkx as nx
import os
import seaborn as sns
"""## Mount Google Drive
The data files are located in the Google Drive path. This is also where we will save network files and assess results.
"""
from google.colab import drive
drive.mount('/content/drive')
path = '/content/drive/My Drive/NeuralNetworkOpinions/Impeachment/'
path_dates = path+"daily_active_edge_friend_graphs_v5/"
Dates = sorted(os.listdir(path_dates))
print(f"Impeachment data for {len(Dates)} days\n{Dates[0]} to {Dates[-1]}")
"""## Helper functions
These functions collect basic statistics about the tweets, users, and bots in each day.
"""
def bot_reach(df):
    """Count users who follow at least one bot, split by bot opinion class.

    A user "follows" a bot when the bot's screen name appears in the
    user's comma-separated ``friend_names`` column. Bots with
    opinion_tweet < 0.5 are class 0 (anti), > 0.5 class 1 (pro); exactly
    0.5 falls in neither. Returns ``(reach_any, reach_0, reach_1)``.
    """
    is_bot = df.bot == 1
    anti_bots = set(df.screen_name[is_bot & (df.opinion_tweet < 0.5)].values)
    pro_bots = set(df.screen_name[is_bot & (df.opinion_tweet > 0.5)].values)
    anti_followers = set()
    pro_followers = set()
    for _, row in df.iterrows():
        friends = row.friend_names.split(",")
        if any(f in anti_bots for f in friends):
            anti_followers.add(row.screen_name)
        if any(f in pro_bots for f in friends):
            pro_followers.add(row.screen_name)
    total_reach = len(anti_followers | pro_followers)
    return (total_reach, len(anti_followers), len(pro_followers))
def date_data(df,df_assess):
    """Collect one day's summary statistics into a flat dict.

    *df* holds per-user rows (bot flag, opinion_tweet, tweet rate,
    friend_names); *df_assess* holds equilibrium-opinion columns.
    NOTE(review): the returned 'date' value is read from the enclosing
    module-level ``date`` variable (set inside the processing loop), not
    from a parameter — this function must be called with that global set.
    """
    #mean shift with and without bots
    mu_bot = df_assess.opinion_equilibrium_bot.mean()
    mu_nobot = df_assess.opinion_equilibrium_nobot.mean()
    std_bot = df_assess.opinion_equilibrium_bot.std()
    std_nobot = df_assess.opinion_equilibrium_nobot.std()
    #human count
    nh_0 = len(df[(df.bot==0) & (df.opinion_tweet<0.5)])
    nh_1 = len(df[(df.bot==0) & (df.opinion_tweet>0.5)])
    nh = len(df[df.bot==0])
    #bot count
    nbots_0 = len(df[(df.bot==1) & (df.opinion_tweet<0.5)])
    nbots_1 = len(df[(df.bot==1) & (df.opinion_tweet>0.5)])
    nbots = len(df[df.bot==1])
    # NOTE(review): n, nnonstub and nstub are computed but never included
    # in the returned dict — dead locals kept for fidelity.
    n = len(df)
    nnonstub = len(df_assess[df_assess.stubborn==0])
    nstub = len(df_assess[df_assess.stubborn==1])
    #non-bot tweets
    ntweets_0 = df.rate[(df.bot==0) & (df.opinion_tweet<0.5)].sum()
    ntweets_1 = df.rate[(df.bot==0) & (df.opinion_tweet>0.5)].sum()
    ntweets = df.rate[df.bot==0].sum()
    #bot tweets
    ntweets_bot0 = df.rate[(df.bot==1) & (df.opinion_tweet<0.5)].sum()
    ntweets_bot1 = df.rate[(df.bot==1) & (df.opinion_tweet>0.5)].sum()
    ntweets_bot = df.rate[df.bot==1].sum()
    (reach,reach0,reach1) = bot_reach(df)
    date_dict ={'date':date,'num_human':nh,'num_human_0':nh_0, 'num_human_1':nh_1,
                'num_bot':nbots,'num_bot_0':nbots_0, 'num_bot_1':nbots_1,
                'num_human_tweets':ntweets,'num_human_0_tweets':ntweets_0, 'num_human_1_tweets':ntweets_1,
                'num_bot_tweets':ntweets_bot,'num_bot_0_tweets':ntweets_bot0, 'num_bot_1_tweets':ntweets_bot1,
                'mean_opinion_equilibrium_nobot':mu_nobot,'mean_opinion_equilibrium_bot':mu_bot,
                'std_opinion_equilibrium_nobot':std_nobot,'std_opinion_equilibrium_bot':std_bot,
                'reach_bot':reach,'reach_bot_0':reach0,'reach_bot_1':reach1}
    return date_dict
# Commented out IPython magic to ensure Python compatibility.
# %%time
#
# Data = []
#
# for cnt,date in enumerate(Dates):
# #if cnt>3:break
# print("Assess for %s (%s of %s)"%(date,cnt,len(Dates)))
#
# path_data = path_dates+ '%s/'%date
# model_name = 'BERT_Impeachment_800KTweets'
# node_pred_filename = path_data+"nodes_%s.csv"%model_name
# node_filename = path_data+"nodes.csv"
# assess_filename = path_data+"assess.csv"
#
# print("\tLoading data")
# df_nodes = pd.read_csv(node_filename)
# df_nodes.reset_index()
# df_nodes_pred = pd.read_csv(node_pred_filename)
#
# print("\tMerge dataframes")
# df = df_nodes.merge(df_nodes_pred,how ='inner')
# df["bot"] = df["bot"].astype(int)
#
#
# df_assess = pd.read_csv(assess_filename)
# #df_opinions = df.merge(df_assess,how='inner')
#
# print(f"\tGet basic stats for each day")
# date_dict = date_data(df,df_assess)
# Data.append(date_dict)
# print(f"\tSave data to csv")
# df_date = pd.DataFrame.from_dict(Data)
# df_date.to_csv(path+"assess_all_days.csv")
#
#
# print(df_date.head())
# NOTE(review): df_date is only assigned inside the commented-out
# ("# %%time") Colab cell above; running this module as a plain script
# raises NameError here. In the notebook the cell runs first.
df_date.head()
## plot user vs date, tweets vs date, separated by sentiment
format='%Y%m%d'  # NOTE(review): unused (and shadows the builtin); to_datetime infers the format
df_date['datetime'] = pd.to_datetime(df_date.date)
#supported values are '-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted'
sns.set_style("whitegrid")
f, ax = plt.subplots(figsize=(16, 8))
# Top panel: daily bot counts split by opinion class.
plt.subplot(2,1,1)
#sns.lineplot(data = df_date,x='datetime', y = 'num_bot',ls= 'solid',marker= "o",label = 'Number of bots',color = 'purple')
sns.lineplot(data = df_date,x='datetime', y = 'num_bot_0',ls= 'solid',marker= "o",label = 'Anti-Trump',color = 'blue')
sns.lineplot(data = df_date,x='datetime', y = 'num_bot_1',ls= 'solid',marker= "o",label = 'Pro-Trump',color = 'red')
plt.ylabel("Number of bots",fontsize = 16)
plt.xlabel("Date",fontsize = 16)
plt.legend()
# Bottom panel: daily human counts split the same way.
plt.subplot(2,1,2)
#sns.lineplot(data = df_date,x='datetime', y = 'num_human',ls= 'solid',marker= "s",label = 'Number of users',color = 'purple')
sns.lineplot(data = df_date,x='datetime', y = 'num_human_0',ls= 'solid',marker= "s",label = 'Anti-Trump',color = 'blue')
sns.lineplot(data = df_date,x='datetime', y = 'num_human_1',ls= 'solid',marker= "s",label = 'Pro-Trump',color = 'red')
plt.ylabel("Number of humans",fontsize = 16)
plt.xlabel("Date",fontsize = 16)
plt.legend()
plt.show()
"""## Plot number of tweets per day, humans vs bots, pro vs anti Trump"""
#Plot number of tweets per day, humans vs bots, pro vs anti Trump
#supported values are '-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted'
sns.set_style("whitegrid")
f, ax = plt.subplots(figsize=(16, 8))
plt.subplot(1,1,1)
#sns.lineplot(data = df_date,x='datetime', y = 'num_bot_tweets',ls= 'solid',marker= "o",label = 'Number of bots',color = 'purple')
sns.lineplot(data = df_date,x='datetime', y = 'num_bot_0_tweets',ls= 'solid',marker= "X",label = 'Anti-Trump',color = 'blue')
sns.lineplot(data = df_date,x='datetime', y = 'num_bot_1_tweets',ls= 'solid',marker= "X",label = 'Pro-Trump',color = 'red')
plt.ylabel("Number of bot tweets",fontsize = 16)
plt.xlabel("Date",fontsize = 16)
plt.legend()
#plt.subplot(2,1,2)
#sns.lineplot(data = df_date,x='datetime', y = 'num_human_tweets',ls= 'solid',marker= "s",label = 'Number of users',color = 'purple')
sns.lineplot(data = df_date,x='datetime', y = 'num_human_0_tweets',ls= 'solid',marker= "s",label = 'Anti-Trump',color = [0,0,.75])
sns.lineplot(data = df_date,x='datetime', y = 'num_human_1_tweets',ls= 'solid',marker= "s",label = 'Pro-Trump',color = [0.75,0,0])
plt.ylabel("Number of tweets",fontsize = 16)
plt.xlabel("Date",fontsize = 16)
plt.legend()
plt.show()
"""## Plot bot impact per day"""
sns.set_style("whitegrid")
f, ax = plt.subplots(figsize=(16, 8))
sns.lineplot(data = df_date,x='datetime', y = 'mean_opinion_equilibrium_nobot', ls= 'solid',marker= "o", label = 'No bots', color = 'gray')
sns.lineplot(data = df_date,x='datetime', y = 'mean_opinion_equilibrium_bot', ls= 'solid',marker= "o", label = 'Bots', color = 'purple')
plt.ylabel("Mean Pro-Trump Opinion",fontsize = 16)
plt.xlabel("Date",fontsize = 16)
plt.legend()
df_date['opinion_shift'] = df_date.mean_opinion_equilibrium_bot - df_date.mean_opinion_equilibrium_nobot
sns.set_style("whitegrid")
f, ax = plt.subplots(figsize=(12, 8))
pal = sns.color_palette("coolwarm", len(df_date)) #https://seaborn.pydata.org/tutorial/color_palettes.html#diverging-color-palettes
rank = -df_date.opinion_shift.argsort().argsort() # http://stackoverflow.com/a/6266510/1628638
sns.lineplot(data = df_date,x='datetime', y = 'opinion_shift')
sns.barplot(data = df_date,x='datetime', y = 'opinion_shift', palette=np.array(pal[::-1])[rank])
plt.ylabel("Bot induced opinion shift",fontsize = 16)
plt.xlabel("Date",fontsize = 16)
"""## Plot bot reach
Plot number of unique followers of pro and anti-Trump bots
"""
sns.set_style("whitegrid")
f, ax = plt.subplots(figsize=(16, 8))
#sns.lineplot(data = df_date,x='datetime', y = 'num_bot_tweets',ls= 'solid',marker= "o",label = 'Number of bots',color = 'purple')
sns.lineplot(data = df_date,x='datetime', y = 'reach_bot_0',ls= 'solid',marker= "o",label = 'Anti-Trump',color = 'blue')
sns.lineplot(data = df_date,x='datetime', y = 'reach_bot_1',ls= 'solid',marker= "o",label = 'Pro-Trump',color = 'red')
plt.ylabel("Number of unique bot followers",fontsize = 16)
plt.xlabel("Date",fontsize = 16)
plt.legend() | 9,005 | 38.156522 | 139 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/botcode_v2/ising_model_bot_detector.py | # -*- coding: utf-8 -*-
"""Ising Model Bot Detector.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Ou1TXypk5YA-DxSFRi55HsNwiELue7Cl
# Ising Model Bot Detection
This notebook lets you detect bots in a retweet network using the Ising model algorithm from the paper "Detecting Bots and Assessing Their Impact in Social Networks" https://arxiv.org/abs/1810.12398.
You will need a retweet graph saved as a networkx object and the helper file networkClassifierHELPER.py and the file ioHELPER.py
"""
import os
import sys
import math
import datetime, time
import random
import numpy as np
import networkx as nx
import sqlite3,sys,os,string
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from scipy.sparse import csc_matrix
from networkClassifierHELPER import *
"""## Load Retweet graph
path_data = folder where the retweet graph file is
Gretweet = retweet graph networkx DiGraph object.
The format of the edges is (u,v,$weight$).
This means u retweeted v $weight$ times.
Another way to view this is a node's out-degree is the number of nodes it retweeted, a node's in-degree is the number of people who retweet it.
We expect dout to be large for bots, din to be large for popular humans.
"""
path_data = "C://Users//Zlisto//Dropbox (Personal)//MIDAC//BotDetection//Pizzagate//"
fname_Gretweet = path_data + "Gretweet_pizzagate.gpickle"
Gretweet = nx.read_gpickle(fname_Gretweet)
n = Gretweet.number_of_nodes()
ne = Gretweet.number_of_edges()
Din = Gretweet.in_degree(weight='weight')
Dout = Gretweet.out_degree(weight='weight')
Dout_list = [x[1] for x in Dout]
Din_list = [x[1] for x in Din]
print("Gretweet has %s node, %s edges"%(n,ne))
print("Nodes max retweet = %.2f"%max(Dout_list))
print("Nodes max retweet count %.2f"%max(Din_list))
"""## Define Ising model algorithm parameters
Use the default values provided here.
lambdaij = edge energy scale factor for node type $i$ retweeting node type $j$. The types are human = 0, bot = 1.
epsilon = edge energy parameter
alpha_out = upper quantiles of out-degree of graph (how much a node retweets)
alpha_in = upper quantiles of in-degree of graph (how much a node is retweeted)
mu = scale of edge energies w.r.t. node energies. Set this to 1.
"""
print("Define Ising model parameters")
mu = 1
epsilon = 10**(-3) ##named delta in paper, should be close to 0 (eg. 0.001) in order for lambda10 to be slightly > to lambda00+lambda11-1.
lambda01 = 1
lambda00 = 0.61
lambda11 = 0.83
lambda10 = lambda00+ lambda11 - lambda01 + epsilon
q=0.999
alpha_in = np.quantile(Din_list,q)
alpha_out = np.quantile(Dout_list,q)
alpha=[mu,alpha_out,alpha_in]
print("alpha_out = %.2f"%alpha_out)
print("alpha_in = %.2f"%alpha_in)
"""## Create energy graph
PiBot = prior probability of being a bot for each node
link_data = list of edges, bidirectional indicator, and weight
edgelist_data = list of edges, edge energies
"""
PiBot = {}
for v in Gretweet.nodes():
PiBot[v]=0.5
#link_data[i] = [u,v,is (u,v) in E, is (v,u) in E, number times u rewteets v]
link_data = getLinkDataRestrained(Gretweet)
start_time = time.time()
print("Make edgelist_data")
#edgelist_data[i] = [u,v,(Psi00,Psi01,Psi10,Psi11)], these are the edge energies
#on edge (i,j) for the graph cut
edgelist_data =[(i[0], i[1], psi(i[0],i[1],i[4], Din, Dout,alpha,lambda00,lambda11,epsilon)) for i in link_data]
print("\tEdgelist has %s edges"%len(edgelist_data))
print("--- %s seconds ---" % (time.time() - start_time))
"""## Find Min-Cut of energy graph
H = energy graph
BotsIsing = list of nodes who are bots in min-cut
HumansIsing = list of nodes who are humans in min-cut
"""
start_time = time.time()
print("Cut graph")
H, BotsIsing, user_data = computeH(Gretweet, PiBot, edgelist_data, Dout, Din)
Nodes = []
for v in Gretweet.nodes(): Nodes.append(v)
HumansIsing = list(set(Nodes) - set(BotsIsing))
print('\tCompleted graph cut')
print("%s bots in %s nodes"%(len(BotsIsing),Gretweet.number_of_nodes()))
print("--- %s seconds ---" % (time.time() - start_time))
"""## Calculate Bot Probability
Find the probability each node is a bot using classification found from min-cut of energy graph.
THIS TAKES A LONG TIME
PiBotFinal = dictionary of bot probabilities.
"""
start_time = time.time()
print("Calculate bot probability for each labeled node in retweet graph")
PiBotFinal = {}
for counter,node in enumerate(Gretweet.nodes()):
if counter%1000==0:print("Node %s"%counter)
if node in Gretweet.nodes():
neighbors=list(np.unique([i for i in nx.all_neighbors(H,node) if i not in [0,1]]))
ebots=list(np.unique(np.intersect1d(neighbors,BotsIsing)))
ehumans=list(set(neighbors)-set(ebots))
psi_l= sum([H[node][j]['capacity'] for j in ehumans])- sum([H[node][i]['capacity'] for i in ebots])
psi_l_bis= psi_l + H[node][0]['capacity'] - H[1][node]['capacity'] ##probability to be in 1 = notPL
if (psi_l_bis)>12:
PiBotFinal[node] = 0
else:
PiBotFinal[node] = 1./(1+np.exp(psi_l_bis)) #Probability in the target (0) class
print("--- %s seconds ---" % (time.time() - start_time))
"""## Save probabilities to file
Convert dictionary of bot probabilities to a dataframe and write to a csv file.
"""
dfPiBot = pd.DataFrame(list(PiBotFinal.items()),columns = ['screen_name','bot_probability'])
FilenamePiBot = path_data+ "Pibot_pizzagate.csv"
dfPiBot.to_csv(FilenamePiBot)
print("Wrote bot probabilities to %s"%FilenamePiBot)
"""## Histogram of Bot Probabilities
Plot a histogram of the bot probabilities so you can see what a good threshold is
"""
data = dfPiBot.bot_probability
num_bins = round(len(data)/10)
counts, bin_edges = np.histogram (data, bins=num_bins, normed=True)
cdf = np.cumsum (counts)
plt.plot (bin_edges[1:], cdf/cdf[-1])
plt.grid()
plt.xlabel("Bot probability")
plt.ylabel("CDF")
nlow = len(dfPiBot[dfPiBot.bot_probability<0.5])
nhigh = len(dfPiBot[dfPiBot.bot_probability>0.5])
nmid = len(dfPiBot[dfPiBot.bot_probability==0.5])
print("%s users bot prob<0.5\n%s users bot prob>0.5\n%s users bot prob=0.5\n"%(nlow,nmid,nhigh))
plt.hist(dfPiBot.bot_probability[dfPiBot.bot_probability<0.5]);
plt.hist(dfPiBot.bot_probability[dfPiBot.bot_probability>0.5]);
plt.grid()
plt.xlabel("Bot probability")
plt.ylabel("Frequency")
plt.title("No 0.5 probability users")
| 6,426 | 31.296482 | 201 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/botcode_v2/networkClassifierHELPER.py | import math
import networkx as nx
from collections import defaultdict
from operator import itemgetter
import numpy as np
import time
from ioHELPER import *
#####################################################################################################
####################### BUILD RETWEET NX-(SUB)GRAPH FROM DICTIONNARY ################################
#####################################################################################################
'''
Takes as input a csv file of retweet relationships and builds
a NetworkX object, in order to apply prebuilt mincut algorithms
'''
def buildRTGraph(graph, subNodes, lowerBound=0):
    '''
    Build a weighted retweet DiGraph from a {retweeter: [retweeted, ...]} mapping.

    INPUTS:
    ## graph (dict)
    mapping from a user ID to the (repeating) list of user IDs they retweeted
    ## subNodes (list of ints)
    a list of user IDs if you want to only consider a subgraph of the RT graph
    ## lowerBound (int)
    only add the edge when the retweet count from User1 to User2 is >= this bound
    '''
    G = nx.DiGraph()
    # restrict to retweeters that are both in the mapping and in subNodes
    eligible = list(np.unique(np.intersect1d(subNodes, list(graph.keys()))))
    for count, node in enumerate(eligible, start=1):
        print("at user n" + str(count) + " on " + str(len(graph)))
        # collapse the repeated-target list into per-target retweet counts
        targets, times = np.unique(graph[node], return_counts=True)
        weight_of = dict(zip(targets, times))
        for target in np.unique(np.intersect1d(targets, subNodes)):
            w = weight_of[target]
            if target != node and w >= lowerBound:
                G.add_node(node)
                G.add_node(target)
                G.add_edge(node, target, weight=w)
    return G
############################################################################
####################### BUILD/CUT ENERGY GRAPH #############################
############################################################################
'''
Takes as input the RT graph and builds the energy graph.
Then cuts the energy graph to classify
'''
def computeH(G, piBot ,edgelist_data, graph_out, graph_in):
    # Build the energy (flow) graph H from the retweet graph and edge energies,
    # then classify nodes as bots/humans via a source(1)->sink(0) minimum cut.
    H=nx.DiGraph()
    '''
    INPUTS:
    ## G (ntwkX graph)
    the Retweet Graph from buildRTGraph
    ## piBot (dict of floats)
    a dictionnary with prior on bot probabilities. Keys are users_ids, values are prior bot scores.
    ## edgelist_data (list of tuples)
    information about edges to build energy graph.
    This list comes in part from the getLinkDataRestrained method
    ## graph_out (dict of ints)
    a graph that stores out degrees of accounts in retweet graph
    ## graph_in (dict of ints)
    a graph that stores in degrees of accounts in retweet graph
    '''
    # Per-node energies: phi_0/phi_1 are -log prior (clamped >= 0); the 1e-20
    # offset avoids log(0) for priors of exactly 0 or 1.
    user_data={i:{
    'user_id':i,
    'out':graph_out[i],
    'in':graph_in[i],
    'old_prob': piBot[i],
    'phi_0': max(0,-np.log(float(10**(-20)+(1-piBot[i])))),
    'phi_1': max(0,-np.log(float(10**(-20)+ piBot[i]))),
    'prob':0,
    'clustering':0
    } for i in G.nodes()}
    # Pre-create every edge with capacity 0: both directions between linked
    # users, plus node->sink(0) and source(1)->node terminal edges.
    set_1 = [(el[0],el[1]) for el in edgelist_data]
    set_2 = [(el[1],el[0]) for el in edgelist_data]
    set_3 = [(el,0) for el in user_data]
    set_4 = [(1,el) for el in user_data]
    H.add_edges_from(set_1+set_2+set_3+set_4,capacity=0)
    for i in edgelist_data:
        # i[2] = (psi_00, psi_01, psi_10, psi_11) edge energies from psi()
        val_00 = i[2][0]
        val_01 = i[2][1]
        val_10 = i[2][2]
        val_11 = i[2][3]
        #edges between nodes
        H[i[0]][i[1]]['capacity']+= 0.5*(val_01+val_10-val_00-val_11)
        H[i[1]][i[0]]['capacity'] += 0.5*(val_01+val_10-val_00-val_11)
        #edges to sink (bot energy)
        H[i[0]][0]['capacity'] += 0.5*val_11+0.25*(val_10-val_01)
        H[i[1]][0]['capacity'] += 0.5*val_11+0.25*(val_01-val_10)
        #edges from source (human energy)
        H[1][i[0]]['capacity'] += 0.5*val_00+0.25*(val_01-val_10)
        H[1][i[1]]['capacity'] += 0.5*val_00+0.25*(val_10-val_01)
        # Sanity checks: min-cut requires non-negative capacities; a negative
        # value means the lambda/epsilon parameters are inconsistent.
        if(H[1][i[0]]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[i[1]][0]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[1][i[1]]['capacity']<0):
            print("Neg capacity")
            break;
        if(H[i[0]][0]['capacity']<0):
            print("Neg capacity")
            break;
    # Add the per-node prior energies onto the terminal edges.
    for i in user_data.keys():
        H[1][i]['capacity'] += user_data[i]['phi_0']
        if(H[1][i]['capacity'] <0):
            print("Neg capacity");
            break;
        H[i][0]['capacity'] += user_data[i]['phi_1']
        if(H[i][0]['capacity'] <0):
            print("Neg capacity");
            break;
    cut_value,mc=nx.minimum_cut(H,1,0)
    Bots = list(mc[0]) #mc = [nodes dont cut source edge (bots), nodes dont cut sink edge (humans)]
    if 0 in Bots: #wrong cut set because nodes have sink edge (humans)
        print("Double check")
        Bots = list(mc[1])
    # drop the source terminal from the returned bot list
    Bots.remove(1)
    return H, Bots, user_data
###############################################################################
####################### COMPUTE EDGES INFORMATION #############################
###############################################################################
'''
Takes as input the RT graph and retrieves information on edges
to further build H.
'''
def getLinkDataRestrained(G):
    '''
    Summarize every directed edge of the retweet graph for energy-graph building.

    INPUTS:
    ## G (ntwkX graph)
    the Retweet Graph from buildRTGraph

    Returns a list of [u, v, True, reciprocal, weight(u,v), weight(v,u) or 0]
    entries, one per directed edge, where `reciprocal` says whether the
    reverse edge (v, u) also exists.
    '''
    weights = dict(((u, v), attrs['weight']) for u, v, attrs in G.edges(data=True))
    link_data = []
    for (u, v), w in weights.items():
        reciprocal = (v, u) in weights
        w_back = weights[(v, u)] if reciprocal else 0
        link_data.append([u, v, True, reciprocal, w, w_back])
    return link_data
##########################################################################
####################### POTENTIAL FUNCTION ###############################
##########################################################################
'''
Compute joint energy potential between two users
'''
#INPUTS:
## u1 (int) ID of user u1
## u2 (int) ID of user u2
## wlr (int) number of retweets from u1 to u2
## out_graph (dict of ints) a graph that stores out degrees of accounts in retweet graph
## in_graph (dict of ints) a graph that stores in degrees of accounts in retweet graph
## alpha (list of floats)
## a list containing hyperparams (mu, alpha_out, alpha_in)
## lambda00 = ratio of psi_00 to psi_01
## lambda11 = ratio of psi_11 to psi_01
## epsilon (int)
## exponent such that lambda01=lambda11+lambda00-1+epsilon
def psi(u1, u2, wlr, in_graph, out_graph,alpha,lambda00,lambda11,epsilon):
    """Return the joint edge energies [psi_00, psi_01, psi_10, psi_11] for the
    retweet edge u1 -> u2 (types: 0 = human, 1 = bot).

    wlr is the retweet count on the edge; alpha = (mu, alpha_out, alpha_in);
    lambda01 is fixed to 1 and lambda10 = lambda00 + lambda11 - 1 + epsilon.
    """
    retweets_done = out_graph[u1]       # weighted out-degree of the retweeter
    retweets_received = in_graph[u2]    # weighted in-degree of the retweeted user
    if retweets_done == 0 or retweets_received == 0:
        print("Relationship problem: "+str(u1)+" --> "+str(u2))
    exponent = alpha[1]/float(retweets_done)-1 + alpha[2]/float(retweets_received)-1
    # sigmoid-scaled base energy; for large exponents the value underflows to 0
    base = wlr*alpha[0]/(1+np.exp(exponent)) if exponent < 10 else 0
    lambda10 = lambda00 + lambda11 - 1 + epsilon
    return [lambda00*base, base, lambda10*base, lambda11*base]
| 6,741 | 30.069124 | 115 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/botcode_v2/ioHELPER.py | import numpy as np
from os import listdir
from os.path import isfile, join
import datetime
import networkx as nx
# Parse "key;value" lines into {url string: value string}.
def readCSVFile_urls(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line) > 0):
            temp = line.split(';')
            res[temp[0]] = temp[1]
    return res;
# Parse "url;[id1, id2, ...]" lines into {url: [int ids]}; malformed lines are skipped.
def readCSVFile_urls_urls(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line) > 0):
            try:
                temp = line.split(';')
                # temp[1][1:-1] strips the surrounding brackets of the list literal
                res[temp[0]] = [int(i) for i in temp[1][1:-1].split(', ')]
            except:
                continue;
    return res;
# Parse "user_id;value" lines into {int id: value string}.
# NOTE(review): identical implementation to readCSVFile_screenNames below —
# candidates for consolidation.
def readCSVFile_urls_users(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line) > 0):
            temp = line.split(';')
            res[int(temp[0])] = temp[1]
    return res;
# Parse "user_id;screen_name" lines into {int id: screen name}.
def readCSVFile_screenNames(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line) > 0):
            temp = line.split(';')
            res[int(temp[0])] = temp[1]
    return res;
def readCustomFile(path):
    """Return the non-empty lines of *path* as a list of strings."""
    with open(path, 'r') as fh:
        raw = fh.read()
    return [line for line in raw.split('\n') if line]
def readCustomDic_Pibot(path):
    """Parse 'user_id;probability' lines of *path* into {int id: float prob}.

    Lines without a ';'-separated second field (including blank lines) are skipped.
    """
    with open(path, 'r') as fh:
        lines = fh.read().split('\n')
    probs = {}
    for entry in lines:
        parts = entry.split(';')
        if len(parts) > 1:
            probs[int(parts[0])] = float(parts[1])
    return probs
# Parse "hashtag;probability" lines into {hashtag: float}.
def readCustomDic_hashtagsPibot(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[temp[0]] = float(temp[1])
    return res;
# Return each multi-field line as its list of ';'-split fields.
def readCustomTemp(path):
    file = open(path, 'r').read().split('\n')
    res = []
    for line in file:
        temp = line.split(';')
        if len(temp)>1:
            res.append(temp)
    return res;
# Parse "key;v1;v2;..." lines into {key string: [value strings]}.
# NOTE(review): no length guard here — a blank line yields res[''] = [].
def readCustomDic(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        res[temp[0]] = temp[1:]
    return res;
# Parse "user_id;id1;id2;..." adjacency lines into {int id: [int ids]}.
def readCustomDic_graph(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[int(temp[0])] = [int(i) for i in temp[1:]]
    return res;
# Same as readCustomDic_graph but guards on non-empty line instead of field count.
def readCustomDic_folGraph(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        if(len(line)>0):
            temp = line.split(';')
            res[int(temp[0])] = [int(i) for i in temp[1:]]
    return res;
# Parse "user_id;t1;t2;..." lines into {int id: [float inter-retweet times]}.
def readCustomDic_interRTTimes(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[int(temp[0])] = [float(i) for i in temp[1:]]
    return res;
# Parse "user_id;f1;f2;..." lines into {int id: [field strings]}.
def readCustomDic_table(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file:
        temp = line.split(';')
        if(len(temp) > 1):
            res[int(temp[0])] = [str(i) for i in temp[1:]]
    return res;
# Parse "user_id;k-->v;k-->v;..." lines into {int id: {k: v}}; skips the last
# (assumed trailing-empty) line and the trailing empty field of each line.
def readCustomDic_index(path):
    file = open(path, 'r').read().split('\n')
    res = {}
    for line in file[:-1]:
        temp = line.split(';')
        user = int(temp[0])
        sub_dic={}
        for i in temp[1:-1]:
            key=i.split("-->")[0]
            value=i.split("-->")[1]
            sub_dic[key] = value
        res[user]=sub_dic
    return res;
def writeCSVFile(path, list):
    """Write each element of *list* to *path*, one str(element) per line,
    overwriting any existing file. Echoes each line to stdout (original
    behavior, kept for compatibility). Returns 0.

    Note: the parameter name shadows the ``list`` builtin; it is kept
    unchanged so existing keyword callers are unaffected.
    """
    # with-statement guarantees the file is closed even if str()/write raises
    # (the original leaked the handle on exception).
    with open(path, 'w') as file:
        for i in list:
            line = str(i)
            print(line)
            file.write(line)
            file.write('\n')
    return 0
def completeCSVFile(path, list):
    """Append each element of *list* to *path*, one str(element) per line.
    Echoes each line to stdout. Returns 0."""
    with open(path, 'a') as file:
        for i in list:
            line = str(i)
            print(line)
            file.write(line)
            file.write('\n')
    return 0
# Write {user_id: screen_name} as "user_id;screen_name" lines.
def writeCSVFile_screenNames(path, dic):
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        SN = dic[user]
        line += ';' + SN
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write a list-of-rows table; the first row is treated as a 6-column header.
# NOTE(review): the header join is hard-coded to exactly 6 columns — wider or
# narrower tables would need this generalized.
def writeCSVFile_table(path, table):
    file = open(path, 'w')
    count=0
    file.write(table[0][0]+';'+table[0][1]+';'+table[0][2]+';'+table[0][3]+';'+table[0][4]+';'+table[0][5])
    file.write('\n')
    for i in table[1:]:
        count+=1
        print("at user n"+str(count))
        data = [str(j) for j in i]
        line = ';'.join(data)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write {user: [friend ids]} as "user;f1;f2;..." (a bare trailing ';' for empties).
def writeCSVFile_dic(path, dic):
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            friends = [str(i) for i in dic[user]]
            line += ';'
            line += ';'.join(friends)
        else:
            line+=';'
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write {url: value string} as "url;value" lines (urls with empty values get no ';').
def writeCSVFile_dic_urls(path, dic):
    file = open(path, 'w')
    for url in dic.keys():
        line = str(url)
        if(len(dic[url]) > 0):
            line += ';'
            line += dic[url]
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write {user: [inter-retweet times]} as "user;t1;t2;..." lines.
def writeCSVFile_interRTTimes(path, dic):
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            intertimes = [str(i) for i in dic[user]]
            line += ';'
            line += ';'.join(intertimes)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write {user: (x, y)} layout positions as "user;x;y" lines.
def writeCSVFile_positions(path, dic):
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            line += ';'
            x = str(dic[user][0])
            line += x
            line+= ';'
            y = str(dic[user][1])
            line += y
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write {user: [datetime objects]} formatted as 'MM/DD/YYYY - HH:MM:SS'.
def writeCSVFile_datetimes(path, dic):
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user)
        if(len(dic[user]) > 0):
            intertimes = [i.strftime('%m/%d/%Y - %H:%M:%S') for i in dic[user]]
            line += ';'
            line += ';'.join(intertimes)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write {user: bot probability} as "user;prob" lines (readCustomDic_Pibot inverse).
def writeCSVFile_piBot(path, dic):
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user) + ';' + str(dic[user])
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write {user: {k: v}} as "user;k-->v;k-->v;..." lines (readCustomDic_index inverse).
def writeCSVFile_index(path, dic):
    file = open(path, 'w')
    for user in dic.keys():
        line = str(user) + ';'
        sub_dic = dic[user]
        for key in sub_dic:
            line = line + str(key) + "-->" + str(sub_dic[key]) + ";"
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write a weighted graph's edges as "u;v;weight" lines.
def writeCSVFile_G(path, G):
    file = open(path, 'w')
    for i in G.edges(data=True):
        line = str(i[0]) + ';' + str(i[1]) + ';' + str(i[2]['weight'])
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write an unweighted graph's edges as "u;v" lines.
def writeCSVFile_Gzero(path, G):
    file = open(path, 'w')
    for i in G.edges(data=True):
        line = str(i[0]) + ';' + str(i[1])
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write an energy graph's edges as "u;v;capacity" lines (see computeH).
def writeCSVFile_H(path, G):
    file = open(path, 'w')
    for i in G.edges(data=True):
        line = str(i[0]) + ';' + str(i[1]) + ';' + str(i[2]['capacity'])
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Write a bipartite user-url graph as "user;url" lines; the string-typed
# endpoint is assumed to be the url.
# NOTE(review): this writes only 2 fields per line, but readCSVFile_undirG
# below expects 3 (u;v;weight) — these two are not round-trip compatible;
# readCSVFile_undir_unweighted appears to be the matching reader. Confirm.
def writeCSVFile_undirG(path, G):
    file = open(path, 'w')
    for i in G.edges(data=True):
        if(type(i[0])==np.str_):
            url = i[0]
            user = i[1]
        else:
            url=i[1]
            user=i[0]
        line = str(user) + ';' + str(url)
        file.write(line)
        file.write('\n')
    file.close()
    return 0;
# Read "user;x;y" lines into {int user: np.array([x, y])} layout positions.
def readCSVFile_positions(path):
    positions = {}
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            positions[int(split[0])]=np.array([float(split[1]),float(split[2])])
    return positions;
# Read "u;v;weight" lines into an undirected weighted nx.Graph.
def readCSVFile_undirG(path):
    G = nx.Graph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]),weight = float(split[2]))
    return G;
# Read "u;v" lines into an undirected unweighted nx.Graph.
def readCSVFile_undir_unweighted(path):
    G = nx.Graph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]))
    return G;
# Read "u;v;weight" lines into a directed weighted nx.DiGraph.
def readCSVFile_G(path):
    G = nx.DiGraph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]),weight = float(split[2]))
    return G;
# Read "u;v" lines into a directed unweighted nx.DiGraph.
def readCSVFile_Gzero(path):
    G = nx.DiGraph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            G.add_edge(int(split[0]),int(split[1]))
    return G;
# Read "u;v;capacity" lines into a directed energy graph (writeCSVFile_H inverse).
def readCSVFile_H(path):
    H = nx.DiGraph()
    file = open(path, 'r').read().split('\n')
    for i in file:
        if(len(i)>0):
            split = i.split(';')
            H.add_edge(int(split[0]),int(split[1]), capacity = float(split[2]))
    return H;
# def read_data(dirname):
# fileList = [ f for f in listdir(dirname) if isfile(join(dirname,f)) ]
# dic = {}
# for f in fileList:
| 8,254 | 18.939614 | 104 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/bot_impact/assess_bot_impact.py | # -*- coding: utf-8 -*-
"""AssessBotImpact.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1idq0xOjN0spFYCQ1q6JcH6KdpPp8tlMb
# Assess Bot Impact
This code will calculate the mean opinion shift caused by the bots in your network.
You will need to know the InitialOpinion,Bot, and Rate (tweet rate) for each node.
You will need to know the follower graph for the nodes
"""
from assess_helper import *
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # BUG FIX: was "import pandas as df" — the rest of this script calls pd.* and rebinds df, so pd was never defined
from scipy import sparse
import sys
"""## Input Files
These are the input file u need to make for the assessment stage.
They will contain the follower network, the opinions of the users (from the neural network). The identities of bots (from the bot detector code), and the stubborn users (we get this from the opinions, but assume now its been figured out)
INPUT:
node_filename = file with node info. Format is (id,InitialOpinion,Stubborn,Rate,Bot,friend_count, follower_count)
follower_graph_filename = file with following of each node in the network.
format is (follower, following1,following2,following3,...)
G_filename = filename for networkx object for entire follower network. Make sure it ends in .gpickle. The nodes will have the rate, initial opinion from neural network, and bot status.
Gbot_filename = filename for networkx object for follower network reachable from stubborn users. Make sure it ends in .gpickle. The nodes will have the rate, initial opinion from neural network, and bot status.
assess_csv_filename = csv file with opinions of each user with and without bots. This is for plotting purposes.
"""
#Test files
# Small bundled test fixtures; the commented block below shows how the same
# script was pointed at a real (UNWomen/India) dataset.
node_filename = "test_nodes.csv" #format is (id,InitialOpinion,Stubborn,Rate,Bot, friend_count, follower_count)
follower_graph_filename = "test_follower_graph.csv" #format is (follower, following1,following2,following3,...)
G_filename = 'G.gpickle'
Gbot_filename = 'G_bot.gpickle'
assess_csv_filename = "assess_test.csv"
#country = "India"
#path_data = "C:\\Users\\Zlisto\\Dropbox (Personal)\\MIDAC\\UNWomen\\"
#node_filename =path_data+"Nodes_%s_All.csv"%country
#follower_graph_filename = path_data+ "friends_graph_%s_combined.csv"%country
#G_filename = path_data+ "G_%s.gpickle"%country
#G_bot_follower_filename = path_data + "friends_graph_%s_bot_followers.csv"%country
#Gbot_filename = path_data+"Gbot_UNWomen_%s.gpickle"%country
#ff_filename = path_data+ "sn_ff_%s_all.csv"%country
#assess_csv_filename = path_data + "assess_%s.csv"%country
"""## Histogram Neural Network Opinions"""
# Visualize the neural-network opinion distribution to pick stubbornness thresholds.
df = pd.read_csv(node_filename)
plt.hist(df.InitialOpinion,1000);
plt.grid()
plt.xlabel("Opinion",fontsize = 18)
plt.ylabel("Count",fontsize = 18)
plt.show()
"""## Choose Opinion Thresholds
Choose opinion thresholds to determine who is stubborn.
INPUT:
threshold_low = highest opinion of stubborn users in lower interval
threshold_high= lowest opinion of stubborn users in upper interval
OUTPUT:
G = networkx object with all node and network info. This is what you will need for the assess steps.
"""
#threshold_low = np.quantile(df.InitialOpinion,0.05)
#threshold_high= np.quantile(df.InitialOpinion,0.95)
# Users with opinion <= threshold_low or >= threshold_high are treated as stubborn.
threshold_low = 0.1
threshold_high = 0.9  # BUG FIX: was misspelled "hreshold_high", leaving threshold_high undefined at its use below
G = G_from_follower_graph(node_filename,follower_graph_filename,threshold_low,threshold_high) #create network_x graph object
nx.write_gpickle(G, G_filename)
print("Wrote network to file. Network as %s nodes and %s edges"%(G.number_of_nodes(),G.number_of_edges()))
#G = nx.read_gpickle(G_filename)
# Only draw the network when it is small enough to be legible.
if G.number_of_nodes()<=100:
    pos = nx.spring_layout(G)
    nx.draw(G,pos=pos)
    nx.draw_networkx_labels(G,pos=pos)
"""## Prepare Reachable Subgraph
This function builds a subgraph that contains the stubborn users and anyone they can reach.
We need this step because if you cannot be reached by a stubborn user, my model has no way to determine ur opinion.
INPUT:
G = follower network with node information (neural network opinion, rate, bot status)
OUTPUT:
Gbot0 = sugbraph of G that has only nodes that can be reached by at least one stubborn node.
"""
# Keep only nodes reachable from at least one stubborn user: the opinion model
# cannot determine equilibria for unreachable nodes.
(Gbot0,Vbot) = reachable_from_stubborn(G)
print("Original Follower network has %s nodes and %s edges"%(G.number_of_nodes(),G.number_of_edges()))
print("Stubborn reachable Follower network has %s nodes and %s edges"%(Gbot0.number_of_nodes(),Gbot0.number_of_edges()))
nx.write_gpickle(Gbot0.copy(),Gbot_filename)
if Gbot0.number_of_nodes()<=100:
    pos = nx.spring_layout(Gbot0)
    nx.draw(Gbot0,pos=pos)
    nx.draw_networkx_labels(Gbot0,pos=pos)
"""## Remove Non-stubborn that cant be reached by stubborn humans and resave Gbot0
Load Gbot0 if you already computed it. Then keep only nodes
which are not reachable only by bots. These users cannot be solved
when you remove the bots. Resave Gbot0.
INPUT:
Gbot0 = sugbraph of G that has only nodes that can be reached by at least one stubborn node.
OUTPUT:
Gbot0 = sugbraph of G that has only nodes that can be reached by at least one $\textbf{human}$ stubborn node.
"""
#Use this to read Gbot if you saved it already. For debugging purposes
Gbot0 = nx.read_gpickle(Gbot_filename)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 has %s nodes and %s edges"%(nv,ne))
#create subgraph with bots removed
Gnobot = Gbot0.subgraph([x for x in Gbot0.nodes if Gbot0.nodes[x]["Bot"]==0])
print("Find all nodes reachable from stubborn nodes in Gnobot")
# Vnobot = nodes still reachable from stubborn HUMANS once bots are removed;
# anyone reachable only via bots cannot be solved in the no-bot scenario.
_,Vnobot = reachable_from_stubborn(Gnobot)
#get list of bot and human names
Bots = [x for x in Gbot0.nodes if Gbot0.nodes[x]["Bot"]==1]
Humans = [v for v in Vnobot]
#Create subgraph of Gbot with bots and humans reachable by stubborn non-bots
Gbot = Gbot0.subgraph(Bots+Humans)
#save Gbot
nv = Gbot.number_of_nodes()
ne = Gbot.number_of_edges()
print("Gbot with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
nx.write_gpickle(Gbot.copy(),Gbot_filename)
"""## Load Gbot
Use this block if you already save Gbot0 with unreachable humans removed.
"""
Gbot0 = nx.read_gpickle(Gbot_filename)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
"""## NOT NEEDED: Add in edges from bots to their followers
Edges = []
ne=0 #edge counter
new_edges = 0
with open(G_bot_follower_filename) as fp:
for cnt, line in enumerate(fp):
line = line.strip('\n')
users =line.split(",")
following = users[0]
if following in Gbot0.nodes():
followers = users[1:]
for follower in followers:
if follower in Gbot0.nodes():
if not(Gbot0.has_edge(following, follower)):
ne+=1
rate = Gbot0.nodes[following]['Rate']
Gbot0.add_edge(following,follower,Rate=rate) #edge points from the following to the follower - edge shows flow of tweets
print("Added %s new edges from bots to their followers"%ne)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
## Make sure all bots are stubborn
"""
# The model assumes bots never change opinion, so every bot must be stubborn.
for node in Gbot0.nodes():
    if (Gbot0.nodes[node]['Bot']==1) and (Gbot0.nodes[node]['Stubborn']==0):
        Gbot0.nodes[node]['Stubborn']=1
print("Updated bot stubborn label so all bots are stubborn\n")
nx.write_gpickle(Gbot0.copy(),Gbot_filename)
"""## Risk Index Calculation
This function calculates the risk index, which equals the shift in the mean opinion of all users (bot and human ) in the network.
We can modify the exact risk index value later, but it uses the Opinions vectors
"""
# Risk index = shift in the network's mean equilibrium opinion caused by bots.
(ri,OpinionsNoBots,OpinionsBots,Gnobot,Gbot) = risk_index(Gbot0);
nx.write_gpickle(Gbot.copy(),Gbot_filename)
MeanOpinionBots = np.mean(OpinionsBots)
MeanOpinionNoBots = np.mean(OpinionsNoBots)
print("\nMean opinion with no bots = %s"%MeanOpinionNoBots)
print("Mean opinion with bots = %s"%MeanOpinionBots)
print("Risk Index = %.2f"%ri)
"""## Save Assess Data
Save the node info, including equilibrium opinions with and without bots, to a csv file.
"""
def G_to_df(G):
    """Collect the attribute dict of every node of G into a DataFrame (one row per node)."""
    records = [attributes for _, attributes in G.nodes(data=True)]
    return pd.DataFrame(records)
# Merge the neural-net opinions (node csv) with the equilibrium opinions
# computed with and without bots into a single "assess" table.
df = pd.read_csv(node_filename)
df_bot = G_to_df(Gbot)
df_nobot = G_to_df(Gnobot)
df = df.rename(columns={"id": "ScreenName", "InitialOpinion": "OpinionNeuralNet"})
df_bot = df_bot.rename(columns={"Name": "ScreenName", "FinalOpinion": "OpinionEquilibriumBot"})
df_nobot = df_nobot.rename(columns={"Name": "ScreenName", "FinalOpinion": "OpinionEquilibrium"})
# Bots are absent from the no-bot network: copy their (bot-network) opinions over.
X = df_bot[df_bot.Bot==1]
X=X.rename(columns={"OpinionEquilibriumBot": "OpinionEquilibrium"})
df_nobot = df_nobot.append(X)
#get node degrees in observed network
X = []
for sd in list(Gbot.in_degree()):
    x = {"ScreenName":sd[0],"in_degree":sd[1]}
    X.append(x)
Din = pd.DataFrame(X)
X = []
for sd in list(Gbot.out_degree()):
    x = {"ScreenName":sd[0],"out_degree":sd[1]}
    X.append(x)
Dout = pd.DataFrame(X)
df_degree = pd.merge(Din,Dout,on="ScreenName",how="inner")
# NOTE(review): set_index returns a new frame; these three results are
# discarded, so the statements below appear to be no-ops -- confirm.
df.set_index('ScreenName')
df_bot.set_index('ScreenName')
df_nobot.set_index('ScreenName')
#df_bot.to_csv(path_data+"df_bot.csv")
#df_nobot.to_csv(path_data+"df_nobot.csv")
df_bot = pd.merge(df,df_bot,on="ScreenName",how="inner")
df_bot = pd.merge(df_degree,df_bot,on = "ScreenName", how = "inner")
df_assess = pd.merge(df_bot,df_nobot, on = "ScreenName")
# Keep only the columns of interest and write the assess csv.
df_assess = df_assess[["ScreenName","OpinionNeuralNet","OpinionEquilibriumBot",
          "OpinionEquilibrium","Bot","Stubborn","Rate",
          "friend_count","follower_count",
          "in_degree","out_degree"]]
df_assess.to_csv(assess_csv_filename)
print("Saved assess data to %s"%assess_csv_filename)
df_assess.head()
"""## Plot Opinion Shift
Plot bar graph of mean opinion with and without bots.
"""
X = np.asarray((1,2))
Y = np.asarray((MeanOpinionNoBots,MeanOpinionBots))
plt.bar(X,Y,color=['blue', 'red'],edgecolor='black')
plt.grid()
plt.xticks(X, ["No bots","Bots"],fontsize=14)
plt.ylim([0, 1]);
plt.ylabel("Mean opinion",fontsize = 18)
plt.title("Risk Index = %.2f"%ri,fontsize = 14);
plt.show()
"""## Count number bot human followers
Count how many non-stubborn users follow the bots in the follower network
reachable by stubborn humans.
"""
(nbot_followers,nbots,Vbot_followers) = bot_neighbor_count(Gbot)
nv = Gbot.number_of_nodes()
print("%s bots have a total of %s followers in a network of %s users"%(nbots,nbot_followers,nv))
"""## Number of Bots followed by each human
Look at how many humans follow different numbers of bots.
"""
# For each non-stubborn (human) user, count how many of its predecessors
# (accounts it follows, i.e. tweet sources) are bots.
Nbots = []
S =[]
for sn in Gbot.nodes():
    if Gbot.nodes[sn]['Stubborn']==0:
        nb = Gbot.predecessors(sn)
        bots = [v for v in nb if Gbot.nodes[v]['Bot']==1]
        Nbots.append(len(bots))
        S.append(sn)
df_nbots = pd.DataFrame(list(zip(S ,Nbots)),
               columns =['ScreenName', 'Nbots'])
print("Calculated how many bots each human follows")
# Histogram printout for 0..9 bots followed.
for nbots in range(0,10):
    nusers = len(df_nbots[df_nbots.Nbots==nbots])
    print("%s users follow %s bots"%(nusers,nbots))
| 11,179 | 32.573574 | 237 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/start/bot_impact/assess_helper.py | import json,random,csv
import numpy as np
from scipy import sparse
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
#code for helper file
#create networkx graph object from node and edge list csv files
def G_from_edge_list(node_filename, edge_filename):
    """Build a directed tweet-flow graph from a node csv and an edge csv.

    Node rows supply id, InitialOpinion, Stubborn, Rate and Bot attributes;
    FinalOpinion starts equal to InitialOpinion. Edge rows give
    (following, follower) pairs; each edge carries the poster's Rate.
    """
    graph = nx.DiGraph()
    for _, node_row in pd.read_csv(node_filename).iterrows():
        graph.add_node(node_row['id'],
                       Name=node_row['id'],
                       InitialOpinion=node_row['InitialOpinion'],
                       Stubborn=node_row['Stubborn'],
                       Rate=node_row['Rate'],
                       FinalOpinion=node_row['InitialOpinion'],
                       Bot=node_row['Bot'])
    for _, edge_row in pd.read_csv(edge_filename).iterrows():
        source = edge_row['following']
        target = edge_row['follower']
        # Edge points from the account being followed to the follower,
        # i.e. the direction tweets flow.
        graph.add_edge(source, target, Rate=graph.nodes[source]['Rate'])
    return graph
#create networkx graph object from node and follower graph csv files
#node file format is (id,InitialOpinion,Stubborn,rate,FinalOpinion,Bot)
#follower graph file format is (follower, following1,following2,following3,...)
def G_from_follower_graph(node_filename, follower_graph_filename, threshold_low, threshold_high):
    """Build a directed tweet-flow graph from a node csv and a follower-graph csv.

    Node file format is (id,InitialOpinion,Stubborn,rate,FinalOpinion,Bot);
    follower graph file format is (follower, following1, following2, ...).
    A node is relabelled stubborn when its InitialOpinion lies outside the
    open interval (threshold_low, threshold_high). Edges point from the
    followed account to the follower (the direction tweets flow) and carry
    the poster's Rate. Edges are only added when both endpoints are nodes.
    """
    G = nx.DiGraph()
    print("Building network for Assess.\nStubborn intervals = (0,%.3f),(%.3f,1)"%(threshold_low,threshold_high))
    # First add the nodes with their attributes, recomputing the Stubborn flag
    # from the opinion thresholds.
    data_nodes = pd.read_csv(node_filename)
    data_nodes.Stubborn = 1*np.logical_or(data_nodes.InitialOpinion<=threshold_low, data_nodes.InitialOpinion>=threshold_high)
    for row in data_nodes.iterrows():
        G.add_node(row[1]['id'], Name = row[1]['id'],InitialOpinion=row[1]['InitialOpinion'],Stubborn = row[1]['Stubborn'],
                   Rate=row[1]['Rate'], FinalOpinion = row[1]['InitialOpinion'], Bot = row[1]['Bot'])
    # Second, add the edges whose endpoints are both in the node set.
    # (Membership is tested on the graph directly -- O(1) -- instead of
    # building a fresh NodeView per line; the unused Edges/ne locals from the
    # original implementation are gone.)
    with open(follower_graph_filename) as fp:
        for cnt, line in enumerate(fp):
            users = line.strip('\n').split(",")
            follower = users[0]
            if follower in G:
                # The remaining fields are the accounts this user follows.
                for following in users[1:]:
                    if following in G:
                        rate = G.nodes[following]['Rate']
                        # Edge shows flow of tweets: following -> follower.
                        G.add_edge(following, follower, Rate=rate)
    return G
#Calculate the final opinions of the non stubborn nodes, and return a new updated Graph object (for drawing purposes)
def final_opinions(Ginitial):
    """Solve for the equilibrium opinions of all non-stubborn nodes.

    Builds the (Gmat, Fmat, Psi) linear system from the graph, solves
    Gmat x = Fmat Psi with bicgstab, and writes each solved opinion into the
    node's FinalOpinion attribute (stubborn nodes keep their value).
    Returns (array of FinalOpinion for every node, updated graph copy).
    """
    G = Ginitial.copy() #we will add in the final opinions to this network object
    print("\tCalculating G,F,Psi matrices")
    (Gmat,Fmat,Psi)= graph_to_GFPsi(G); #create the matrices we need for the opinion calculation.
    #print("G = %s matrix\nF = %s matrix\nPsi = %s vector"%(Gmat.shape,Fmat.shape,Psi.shape))
    b = Fmat @ Psi; #b = Fmat*Psi, just makes notation cleaner for later functions
    print("\tSolving for opinions")
    opinion_nonstubborn = sparse.linalg.bicgstab(Gmat,b)[0]; #solve linear system to get non-stubborn opinions
    cnonstub=0
    #now we update the final opinons in G
    # cnonstub indexes the solution vector in the same node-iteration order
    # used by graph_to_GFPsi, so the two iterations must stay consistent.
    for node in G.nodes(data=True):
        if node[1]['Stubborn']==0:
            G.nodes[node[0]]['FinalOpinion'] = opinion_nonstubborn[cnonstub]
            if opinion_nonstubborn[cnonstub]>1:
                print("%s has opinion %s - not between 0 and 1"%(node,opinion_nonstubborn[cnonstub]))
            cnonstub+=1
    FinalOpinions = [ x[1]['FinalOpinion'] for x in G.nodes(data=True)] #create a FinalOpinions list
    return (np.asarray(FinalOpinions),G) #return the Final opinions as an array and also return the update graph object
#function to create Gmat,Fmat,Psi matrices and vectors for equilibrium calculation
def graph_to_GFPsi(G):
    """Build the sparse linear system for the equilibrium opinion calculation.

    Returns (Gmat, Fmat, Psi) where Gmat (n_nonstubborn x n_nonstubborn) couples
    non-stubborn nodes, Fmat (n_nonstubborn x n_stubborn) couples non-stubborn
    nodes to stubborn ones, and Psi holds the stubborn nodes' initial opinions.
    The equilibrium satisfies Gmat x = Fmat Psi.
    """
    n = int(len(G.nodes()))
    n_stubborn = int(sum([node[1]['Stubborn'] for node in G.nodes(data=True)]))
    n_nonstubborn = n-n_stubborn
    #Gmat = np.zeros((n_nonstubborn,n_nonstubborn))
    #Fmat= np.zeros((n_nonstubborn,n_stubborn))
    Psi = np.zeros((n_stubborn,1))
    G_Gmat ={} #dictionary: key= node name, value = index in Gmat
    Gmat_G = {} #dictionary: key = index in Gmat, value = node name
    G_Fmat ={} #dictionary: key = node name, value = index in Fmat and Psi
    Fmat_G = {} #dictionary: key = index in Fmat and Psi, value = node name
    # COO-style triplet lists for the two sparse matrices.
    data_G = []
    row_G = []
    col_G = []
    data_F = []
    row_F = []
    col_F = []
    #make dictionaries where I can look up the index of node in Gmat or Fmat.
    cstub=0
    cnonstub=0
    for node in G.nodes(data=True):
        name = node[1]['Name']
        opinion = node[1]['InitialOpinion']
        if node[1]['Stubborn']==1:
            Fmat_G[cstub]=name
            G_Fmat[name]=cstub
            Psi[cstub] = opinion
            cstub+=1
        elif node[1]['Stubborn']==0:
            G_Gmat[name] = cnonstub
            Gmat_G[cnonstub]=name
            cnonstub+=1
    #Calculate diagonal elements of Gmat
    # Diagonal entry = total incoming tweet rate of the node (sum of the
    # Rate of every account the node follows).
    for ind in range(cnonstub):
        node = Gmat_G[ind]
        w=0
        for nb in G.predecessors(node):
            w+=G.nodes[nb]['Rate']
        row_G.append(ind)
        col_G.append(ind)
        data_G.append(w)
        #Gmat[ind,ind] = w #positive sign here
    #calculate off-diagonal elements of Gmat and Fmat
    for edge in G.edges(data=True):
        #print(edge)
        following = edge[0]
        follower = edge[1]
        rate = G.nodes[following]['Rate'] #rate of following.
        following_stub = G.nodes[following]['Stubborn']
        follower_stub = G.nodes[follower]['Stubborn']
        #print(follower,follower_stub,following,following_stub)
        if follower_stub==0 and following_stub==0: #add an edge to Gmat because both non-stubborn
            i_follower = G_Gmat[follower]
            i_following = G_Gmat[following]
            #Gmat[i_follower,i_following]= -rate #negative sign here
            row_G.append(i_follower)
            col_G.append(i_following)
            data_G.append(-rate)
        elif follower_stub==0 and following_stub==1:
            i_follower = G_Gmat[follower]
            i_following = G_Fmat[following]
            #Fmat[i_follower,i_following]= rate #this sign is the opposite of Gmat
            row_F.append(i_follower)
            col_F.append(i_following)
            data_F.append(rate)
    # csr_matrix sums duplicate (row, col) triplets, which is the desired
    # accumulation behavior here.
    Gmat = sparse.csr_matrix((data_G, (row_G, col_G)), shape=(n_nonstubborn, n_nonstubborn))
    Fmat = sparse.csr_matrix((data_F, (row_F, col_F)), shape=(n_nonstubborn,n_stubborn))
    return(Gmat,Fmat,Psi)
#calculate the risk index from a networkx object with opinions, stubborn, and bots
def risk_index(Gbot0):
    """Compute the risk index: mean shift of opinions caused by the bots.

    Solves for equilibrium opinions on the full graph and on the bot-free
    subgraph; bots themselves contribute zero shift (their with/without
    opinions are taken equal). Returns
    (ri, OpinionsNoBot, OpinionsBot, Gnobot, Gbot).
    """
    Gnobot = Gbot0.subgraph([x for x in Gbot0.nodes if Gbot0.nodes[x]["Bot"]==0])
    print("Solving for opinions with bots")
    (X,Gbot) = final_opinions(Gbot0)
    print("Solving for opinions without bots")
    (X,Gnobot) = final_opinions(Gnobot)
    OpinionsBot =[]
    OpinionsNoBot =[]
    print("Saving opinions to arrays")
    # Iterate the full graph so the two opinion arrays stay aligned node-by-node.
    for node in Gbot.nodes():
        if Gbot.nodes[node]["Bot"]==0:
            opinion_nobot = Gnobot.nodes[node]['FinalOpinion']
            opinion_bot = Gbot.nodes[node]['FinalOpinion']
        else:
            # Bots do not exist in Gnobot; use the same value on both sides
            # so they add nothing to the shift.
            opinion_nobot = Gbot.nodes[node]['FinalOpinion']
            opinion_bot = Gbot.nodes[node]['FinalOpinion']
        OpinionsBot.append(opinion_bot)
        OpinionsNoBot.append(opinion_nobot)
    OpinionsBot =np.asarray(OpinionsBot)
    OpinionsNoBot =np.asarray(OpinionsNoBot)
    ri = np.mean(OpinionsBot-OpinionsNoBot)
    return (ri,OpinionsNoBot,OpinionsBot,Gnobot,Gbot)
#find all nodes reachable by a stubborn user and return corresponding subgraph
def reachable_from_stubborn(G):
    """Find all nodes reachable from a stubborn node and return that subgraph.

    Returns (subgraph of reachable nodes, iterable of their names). The V dict
    caches already-reached nodes so a DFS is only started from stubborn nodes
    not yet known to be reachable.
    """
    ne = G.number_of_edges()
    nv = G.number_of_nodes()
    V={} #keep track of all reachable nodes
    c = 0 #count how many nodes we iterate through
    cprint = 1e3 #how often to print status
    c_reach = 0 #count how many times we do reach calculation
    Stub = [v for v in G.nodes if G.nodes[v]['Stubborn']==1]
    nstub = len(Stub)
    print("Checking reachable nodes from %s stubborn nodes"%nstub)
    for node in Stub:
        #print(node)
        if not(node in V):
            if (G.nodes[node]["Stubborn"]==1) or (G.nodes[node]["Bot"]==1):
                # depth_limit=ne is effectively unlimited for this graph.
                reach=nx.dfs_postorder_nodes(G, source=node, depth_limit=ne)
                V.update({i: 1 for i in [node]+list(reach)})
                #print("\t%s"%V)
                c_reach+=1
        c+=1
        if c%cprint==0: print("Node %s of %s, did reach check for %s nodes"%(c,nstub,c_reach))
    #V = list(set(V))
    print("Did reach check for only %s nodes out of %s"%(c_reach,nv))
    Gbot = G.subgraph(V)
    return (Gbot,V.keys())
def bot_neighbor_count(G):
    """Count bots and the distinct non-bot accounts that receive their tweets.

    Returns (number of distinct human followers of any bot, number of bots,
    list of those followers).
    """
    human_followers = set()
    bot_total = 0
    for node in G.nodes():
        if G.nodes[node]["Bot"] == 1:
            bot_total += 1
            human_followers.update(v for v in G.neighbors(node) if G.nodes[v]["Bot"] == 0)
    follower_list = list(human_followers)
    return (len(follower_list), bot_total, follower_list)
ball-k-means | ball-k-means-master/PythonVersion/win_kmeans++_python.py | # This version is completed by Yong Zheng(413511280@qq.com), Shuyin Xia?380835019@qq.com?, Xingxin Chen, Junkuan Wang. 2020.5.1
import ctypes
import numpy as np
class ball_k_means:
    """Thin ctypes wrapper around the ball k-means Windows DLLs.

    isDouble=1 selects the double-precision build (ballXd.dll); any other
    value selects the single-precision build (ballXf.dll).
    """

    def __init__(self, isDouble=0):
        self.isDouble = isDouble

    def fit(self, s1, k, isRing=False, detail=False, random_seed=-1, s2="0"):
        """Run ball k-means on the csv dataset at path *s1*.

        k: number of clusters (used only when no centroid file is given).
        isRing: True runs the algorithm variant with rings.
        detail: verbose flag passed to the DLL.
        random_seed: seed for the k-means++ initialisation.
        s2: path to a csv of initial centroids; "0" means use k-means++ init.
        Returns the integer array passed to the DLL by pointer (presumably
        filled with per-point labels -- determined by the DLL, not visible here).
        """
        temp = np.loadtxt(s1, dtype=float, delimiter=',')
        lenth = temp.shape[0]
        m = np.linspace(1, lenth, lenth, dtype=int)
        dataptr = m.ctypes.data_as(ctypes.c_char_p)
        # The two DLLs export identical entry points; only the float precision
        # differs, so pick the library once and share the call logic
        # (the original duplicated the whole body in both branches).
        if self.isDouble == 1:
            dll = ctypes.cdll.LoadLibrary('.\\ballXd.dll')
        else:
            dll = ctypes.cdll.LoadLibrary('.\\ballXf.dll')
        if s2 != '0':
            # Centroids supplied: use the "cent" entry point.
            dll.ball_kmeans_cent.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int,
                                             ctypes.c_bool, ctypes.c_bool, ctypes.c_char_p)
            dll.ball_kmeans_cent(s1.encode('utf-8'), dataptr, lenth, isRing, detail, s2.encode('utf-8'))
        else:
            # No centroids: initialise with k and the random seed.
            dll.ball_kmeans_initk.argtypes = (ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int,
                                              ctypes.c_bool, ctypes.c_bool, ctypes.c_int)
            dll.ball_kmeans_initk(s1.encode('utf-8'), k, dataptr, lenth, isRing, detail, random_seed)
        return m
if __name__ == '__main__':
    # Example: cluster the birch dataset into 5 groups with the
    # double-precision DLL, rings enabled, verbose, seeded with centroids.
    dataset_address = "C:\\Users\\Yog\\source\\repos\\test20\\test20\\data+centers\\dataset\\birchdata.csv"
    clf = ball_k_means(isDouble=1)
    m = clf.fit(dataset_address, 5, True, True, -1, "C:\\Users\\Yog\\source\\repos\\test20\\test20\\data+centers\\centroids\\birch4.csv")
| 2,550 | 52.145833 | 137 | py |
ball-k-means | ball-k-means-master/PythonVersion/linux_kmeans++_python.py | # This version is completed by Yong Zheng(413511280@qq.com), Shuyin Xia?380835019@qq.com?, Xingxin Chen, Junkuan Wang. 2020.5.1
import ctypes
import numpy as np
class ball_k_means:
    """Thin ctypes wrapper around the ball k-means Linux shared libraries.

    isDouble=1 selects the double-precision build (libballXD.so); any other
    value selects the single-precision build (libballXF.so).
    """

    def __init__(self, isDouble=0):
        self.isDouble = isDouble

    def fit(self, s1, k, isRing=False, detail=False, random_seed=-1, s2="0"):
        """Run ball k-means on the csv dataset at path *s1*.

        k: number of clusters (used only when no centroid file is given).
        isRing: True runs the algorithm variant with rings.
        detail: verbose flag passed to the library.
        random_seed: seed for the k-means++ initialisation.
        s2: path to a csv of initial centroids; "0" means use k-means++ init.
        Returns the integer array passed to the library by pointer (presumably
        filled with per-point labels -- determined by the library, not visible here).
        """
        temp = np.loadtxt(s1, dtype=float, delimiter=',')
        lenth = temp.shape[0]
        m = np.linspace(1, lenth, lenth, dtype=int)
        dataptr = m.ctypes.data_as(ctypes.c_char_p)
        # Both shared objects export identical entry points; only the float
        # precision differs, so pick the library once and share the call logic
        # (the original duplicated the whole body in both branches).
        if self.isDouble == 1:
            dll = ctypes.cdll.LoadLibrary('./libballXD.so')
        else:
            dll = ctypes.cdll.LoadLibrary('./libballXF.so')
        if s2 != '0':
            # Centroids supplied: use the "cent" entry point.
            dll.ball_kmeans_cent.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int,
                                             ctypes.c_bool, ctypes.c_bool, ctypes.c_char_p)
            dll.ball_kmeans_cent(s1.encode('utf-8'), dataptr, lenth, isRing, detail, s2.encode('utf-8'))
        else:
            # No centroids: initialise with k and the random seed.
            dll.ball_kmeans_initk.argtypes = (ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int,
                                              ctypes.c_bool, ctypes.c_bool, ctypes.c_int)
            dll.ball_kmeans_initk(s1.encode('utf-8'), k, dataptr, lenth, isRing, detail, random_seed)
        return m
if __name__ == '__main__':
    # NOTE(review): these are Windows-style paths in the Linux variant of the
    # wrapper -- presumably copied from the Windows version; verify before use.
    dataset_address = "C:\\Users\\Yog\\source\\repos\\test20\\test20\\data+centers\\dataset\\birchdata.csv"
    clf = ball_k_means(isDouble=1)
    m = clf.fit(dataset_address, 5, True, True, -1, "C:\\Users\\Yog\\source\\repos\\test20\\test20\\data+centers\\centroids\\birch4.csv")
| 2,552 | 52.1875 | 137 | py |
lcogtgemini | lcogtgemini-master/setup.py | from setuptools import setup
# Package metadata for lcogtgemini. setuptools expects author, author_email
# and version to be strings; the previous single-element lists and float
# version produced malformed metadata.
setup(name='lcogtgemini',
      author='Curtis McCully',
      author_email='cmccully@lco.global',
      version='0.1',
      packages=['lcogtgemini'],
      install_requires=['numpy', 'astropy', 'scipy'],
      entry_points={'console_scripts': ['reduce_gemini=lcogtgemini.main:run']})
| 318 | 30.9 | 79 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/main.py | import lcogtgemini
from lcogtgemini.combine import speccombine
from lcogtgemini.cosmicrays import crreject
from lcogtgemini.sky import skysub
from lcogtgemini.reduction import scireduce, extract
from lcogtgemini.utils import get_binning, rescale1e15
from lcogtgemini.qe import make_qecorrection
from lcogtgemini.wavelengths import wavesol, calculate_wavelengths
from lcogtgemini.telluric import telluric_correct, mktelluric, telluric_correction_exists
from lcogtgemini.fits_utils import clean_nans, updatecomheader, spectoascii
from lcogtgemini.flats import makemasterflat
from lcogtgemini.flux_calibration import flux_calibrate, makesensfunc
from lcogtgemini.extinction import correct_for_extinction
from lcogtgemini.file_utils import getobstypes, getobjname, gettxtfiles, maketxtfiles
from lcogtgemini.sort import sort, init_northsouth
from lcogtgemini.bpm import get_bad_pixel_mask
from lcogtgemini.utils import get_y_roi
from lcogtgemini.bias import makebias
import os
from astropy.io import fits
from pyraf import iraf
from glob import glob
from matplotlib import pyplot
pyplot.ion()
def run():
    """Run the full LCOGT Gemini long-slit reduction pipeline in ./work.

    Expects the raw frames in ./raw. Steps: sort files, wavelength-calibrate
    arcs, (optionally) bias/QE-correct, flat-field, sky-subtract, reject
    cosmic rays, extract, flux-calibrate, telluric-correct and combine into
    a final <objname>.fits / <objname>.dat spectrum.
    """
    # copy over sensr.fits, sensb.fits files
    # before running this script
    # launch the image viewer
    # os.system('ds9 &')
    topdir = os.getcwd()
    # Get the raw directory
    rawpath = '%s/raw/' % topdir
    # Sort the files into the correct directories
    fs = sort()
    # Change into the reduction directory
    iraf.cd('work')
    # Initialize variables that depend on which site was used
    extfile, observatory, base_stddir, rawpath = init_northsouth(fs, topdir, rawpath)
    # Get the observation type
    obstypes, obsclasses = getobstypes(fs)
    # get the object name
    objname = getobjname(fs, obstypes)
    # Make the text files for the IRAF tasks
    maketxtfiles(fs, obstypes, obsclasses, objname)
    # remember not to put ".fits" on the end of filenames!
    flatfiles, arcfiles, scifiles = gettxtfiles(fs, objname)
    # All binnings present in the science data; the bad pixel mask is made
    # for each one. The y region-of-interest comes from the first science file.
    binnings = set([get_binning(scifile, rawpath) for scifile in scifiles])
    yroi = get_y_roi(scifiles[0], rawpath)
    get_bad_pixel_mask(binnings, yroi)
    if lcogtgemini.dobias:
        # Make the bias frame
        makebias(fs, obstypes, rawpath)
    # Get the wavelength solution which is apparently needed for everything else
    wavesol(arcfiles, rawpath)
    if lcogtgemini.do_qecorr:
        # Make the QE correction
        make_qecorrection(arcfiles)
    # Calculate the wavelengths of the unmosaiced data
    calculate_wavelengths(arcfiles, rawpath)
    # Make the master flat field image
    makemasterflat(flatfiles, rawpath)
    # Flat field and rectify the science images
    scireduce(scifiles, rawpath)
    # Run sky subtraction
    skysub(scifiles, rawpath)
    # Run LA Cosmic
    crreject(scifiles)
    # Extract the 1D spectrum
    extract(scifiles)
    # Correct for extinction
    correct_for_extinction(scifiles, extfile)
    # If standard star, make the sensitivity function
    makesensfunc(scifiles, objname, base_stddir)
    # Flux calibrate the spectrum
    flux_calibrate(scifiles)
    extractedfiles = glob('cxet*.fits')
    # If a standard star, make the telluric correction
    obsclass = fits.getval(extractedfiles[0], 'OBSCLASS')
    if obsclass == 'progCal' or obsclass == 'partnerCal':
        # Standard stars get a pre-telluric combined spectrum from which the
        # telluric correction is derived.
        speccombine(extractedfiles, objname+'.notel.fits')
        updatecomheader(extractedfiles, objname + '.notel.fits')
        mktelluric(objname + '.notel.fits', objname, base_stddir)
    if telluric_correction_exists():
        # Telluric Correct
        files_to_combine = telluric_correct(extractedfiles)
    else:
        files_to_combine = extractedfiles
    # Combine the spectra
    speccombine(files_to_combine, objname + '.fits')
    # Update the combined file with the necessary header keywords
    updatecomheader(extractedfiles, objname + '.fits')
    #Clean the data of nans and infs
    clean_nans(objname + '.fits')
    # Write out the ascii file
    spectoascii(objname + '.fits', objname + '.dat')
    # Multiply by 1e-15 so the units are correct in SNEx:
    rescale1e15(objname + '.fits')
    # Change out of the reduction directory
    iraf.cd('..')
if __name__ == "__main__":
    run()
| 4,228 | 29.644928 | 89 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/reduction.py | import numpy as np
import lcogtgemini
from astropy.io import fits
from lcogtgemini.utils import get_binning
from lcogtgemini.file_utils import getsetupname
from pyraf import iraf
from lcogtgemini import fixpix
from lcogtgemini import fits_utils
def scireduce(scifiles, rawpath):
    """Bias-subtract, QE-correct, flat-field, mosaic and rectify science frames.

    For each science txt file: fix bad pixels, run gsreduce (bias/overscan,
    no mosaic yet), optionally gqecorr, divide by the setup's flat, gmosaic
    the amplifiers, and gstransform onto the arc wavelength solution.
    """
    for f in scifiles:
        binning = get_binning(f, rawpath)
        fixed_rawpath = fixpix.fixpix(f, rawpath, binning, lcogtgemini.namps)
        setupname = getsetupname(f)
        if lcogtgemini.dobias:
            bias_filename = "bias{binning}".format(binning=binning)
        else:
            bias_filename = ''
        # gsreduce subtracts bias and mosaics detectors
        iraf.unlearn(iraf.gsreduce)
        iraf.gsreduce('@' + f, outimages=f[:-4]+'.mef', rawpath=fixed_rawpath, bias=bias_filename,
                      fl_bias=lcogtgemini.dobias, fl_over=lcogtgemini.dooverscan, fl_fixpix='no',
                      fl_flat=False, fl_gmosaic=False, fl_cut=False, fl_gsappwave=False, fl_oversize=False,
                      fl_vardq=lcogtgemini.dodq)
        if lcogtgemini.do_qecorr:
            # Renormalize the chips to remove the discrete jump in the
            # sensitivity due to differences in the QE for different chips
            iraf.unlearn(iraf.gqecorr)
            iraf.gqecorr(f[:-4]+'.mef', outimages=f[:-4]+'.qe.fits', fl_keep=True, fl_correct=True,
                         fl_vardq=lcogtgemini.dodq, refimages=setupname + '.arc.arc.fits',
                         corrimages=setupname +'.qe.fits', verbose=True)
            unmosaiced_name = f[:-4]+'.qe.fits'
        else:
            unmosaiced_name = f[:-4]+'.mef.fits'
        # Flat field the image
        # (done by hand here, per amplifier, rather than via gsreduce's fl_flat)
        hdu = fits.open(unmosaiced_name, mode='update')
        flat_hdu = fits.open(setupname+'.flat.fits')
        for i in range(1, lcogtgemini.namps + 1):
            hdu[i].data /= flat_hdu[i].data
        hdu.flush()
        hdu.close()
        iraf.unlearn(iraf.gmosaic)
        iraf.gmosaic(unmosaiced_name, outimages=f[:-4] + '.fits', fl_vardq=lcogtgemini.dodq,
                     fl_clean=False)
        # Transform the data based on the arc wavelength solution
        iraf.unlearn(iraf.gstransform)
        iraf.gstransform(f[:-4], wavtran=setupname + '.arc', fl_vardq=lcogtgemini.dodq)
def extract(scifiles):
    """Extract 1D spectra from the rectified 2D frames, then trim the blue end.

    Runs interactive gsextract on each 't'-prefixed frame, then cuts all
    pixels bluer than lcogtgemini.bluecut out of the resulting 'et' file,
    updating the WCS keywords to match the trimmed array.
    """
    for f in scifiles:
        iraf.unlearn(iraf.gsextract)
        # Extract the spectrum
        iraf.gsextract('t' + f[:-4], fl_inter='yes', bfunction='legendre',
                       fl_vardq=lcogtgemini.dodq,
                       border=2, bnaverage=-3, bniterate=2, blow_reject=2.0,
                       bhigh_reject=2.0, long_bsample='-100:-40,40:100',
                       background='fit', weights='variance',
                       lsigma=3.0, usigma=3.0, tnsum=100, tstep=100, mode='h')
        # Trim off below the blue side cut
        hdu = fits.open('et' + f[:-4] +'.fits', mode='update')
        lam = fits_utils.fitshdr_to_wave(hdu['SCI'].header)
        w = lam > lcogtgemini.bluecut
        # Keep only the red-of-cut pixels; data stays 2D with one row.
        trimmed_data =np.zeros((1, w.sum()))
        trimmed_data[0] = hdu['SCI'].data[0, w]
        hdu['SCI'].data = trimmed_data
        hdu['SCI'].header['NAXIS1'] = w.sum()
        hdu['SCI'].header['CRPIX1'] = 1
        hdu['SCI'].header['CRVAL1'] = lam[w][0]
        hdu.flush()
        hdu.close()
| 3,327 | 40.08642 | 107 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/wavelengths.py | import lcogtgemini
from lcogtgemini import utils, file_utils, fixpix
from pyraf import iraf
import numpy as np
from astropy.io import fits, ascii
import os
import time
def wavesol(arcfiles, rawpath):
    """Derive the wavelength solution from each arc (CuAr) frame.

    For each arc txt file: fix bad pixels, gsreduce (mosaic, no flat),
    interactively fit the wavelength solution with gswavelength, and
    gstransform the arc onto its own solution as a sanity check.
    """
    for f in arcfiles:
        binning = utils.get_binning(f, rawpath)
        fixed_rawpath = fixpix.fixpix(f, rawpath, binning, lcogtgemini.namps)
        iraf.unlearn(iraf.gsreduce)
        if lcogtgemini.dobias:
            bias_filename = "bias{binning}".format(binning=binning)
        else:
            bias_filename = ''
        iraf.gsreduce('@' + f, outimages=f[:-4], rawpath=fixed_rawpath,
                      fl_flat=False, bias=bias_filename, fl_bias=lcogtgemini.dobias,
                      fl_fixpix=False, fl_over=lcogtgemini.dooverscan, fl_cut=False, fl_gmosaic=True,
                      fl_gsappwave=True, fl_oversize=False, fl_vardq=lcogtgemini.dodq)
        # determine wavelength calibration -- 1d and 2d
        iraf.unlearn(iraf.gswavelength)
        iraf.gswavelength(f[:-4], fl_inter='yes', fl_addfeat=False, fwidth=15.0, low_reject=2.0,
                          high_reject=2.0, step=10, nsum=10, gsigma=2.0, cradius=16.0,
                          match=-6, order=7, fitcxord=7, fitcyord=7)
        if lcogtgemini.do_qecorr:
            # Make an extra random copy so that gqecorr works. Stupid Gemini.
            iraf.cp(f[:-4]+'.fits', f[:-4]+'.arc.fits')
        # transform the CuAr spectrum, for checking that the transformation is OK
        # output spectrum has prefix t
        iraf.unlearn(iraf.gstransform)
        iraf.gstransform(f[:-4], wavtran=f[:-4])
def calculate_wavelengths(arcfiles, rawpath):
    """Evaluate the arc wavelength solution at every unmosaiced pixel.

    For each arc setup, replaces each amplifier's data with the per-pixel
    wavelengths (via mosiac_coordinates + IRAF fceval) and writes the result
    to <setup>.wavelengths.fits.
    """
    for f in arcfiles:
        images = file_utils.get_images_from_txt_file(f)
        setupname = file_utils.getsetupname(f, calfile=True)
        output_file = setupname + '.wavelengths.fits'
        if os.path.exists(output_file):
            # short circuit
            # NOTE(review): this returns from the whole function, skipping any
            # remaining arc files too -- confirm whether 'continue' was intended.
            return
        binning = [float(i) for i in utils.get_binning(f, rawpath).split('x')]
        for image in images:
            # NOTE(review): hdu is rebound on every image but written out only
            # once after this loop, so only the last image's data is saved --
            # confirm this is intended (e.g. a single image per arc file).
            hdu = fits.open(os.path.join(rawpath, image))
            for i in range(1, lcogtgemini.namps + 1):
                print('Calculating wavelengths for {setup} for amplifier {amp}'.format(setup=setupname, amp=i))
                mosaic_file = mosiac_coordinates(hdu, i, setupname, binning)
                hdu[i].data = utils.convert_pixel_list_to_array(mosaic_file, hdu[i].data.shape[1], hdu[i].data.shape[0])
        hdu.writeto(output_file, overwrite=True)
def mosiac_coordinates(hdu, i, setupname, binning):
    """Compute the mosaiced (x, y) coordinates of every pixel of amplifier *i*
    and evaluate the arc wavelength solution there with IRAF fceval.

    Writes a pixel list and the evaluated wavelengths to text files named
    after *setupname* and returns the wavelength text filename.
    """
    start_time = time.time()
    # Fake mosaicing: 1-indexed pixel grids for this amplifier.
    X, Y = np.meshgrid(np.arange(hdu[i].data.shape[1]) + 1, np.arange(hdu[i].data.shape[0]) + 1)
    # np.float was removed from numpy; the builtin float is equivalent here.
    X = X.astype(float)
    Y = Y.astype(float)
    now = time.time()
    print('1st stop : {x} seconds'.format(x=now - start_time))
    start_time = now
    # Rotate the frame about the center for the given transformation
    x_center = (hdu[i].data.shape[1] / 2.0) + 0.5
    y_center = (hdu[i].data.shape[0] / 2.0) + 0.5
    # Subtract off the center
    X -= x_center
    Y -= y_center
    chip_number = (i - 1) // (lcogtgemini.namps // lcogtgemini.nchips)
    rotation = lcogtgemini.chip_rotations[chip_number]
    # Proper 2D rotation: both components are computed from the *unrotated*
    # coordinates. The previous code reused the already-rotated X when
    # computing Y and had a sign error on the cos term, so the transform was
    # not a rotation matrix.
    cos_theta = np.cos(np.radians(rotation))
    sin_theta = np.sin(np.radians(rotation))
    X, Y = cos_theta * X - sin_theta * Y, sin_theta * X + cos_theta * Y
    now = time.time()
    print('2nd stop : {x} seconds'.format(x=now - start_time))
    start_time = now
    # Add back in the chip centers
    X += x_center
    Y += y_center
    # Add in the X detsec
    X += (float(hdu[i].header['DETSEC'][1:].split(':')[0]) - 1.0) / binning[0]
    # Add in the X starting datasec
    X -= (float(hdu[i].header['DATASEC'][1:].split(':')[0]) - 1.0)
    # Add in the chip gaps * (i - 1) // 4
    X += chip_number * lcogtgemini.chip_gap_size / binning[0]
    # Add in the chip shifts
    X += lcogtgemini.xchip_shifts[chip_number] / binning[0]
    Y += lcogtgemini.ychip_shifts[chip_number] / binning[1]
    # Account for the fact that the center of binned pixels is not the center of unbinned pixels
    # Currently this assumes that the binning is 2 and there are 3 chips
    # I could not find any corresponding shift in the Y. Hopefully that is not a problem....
    if binning[0] > 1:
        X += (chip_number - 2) / binning[0]
    now = time.time()
    print('3rd stop : {x} seconds'.format(x=now - start_time))
    start_time = now
    # Write the coordinates to a text file
    pixel_list = setupname + '.{i}.pix.dat'.format(i=i)
    ascii.write({'x': X.ravel(), 'y': Y.ravel()}, pixel_list, names=['x', 'y'], format='fast_no_header')
    now = time.time()
    print('4th stop : {x} seconds'.format(x=now - start_time))
    start_time = now
    output_wavelength_textfile = setupname + '.{i}.waves.dat'.format(i=i)
    # Then evaluate the fit coords transformation at each pixel
    iraf.unlearn(iraf.fceval)
    iraf.flpr()
    iraf.flpr()
    iraf.fceval(pixel_list, output_wavelength_textfile, setupname + '.arc_001')
    now = time.time()
    print('5th stop : {x} seconds'.format(x=now - start_time))
    start_time = now
    return output_wavelength_textfile
| 5,218 | 38.240602 | 120 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/cosmicrays.py | import numpy as np
from astropy.io import fits
from astroscrappy.astroscrappy import detect_cosmics
import lcogtgemini
from lcogtgemini.fits_utils import tofits
from lcogtgemini import fixpix
def crreject(scifiles):
    """Detect cosmic rays with astroscrappy and interpolate over them.

    Uses the difference between the rectified frame and its sky-subtracted
    version as the background model, builds a mask of saturated/zero (and
    optionally DQ-flagged) pixels, writes the cosmic-ray mask to
    <f>.lamask.fits and fixes the flagged pixels in place with IRAF fixpix.
    """
    for f in scifiles:
        # run lacosmicx
        hdu_skysub = fits.open('st' + f.replace('.txt', '.fits'))
        hdu = fits.open('t' + f.replace('.txt', '.fits'))
        background = hdu[2].data - hdu_skysub[2].data
        readnoise = float(hdu[2].header['RDNOISE'])
        # figure out what pssl should be approximately
        d = hdu[2].data.copy()
        # Mask zero-valued pixels and pixels above the (gain-scaled) 50k ADU
        # saturation level so they are not flagged as cosmic rays.
        mask = d == 0.0
        mask = np.logical_or(mask, d > (50000.0 * float(hdu[0].header['GAINMULT'])))
        if lcogtgemini.dodq:
            mask = np.logical_or(mask, hdu['DQ'].data)
        crmask, _cleanarr = detect_cosmics(d, inbkg=background, inmask=mask, sigclip=5.0,
                                           objlim=6.0, sigfrac=0.1, gain=1.0,
                                           readnoise=readnoise)
        tofits(f[:-4] + '.lamask.fits', np.array(crmask, dtype=np.uint8), hdr=hdu['SCI'].header.copy())
        fixpix.run_fixpix('t' + f[:-4] + '.fits[2]', f[:-4] + '.lamask.fits')
| 1,193 | 37.516129 | 103 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/fixpix.py | import os
from pyraf import iraf
from lcogtgemini import file_utils
def fixpix(txtfile, rawpath, binning, namps):
    """Copy the raw frames listed in *txtfile* into ../raw_fixpix and
    interpolate over bad pixels in each amplifier using the per-amplifier,
    per-binning bad pixel masks. Returns the path of the fixed-pixel copies.
    """
    destination = '../raw_fixpix'
    for image in file_utils.get_images_from_txt_file(txtfile):
        # Create the destination directory lazily (checked each iteration,
        # matching the original behavior).
        if not os.path.exists(destination):
            iraf.mkdir(destination)
        iraf.cp(os.path.join(rawpath, image), destination + '/')
        for amp in range(1, namps + 1):
            extension_name = os.path.join(destination, image) + '[{i}]'.format(i=amp)
            mask_name = 'bpm.{i}.{binning}'.format(i=amp, binning=binning)
            run_fixpix(extension_name, mask_name)
    return destination
def run_fixpix(filename, maskname):
    """Interpolate over the pixels flagged in *maskname* within *filename*
    using IRAF's fixpix task (non-interactive, mode='h')."""
    # Run fixpix to interpolate over cosmic rays and bad pixels
    iraf.unlearn(iraf.fixpix)
    iraf.fixpix(filename, maskname, mode='h')
| 751 | 31.695652 | 82 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/fits_utils.py | import numpy as np
from astropy.io import fits
def sanitizeheader(hdr):
    """Return a copy of *hdr* with the mandatory FITS structural keywords
    removed (SIMPLE, BITPIX, BSCALE, BZERO, NAXIS and NAXISn) so the header
    can be attached to a new image, letting the new data define those values.
    """
    hdr = hdr.copy()
    # Let the new data decide what these values should be
    for keyword in ['SIMPLE', 'BITPIX', 'BSCALE', 'BZERO']:
        if keyword in hdr:
            hdr.pop(keyword)
    if 'NAXIS' in hdr:
        naxis = hdr.pop('NAXIS')
        for axis in range(naxis):
            axis_keyword = 'NAXIS%i' % (axis + 1)
            # Guard against a header that declares NAXIS but is missing one of
            # the NAXISn cards (the unguarded pop raised KeyError before).
            if axis_keyword in hdr:
                hdr.pop(axis_keyword)
    return hdr
def tofits(filename, data, hdr=None, clobber=False):
    """simple pyfits wrapper to make saving fits files easier.

    Writes *data* as a single primary HDU to *filename*; extra header cards
    from *hdr* are appended if given, and clobber=True overwrites an
    existing file.
    """
    hdu = fits.PrimaryHDU(data)
    if not (hdr is None):
        hdu.header += hdr
    hdulist = fits.HDUList([hdu])
    # output_verify='ignore' skips FITS-standard verification on write.
    hdulist.writeto(filename, overwrite=clobber, output_verify='ignore')
def clean_nans(filename):
    """Replace NaN and inf pixels in the primary HDU of *filename* with zero,
    modifying the file in place."""
    hdu = fits.open(filename, mode='update')
    data = hdu[0].data
    data[np.isnan(data)] = 0.0
    data[np.isinf(data)] = 0.0
    hdu.flush()
    hdu.close()
def fitshdr_to_wave(hdr):
    """Build the wavelength array described by a 1D FITS WCS header.

    Uses CRVAL1/CRPIX1 plus CDELT1 (preferred) or CD1_1 for the step and
    NAXIS1 for the length. CRPIX1 is converted to zero-indexed convention.
    """
    reference_value = float(hdr['CRVAL1'])
    # Convert the (1-indexed) reference pixel to zero-indexed.
    reference_pixel = float(hdr['CRPIX1']) - 1
    if 'CDELT1' in hdr.keys():
        step = float(hdr['CDELT1'])
    else:
        step = float(hdr['CD1_1'])
    n_pixels = float(hdr['NAXIS1'])
    start = reference_value - step * reference_pixel
    # The small epsilon keeps arange from emitting one pixel too many.
    stop = reference_value + step * (n_pixels - reference_pixel) - 1e-4
    return np.arange(start, stop, step)
def hdr_pixel_range(x0, x1, y0, y1):
    """Format a FITS section keyword value, e.g. '[x0:x1,y0:y1]'."""
    return '[%d:%d,%d:%d]' % (x0, x1, y0, y1)
def get_x_pixel_range(keyword_value):
    """
    Get the x part of a section keyword

    :param keyword_value: Header keyword string like '[x0:x1,y0:y1]'
    :return: list xrange: 2 element list with start and end 1-indexed pixel values
    """
    # Drop the surrounding brackets, keep the part before the comma,
    # and split it on the colon.
    x_section, _, _ = keyword_value[1:-1].partition(',')
    return x_section.split(':')
def cut_gs_image(filename, output_filename, pixel_range, namps):
    """
    Trim every amplifier extension of a GMOS image to a y-pixel range.

    :param filename: Input multi-extension FITS file.
    :param output_filename: File to write the trimmed copy to.
    :param pixel_range: array-like, The range of pixels to keep, python indexed,
                        given in binned pixels
    :param namps: Number of amplifier extensions to update.
    """
    # NOTE(review): 'unit16' looks like a typo for the 'uint' keyword of
    # fits.open -- confirm against the astropy.io.fits API before changing.
    hdu = fits.open(filename, unit16=True)
    for i in range(1, namps + 1):
        ccdsum = hdu[i].header['CCDSUM']
        # np.int was removed from numpy; the builtin int is equivalent here.
        ccdsum = np.array(ccdsum.split(), dtype=int)
        # Convert the binned y-range to 1-indexed unbinned detector pixels.
        y_ccdsec = [(pixel_range[0] * ccdsum[1]) + 1,
                    (pixel_range[1]) * ccdsum[1]]
        x_detector_section = get_x_pixel_range(hdu[i].header['DETSEC'])
        hdu[i].header['DETSEC'] = hdr_pixel_range(int(x_detector_section[0]), int(x_detector_section[1]), y_ccdsec[0], y_ccdsec[1])
        x_ccd_section = get_x_pixel_range(hdu[i].header['CCDSEC'])
        hdu[i].header['CCDSEC'] = hdr_pixel_range(int(x_ccd_section[0]), int(x_ccd_section[1]), y_ccdsec[0], y_ccdsec[1])
        numpix = pixel_range[1] - pixel_range[0]
        # BIASSEC/DATASEC describe the trimmed frame, so their y-range starts at 1.
        x_bias_section = get_x_pixel_range(hdu[i].header['BIASSEC'])
        hdu[i].header['BIASSEC'] = hdr_pixel_range(int(x_bias_section[0]), int(x_bias_section[1]), 1, numpix)
        x_data_section = get_x_pixel_range(hdu[i].header['DATASEC'])
        hdu[i].header['DATASEC'] = hdr_pixel_range(int(x_data_section[0]), int(x_data_section[1]), 1, numpix)
        hdu[i].data = hdu[i].data[pixel_range[0]:pixel_range[1], :]
    hdu.writeto(output_filename)
    hdu.close()
def updatecomheader(extractedfiles, filename):
    """Update the header of a combined spectrum from its input extracted files.

    Sets AIRMASS to the mean of the inputs, SLIT from the first input's
    MASKNAME, copies header cards missing from the combined file, and merges
    DATE-OBS and TIME-OBS into a single ISO-style DATE-OBS string.
    """
    airmasses = []
    exptimes = []
    for f in extractedfiles:
        airmasses.append(float(fits.getval(f, 'AIRMASS')))
        exptimes.append(float(fits.getval(f, 'EXPTIME')))
    fits.setval(filename, 'AIRMASS', value=np.mean(airmasses))
    # SLIT is the mask name with the 'arcsec' suffix stripped, e.g. '1.0'
    fits.setval(filename, 'SLIT', value=fits.getval(extractedfiles[0], 'MASKNAME').replace('arcsec', ''))
    comhdu = fits.open(filename, mode='update')
    extractedhdu = fits.open(extractedfiles[0])
    # Copy over any header card the combined file is missing, fixing
    # non-standard cards so the write does not fail verification.
    for k in extractedhdu[0].header.keys():
        if not k in comhdu[0].header.keys():
            extractedhdu[0].header.cards[k].verify('fix')
            comhdu[0].header.append(extractedhdu[0].header.cards[k])
    comhdu.flush(output_verify='fix')
    comhdu.close()
    extractedhdu.close()
    # Combine DATE-OBS and TIME-OBS into DATE-OBS as 'YYYY-MM-DDThh:mm:ss'
    dateobs = fits.getval(filename, 'DATE-OBS')
    dateobs += 'T' + fits.getval(filename, 'TIME-OBS')
    fits.setval(filename, 'DATE-OBS', value=dateobs)
def spectoascii(infilename, outfilename):
    """Write a FITS spectrum to a two-column (wavelength, flux) ascii file.

    Prefers the 'SCI' extension; falls back to the primary HDU when no
    'SCI' extension exists.
    """
    hdu = fits.open(infilename)
    try:
        lam = fitshdr_to_wave(hdu['SCI'].header.copy())
        flux = hdu['SCI'].data.copy()
    except KeyError:
        # No 'SCI' extension (or missing WCS keyword): use the primary HDU.
        # The previous bare `except:` also swallowed unrelated errors.
        lam = fitshdr_to_wave(hdu[0].header.copy())
        flux = hdu[0].data.copy()
    hdu.close()
    d = np.zeros((2, len(lam)))
    d[0] = lam
    d[1] = flux
    np.savetxt(outfilename, d.transpose())
lcogtgemini | lcogtgemini-master/lcogtgemini/bpm.py | import lcogtgemini
from pyraf import iraf
from astropy.io import fits
import numpy as np
def get_bad_pixel_mask(binnings, yroi):
    """Create per-amplifier bad pixel mask files 'bpm.{amp}.{binx}x{biny}.fits'.

    For Hamamatsu detectors, cut the reference BPM to the y region of
    interest and block-average it to each requested binning; otherwise write
    all-zero (no bad pixel) masks of the appropriate binned size.

    :param binnings: iterable of binning strings, e.g. ['2x2'].
    :param yroi: [ystart, yend] 1-indexed unbinned y range to keep.
    """
    if lcogtgemini.detector == 'Hamamatsu':
        if lcogtgemini.is_GS:
            bpm_file = 'bpm_gs.fits'
        else:
            bpm_file = 'bpm_gn.fits'
        # NOTE(review): astropy's documented keyword is `uint`, not `uint16`
        # — confirm this argument is actually honored.
        bpm_hdu = fits.open(bpm_file, uint16=True)
        for i in range(1, lcogtgemini.namps + 1):
            # Trim the reference mask to the ROI (DETSEC is 1-indexed)
            bpm_hdu[i].data = bpm_hdu[i].data[yroi[0]-1:yroi[1]]
            bpm_hdu[i].writeto('bpm.{i}.unbinned.fits'.format(i=i), overwrite=True)
            for binning in binnings:
                binning_list = binning.split('x')
                binx, biny = int(binning_list[0]), int(binning_list[1])
                iraf.unlearn('blkavg')
                binned_bpm_filename = 'bpm.{i}.{x}x{y}.fits'.format(i=i,x=binx, y=biny)
                # Block-average the unbinned mask down to the requested binning
                iraf.blkavg('bpm.{i}.unbinned.fits[1]'.format(i=i),
                            binned_bpm_filename, binx, biny)
                averaged_bpm = fits.open(binned_bpm_filename)
                # Remove some header keywords from the BPM that confuses fixpix
                for keyword in ['LTV1', 'LTV2', 'LTM1_1', 'LTM2_2']:
                    averaged_bpm[0].header.remove(keyword, ignore_missing=True)
                # Any binned pixel touching a bad unbinned pixel (>0.1 after
                # averaging) is marked bad (1)
                averaged_bpm[0].data[averaged_bpm[0].data > 0.1] = 1
                averaged_bpm[0].data = averaged_bpm[0].data.astype(np.uint8)
                averaged_bpm.writeto(binned_bpm_filename, overwrite=True)
    else:
        for binning in binnings:
            binning_list = binning.split('x')
            binx, biny = int(binning_list[0]), int(binning_list[1])
            for i in range(1, lcogtgemini.namps + 1):
                # No reference mask available: write an all-good mask
                bpm_data = np.zeros((2048 // int(biny), 1080 // int(binx)), dtype=np.uint8)
                binned_bpm_filename = 'bpm.{i}.{x}x{y}.fits'.format(i=i, x=binx, y=biny)
                fits.writeto(binned_bpm_filename, bpm_data)
| 1,925 | 43.790698 | 91 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/utils.py | from astropy.io import ascii, fits
import numpy as np
from lcogtgemini import file_utils
import os
from scipy.signal import butter, lfilter
import lcogtgemini
def mad(d):
    """Median absolute deviation of *d* about its median."""
    deviations = np.abs(np.median(d) - d)
    return np.median(deviations)
def magtoflux(wave, mag, zp):
    """Convert an AB magnitude to f_lambda at wavelength *wave* (Angstroms).

    3.33564095e-19 is lambda^2 / c in units of angstrom / Hz; *zp* is the
    flux zero point.
    """
    linear_flux = zp * 10 ** (-0.4 * mag)
    return linear_flux / 3.33564095e-19 / wave / wave
def fluxtomag(flux):
    """Convert a (positive) flux to a magnitude: -2.5 * log10(flux)."""
    log_flux = np.log10(flux)
    return -2.5 * log_flux
def get_y_roi(txtfile, rawpath):
    """Return the [ystart, yend] detector y range (1-indexed, from DETSEC)
    of the first image listed in *txtfile* found under *rawpath*."""
    images = file_utils.get_images_from_txt_file(txtfile)
    hdu = fits.open(os.path.join(rawpath, images[0]))
    # DETSEC looks like '[x1:x2,y1:y2]'; take the y pair from extension 1
    return [int(i) for i in hdu[1].header['DETSEC'][1:-1].split(',')[1].split(':')]
def boxcar_smooth(spec_wave, spec_flux, smoothwidth):
    """
    Smooth a spectrum with a flux-conserving boxcar kernel.

    :param spec_wave: Wavelength array (works best with uniform linear spacing).
    :param spec_flux: Flux array to smooth.
    :param smoothwidth: Kernel width in the same units as spec_wave.
    :return: Smoothed copy of spec_flux; the half-kernel-width edges are left
             unsmoothed.
    """
    # get the average wavelength separation for the observed spectrum
    # This will work best if the spectrum has equal linear wavelength spacings
    wavespace = np.diff(spec_wave).mean()
    # kernel width in pixels
    kw = int(smoothwidth / wavespace)
    # make sure the kernel width is odd
    if kw % 2 == 0:
        kw += 1
    kernel = np.ones(kw)
    # Conserve flux
    kernel /= kernel.sum()
    smoothed = spec_flux.copy()
    # Use floor division: kw / 2 is a float under Python 3 and would raise
    # a TypeError when used as a slice index.
    half_width = kw // 2
    smoothed[half_width:-half_width] = np.convolve(spec_flux, kernel, mode='valid')
    return smoothed
def get_binning(txt_filename, rawpath):
    """Return the CCD binning string (e.g. '2x2') of the first image listed
    in *txt_filename*, read from the CCDSUM keyword of extension 1."""
    with open(txt_filename) as f:
        lines = f.readlines()
    # CCDSUM is space-separated ('2 2'); report it as '2x2'
    return fits.getval(rawpath + lines[0].rstrip(), 'CCDSUM', 1).replace(' ', 'x')
def convert_pixel_list_to_array(filename, nx, ny):
    """Read an IRAF pixel list file and reshape its third column into an
    (ny, nx) array."""
    data = ascii.read(filename, format='fast_no_header')
    return data['col3'].reshape(ny, nx)
def rescale1e15(filename):
    """Multiply the primary HDU data of *filename* by 1e15, in place."""
    hdu = fits.open(filename, mode='update')
    hdu[0].data *= 1e15
    hdu.flush()
    hdu.close()
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth bandpass filter.

    :param lowcut: low cutoff frequency (same units as fs)
    :param highcut: high cutoff frequency
    :param fs: sampling frequency
    :param order: filter order
    :return: (b, a) filter coefficients for use with lfilter
    """
    nyquist = 0.5 * fs
    critical_frequencies = [lowcut / nyquist, highcut / nyquist]
    return butter(order, critical_frequencies, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply a Butterworth bandpass filter to *data* and return the result."""
    coefficients = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(coefficients[0], coefficients[1], data)
def get_wavelengths_of_chips(wavelengths_hdu):
    """Return a list of (min, max) wavelength tuples, one per detector chip.

    Wavelengths are sampled along the middle row of each amplifier, 10
    pixels inside each edge of DATASEC, using the per-amp wavelength maps in
    *wavelengths_hdu*. Relies on the module globals lcogtgemini.namps and
    lcogtgemini.nchips.
    """
    midline = wavelengths_hdu[1].data.shape[0] // 2
    amps_per_chip = lcogtgemini.namps // lcogtgemini.nchips
    chips = []
    # For each chip
    for c in range(lcogtgemini.nchips):
        chip_wavelengths = []
        # For each amplifier on the chip
        for amplifier in range(c * amps_per_chip + 1, (c + 1) * amps_per_chip + 1):
            # Get the wavelength 10 pixels into datasec and 10 pixels from the edge of datasec
            start_data_range, end_data_range = [int(x) for x in wavelengths_hdu[amplifier].header['DATASEC'][1:-1].split(',')[0].split(':')]
            chip_wavelengths.append(wavelengths_hdu[amplifier].data[midline, 9 + start_data_range])
            chip_wavelengths.append(wavelengths_hdu[amplifier].data[midline, end_data_range - 9])
        # Take the min and max wavelengths of the chip's amplifiers
        chips.append((min(chip_wavelengths), max(chip_wavelengths)))
    return chips
| 3,053 | 31.147368 | 140 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/combine.py | import numpy as np
import lcogtgemini
from astropy.io import fits, ascii
from lcogtgemini import fits_utils
from lcogtgemini import file_utils
from astropy.convolution import convolve, Gaussian1DKernel
from lcogtgemini import utils
from pyraf import iraf
def find_bad_pixels(data, threshold=30.0):
    """Flag deviant pixels in a 1D spectrum.

    Pixels whose pixel-to-pixel difference (normalized by a Gaussian-smoothed
    version of the spectrum) exceeds *threshold* times the robust scatter are
    flagged; both pixels of each deviant pair are marked bad.

    :return: boolean array of the same shape as *data*, True where bad.
    """
    # Take the abs next pixel diff
    absdiff = np.abs(data[1:] - data[:-1])
    scaled_diff = absdiff / convolve(data, Gaussian1DKernel(stddev=20.0))[1:]
    # divide by square root of 2 (each diff combines the noise of two pixels)
    scatter = scaled_diff / np.sqrt(2.0)
    # Take the median
    # multiply by 1.48 to go from mad to stdev
    scatter = np.median(scatter) * 1.48
    # Anything more deviant than threshold * scatter is an outlier
    deviant_pixels = scaled_diff > (threshold * scatter)
    # Each deviant difference implicates both of its pixels
    bad_pixels = np.zeros(data.shape, dtype=bool)
    bad_pixels[0:-1] = deviant_pixels
    bad_pixels[1:] = np.logical_or(bad_pixels[1:], deviant_pixels)
    return bad_pixels
def speccombine(fs, outfile):
    """Scale and combine the 1D spectra in *fs* onto a common wavelength grid
    and write the average to *outfile*.

    Each spectrum is scaled to the first by the median flux ratio in the
    overlap region (outside telluric bands), masked outside its chip
    wavelength ranges and at bad pixels, interpolated onto a common linear
    grid, and averaged with equal weight per contributing spectrum. The
    per-spectrum scales are written to 'scales.dat'.
    """
    scales = []
    wavelengths = []
    for f in fs:
        hdu = fits.open(f)
        wavelengths.append(fits_utils.fitshdr_to_wave(hdu['SCI'].header))
    # Get the overlaps
    overlap_min_w = 0.0
    overlap_max_w = 100000.0
    min_w = 1000000.0
    max_w = 0.0
    wavelength_step = 1000.0
    for wavelength in wavelengths:
        overlap_min_w = max([overlap_min_w, np.min(wavelength)])
        overlap_max_w = min([overlap_max_w, np.max(wavelength)])
        min_w = min([min_w, np.min(wavelength)])
        max_w = max([max_w, np.max(wavelength)])
        wavelength_step = min([wavelength_step, wavelength[1] - wavelength[0]])
    # Do not extend blueward of the configured blue cutoff
    min_w = np.max([min_w, lcogtgemini.bluecut])
    first_hdu = fits.open(fs[0])
    first_wavelengths = fits_utils.fitshdr_to_wave(first_hdu['SCI'].header)
    bad_pixels = find_bad_pixels(first_hdu['SCI'].data[0])
    first_hdu['SCI'].data[0][bad_pixels] = 0.0
    wavelength_grid = np.arange(min_w, max_w + wavelength_step, wavelength_step)
    data_to_combine = np.zeros((len(fs), wavelength_grid.shape[0]))
    for i, f in enumerate(fs):
        hdu = fits.open(f)
        wavelengths = fits_utils.fitshdr_to_wave(hdu['SCI'].header)
        # Zero out pixels that fall outside the wavelength span of any chip
        in_chips = np.zeros(wavelengths.shape, dtype=bool)
        basename = file_utils.get_base_name(f)
        wavelengths_hdu = fits.open(basename +'.wavelengths.fits')
        chips = utils.get_wavelengths_of_chips(wavelengths_hdu)
        for chip in chips:
            in_chip = np.logical_and(wavelengths >= min(chip), wavelengths <= max(chip))
            in_chips[in_chip] = True
        hdu['SCI'].data[0][~in_chips] = 0.0
        overlap = np.logical_and(wavelengths >= overlap_min_w, wavelengths <= overlap_max_w)
        # Reject outliers, but never inside the telluric bands where sharp
        # features are real
        bad_pixels = find_bad_pixels(hdu['SCI'].data[0])
        in_telluric = np.logical_and(wavelengths >= 6640.0, wavelengths <= 7040.0)
        in_telluric = np.logical_or(in_telluric, np.logical_and(wavelengths >= 7550.0, wavelengths <= 7750.0))
        bad_pixels[in_telluric] = False
        # Take the median of the ratio of each spectrum to the first to get the rescaling
        first_fluxes = np.interp(wavelengths[overlap], first_wavelengths, first_hdu['SCI'].data[0], left=0.0, right=0.0)
        good_pixels = np.ones(hdu['SCI'].data[0].shape[0], dtype=bool)
        good_pixels[overlap] = np.logical_and(hdu['SCI'].data[0][overlap] != 0.0, first_fluxes != 0.0)
        good_pixels = np.logical_and(good_pixels, ~(bad_pixels))
        scale = np.median(first_fluxes[good_pixels[overlap]] / hdu['SCI'].data[0][overlap][good_pixels[overlap]])
        scales.append(scale)
        # Poison masked pixels so they interpolate to negative values and are
        # clipped to zero below (zero weight in the average)
        hdu['SCI'].data[0][hdu['SCI'].data[0] == 0.0] = -1.e9
        hdu['SCI'].data[0][bad_pixels] = -1.e9
        data_to_combine[i] = np.interp(wavelength_grid, wavelengths, hdu['SCI'].data[0], left=0.0, right=0.0)
        data_to_combine[data_to_combine < 0.0] = 0.0
        data_to_combine[i] *= scale
    # write the scales into a file
    ascii.write({'scale': scales}, 'scales.dat', names=['scale'], format='fast_no_header')
    # Average over the spectra that contributed (non-zero) at each pixel
    combined_data = data_to_combine.sum(axis=0)
    weights = (data_to_combine > 0).sum(axis=0)
    weights[weights == 0.0] = 1.0
    combined_data /= weights
    # Write the result with a linear wavelength WCS
    first_hdu[0].data = combined_data
    first_hdu[0].header['CRPIX1'] = 1
    first_hdu[0].header['CRVAL1'] = min_w
    first_hdu[0].header['CD1_1'] = wavelength_step
    first_hdu[0].header['CD2_2'] = 1
    first_hdu[0].header['CTYPE1'] = 'LINEAR '
    first_hdu[0].header['CTYPE2'] = 'LINEAR '
    first_hdu[0].header['WAT1_001'] = 'wtype=linear label=Wavelength units=angstroms'
    first_hdu[0].header['WAT0_001'] = 'system=equispec'
    first_hdu[0].header['WAT2_001'] = 'wtype=linear'
    first_hdu[0].header['APNUM1'] = first_hdu['SCI'].header['APNUM1']
    first_hdu[0].writeto(outfile)
    iraf.unlearn(iraf.scombine)
| 4,813 | 40.5 | 120 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/flats.py | import lcogtgemini
from lcogtgemini.utils import get_binning
from lcogtgemini.file_utils import getsetupname
from lcogtgemini import fits_utils
from lcogtgemini import fixpix
from lcogtgemini import fitting
from lcogtgemini import utils
import numpy as np
from pyraf import iraf
from astropy.io import fits
import os
from lcogtgemini import combine
from astropy.table import Table
def reduce_flat(flatfile, rawpath):
    """Run the IRAF/Gemini reduction chain on a flat field list file:
    fixpix -> gsreduce (bias/overscan) -> optional gqecorr -> gmosaic ->
    gstransform (wavelength rectification using the matching arc).

    :param flatfile: '.txt' list file naming the raw flat frames.
    :param rawpath: directory containing the raw frames.
    """
    binning = get_binning(flatfile, rawpath)
    fixed_rawpath = fixpix.fixpix(flatfile, rawpath, binning, lcogtgemini.namps)
    setupname = getsetupname(flatfile, calfile=True)
    # Use IRAF to get put the data in the right format and subtract the
    # bias
    # This will currently break if multiple flats are used for a single setting
    iraf.unlearn(iraf.gsreduce)
    if lcogtgemini.dobias:
        biasfile = "bias{binning}".format(binning=binning)
    else:
        biasfile = ''
    iraf.gsreduce('@' + flatfile, outimages=flatfile[:-4] + '.mef.fits', rawpath=fixed_rawpath, fl_bias=lcogtgemini.dobias,
                  bias=biasfile, fl_over=lcogtgemini.dooverscan, fl_flat=False, fl_gmosaic=False,
                  fl_fixpix=False, fl_gsappwave=False, fl_cut=False, fl_title=False,
                  fl_oversize=False, fl_vardq=lcogtgemini.dodq)
    if lcogtgemini.do_qecorr:
        # Renormalize the chips to remove the discrete jump in the
        # sensitivity due to differences in the QE for different chips
        iraf.unlearn(iraf.gqecorr)
        iraf.gqecorr(flatfile[:-4] + '.mef', outimages=flatfile[:-4] + '.qe.fits', fl_keep=True, fl_correct=True,
                     refimages=flatfile[:-4].replace('flat', 'arc.arc.fits'),
                     corrimages=flatfile[:-9] + '.qe.fits', verbose=True, fl_vardq=lcogtgemini.dodq)
        mosaic_input = flatfile[:-4] + '.qe.fits'
    else:
        mosaic_input = flatfile[:-4] + '.mef.fits'
    iraf.unlearn(iraf.gmosaic)
    iraf.gmosaic(mosaic_input, outimages=flatfile[:-4] + '.mos.fits', fl_vardq=lcogtgemini.dodq, fl_clean=False)
    # Rectify to the wavelength solution from the matching arc
    iraf.unlearn(iraf.gstransform)
    iraf.gstransform(flatfile[:-4]+'.mos.fits', wavtran=setupname + '.arc', fl_vardq=lcogtgemini.dodq)
def makemasterflat(flatfiles, rawpath, plot=True):
    """Reduce and normalize each flat field, producing '<flat>.fits' masters.

    The mosaicked flat is collapsed along the spatial axis, the lamp shape is
    fit per chip with a polynomial + Fourier model (interactively), and the
    unmosaicked flat is divided by the evaluated fit so that only the
    pixel-to-pixel response remains. Fitted lamp shapes are saved to
    'lamp.<flat>.<chip>.dat'.
    """
    # normalize the flat fields
    for flatfile in flatfiles:
        # Short circuit if this master flat already exists
        if os.path.exists(flatfile[:-4] + '.fits'):
            continue
        reduce_flat(flatfile, rawpath)
        setupname = getsetupname(flatfile, calfile=True)
        flat_hdu = fits.open('t'+ flatfile[:-4] + '.mos.fits')
        wavelengths_hdu = fits.open(setupname+'.wavelengths.fits')
        # Open the unmoasiced (and optionally qe corrected flat file)
        if lcogtgemini.do_qecorr:
            unmosaiced_file = flatfile[:-4] + '.qe.fits'
        else:
            unmosaiced_file = flatfile[:-4] + '.mef.fits'
        unmosaiced_hdu = fits.open(unmosaiced_file)
        chips = utils.get_wavelengths_of_chips(wavelengths_hdu)
        # Median out the fringing
        data = np.median(flat_hdu['SCI'].data, axis=0)
        wavelengths = fits_utils.fitshdr_to_wave(flat_hdu['SCI'].header)
        errors = np.sqrt(np.abs(data) + float(flat_hdu['SCI'].header['RDNOISE']) ** 2.0)
        for nchip, chip in enumerate(chips):
            good_data = data != 0.0
            # Clip the ends because of craziness that happens at the edges
            good_data[:20] = False
            good_data[-20:] = False
            bad_pixels = combine.find_bad_pixels(data)
            good_data = np.logical_and(good_data, ~bad_pixels)
            in_chip = np.logical_and(wavelengths >= min(chip), wavelengths <= max(chip))
            best_fit = fitting.fit_polynomial_fourier_model(wavelengths[in_chip], data[in_chip], errors[in_chip], 3, 11, good_data[in_chip], weight_scale=10.0)
            Table({'wavelength': wavelengths[in_chip],
                   'flux': fitting.eval_fit(best_fit, wavelengths[in_chip])}).write('lamp.{0}.{1}.dat'.format(flatfile[:-4], nchip),
                                                                                   format='ascii.fast_no_header')
            for i in range(1, lcogtgemini.namps + 1):
                x_pixel_range = fits_utils.get_x_pixel_range(wavelengths_hdu[i].header['DATASEC'])
                unmosaiced_wavelengths = wavelengths_hdu[i].data[:, int(x_pixel_range[0]) - 1:int(x_pixel_range[1])]
                # If more than half of the wavelengths in this amp are on the chip
                midline = wavelengths_hdu[i].data.shape[0] // 2
                in_chip = np.logical_and(unmosaiced_wavelengths[midline] >= min(chip), unmosaiced_wavelengths[midline] <= max(chip))
                if in_chip.sum() > 0.5 * in_chip.shape[0]:
                    # Remove the lamp shape from this amplifier
                    unmosaiced_hdu[i].data /= fitting.eval_fit(best_fit, unmosaiced_wavelengths)
        unmosaiced_hdu.writeto(flatfile[:-4] + '.fits')
| 4,882 | 46.872549 | 159 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/sort.py | import lcogtgemini
import numpy as np
from pyraf import iraf
import os
from glob import glob
from astropy.io import fits
def sort():
    """Organize the current directory for reduction.

    Moves all FITS and dat files into 'raw/', creates 'work/', copies any
    existing calibration products (sensitivity functions, telluric
    corrections, standard-star files, bias, bad pixel mask, QE, wavelength
    and flat files) into 'work/', and returns the raw file list with '../'
    prefixes (paths relative to 'work/').

    :return: numpy array of '../raw/*.fits' file names.
    """
    if not os.path.exists('raw'):
        iraf.mkdir('raw')
    fs = glob('*.fits')
    fs += glob('*.dat')
    for f in fs:
        iraf.mv(f, 'raw/')
    # Make a reduction directory
    if not os.path.exists('work'):
        iraf.mkdir('work')
    # Copy previously-made sensitivity functions, if any
    sensfs = glob('raw/sens*.fits')
    if len(sensfs) != 0:
        for f in sensfs:
            iraf.cp(f, 'work/')
    if os.path.exists('raw/telcor.dat'):
        iraf.cp('raw/telcor.dat', 'work/')
    if os.path.exists('raw/telluric_model.dat'):
        iraf.cp('raw/telluric_model.dat', 'work/')
    std_files = glob('raw/*.std.dat')
    if len(std_files) != 0:
        for f in std_files:
            iraf.cp(f, 'work/')
    if os.path.exists('raw/bias.fits'):
        iraf.cp('raw/bias.fits', 'work/')
    # Only one bad pixel mask is expected; copy the first match
    bpm_file_list = glob('raw/bpm_g?.fits')
    if len(bpm_file_list) != 0:
        iraf.cp(bpm_file_list[0], 'work/')
    fs = glob('raw/*.qe.fits')
    if len(fs) > 0:
        for f in fs:
            iraf.cp(f, 'work/')
    fs = glob('raw/*.wavelengths.fits')
    if len(fs) > 0:
        for f in fs:
            iraf.cp(f, 'work/')
    fs = glob('raw/*.flat.fits')
    if len(fs) > 0:
        for f in fs:
            iraf.cp(f, 'work/')
    # make a list of the raw files
    fs = glob('raw/*.fits')
    # Add a ../ in front of all of the file names
    for i in range(len(fs)):
        fs[i] = '../' + fs[i]
    return np.array(fs)
def init_northsouth(fs, topdir, rawpath):
    """Configure module-level detector/site globals from the first frame.

    Sets lcogtgemini.is_GS, detector type, amplifier count, overscan/QE
    flags and chip geometry based on the OBSERVAT and DETECTOR keywords of
    fs[0], then picks the site-appropriate standard-star directory and
    extinction file.

    :return: (extfile, observatory, base_stddir, rawpath)
    """
    lcogtgemini.is_GS = fits.getval(fs[0], 'OBSERVAT') == 'Gemini-South'
    if 'Hamamatsu' in fits.getval(fs[0], 'DETECTOR'):
        lcogtgemini.dooverscan = True
        lcogtgemini.do_qecorr = True
        lcogtgemini.detector = 'Hamamatsu'
        lcogtgemini.namps = 12
        # Chip alignment offsets (pixels) and rotations differ per site
        if lcogtgemini.is_GS:
            lcogtgemini.xchip_shifts = [-1.2, 0.0, 0.0]
            lcogtgemini.ychip_shifts = [0.71, 0.0, -0.73]
            lcogtgemini.chip_rotations = [0.0, 0.0, 0.0]
            lcogtgemini.chip_gap_size = 61.0
        else:
            lcogtgemini.xchip_shifts = [-0.95, 0.0, 0.48]
            lcogtgemini.ychip_shifts = [-0.21739, 0.0, 0.1727]
            lcogtgemini.chip_rotations = [-0.004, 0.0, -0.00537 ]
            lcogtgemini.chip_gap_size = 67.0
    elif not lcogtgemini.is_GS:
        # Gemini-North E2V DD detector
        lcogtgemini.namps = 6
        lcogtgemini.dooverscan = True
        lcogtgemini.chip_gap_size = 37.0
        lcogtgemini.xchip_shifts = [-2.7, 0.0, 2.8014]
        lcogtgemini.ychip_shifts = [-0.749, 0.0, 2.05]
        lcogtgemini.chip_rotations = [-0.009, 0.0, -0.003]
        lcogtgemini.detector = 'E2V DD'
    if lcogtgemini.is_GS:
        base_stddir = 'ctionewcal/'
        observatory = 'Gemini-South'
        extfile = iraf.osfn('gmisc$lib/onedstds/ctioextinct.dat')
    else:
        base_stddir = 'spec50cal/'
        extfile = iraf.osfn('gmisc$lib/onedstds/kpnoextinct.dat')
        observatory = 'Gemini-North'
    return extfile, observatory, base_stddir, rawpath
| 3,071 | 29.117647 | 72 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/flux_calibration.py | import os
import numpy as np
from astropy.io import fits, ascii
from pyraf import iraf
import lcogtgemini.file_utils
from lcogtgemini import combine
from lcogtgemini import fits_utils
from lcogtgemini import file_utils
from lcogtgemini import fitting
from lcogtgemini import utils
from lcogtgemini.file_utils import getredorblue, get_standard_file
def flux_calibrate(scifiles):
    """Apply the sensitivity function to each telluric-corrected, extracted
    science spectrum ('xet*.fits'), writing flux-calibrated 'cxet*.fits'
    files and displaying each result with splot."""
    for f in scifiles:
        redorblue = getredorblue(f)
        # Read in the sensitivity function (in magnitudes)
        sensitivity_hdu = fits.open('sens{redorblue}.fits'.format(redorblue=redorblue))
        sensitivity_wavelengths = fits_utils.fitshdr_to_wave(sensitivity_hdu['SCI'].header)
        # Interpolate the sensitivity onto the science wavelengths
        science_hdu = fits.open('xet'+ f.replace('.txt', '.fits'))
        science_wavelengths = fits_utils.fitshdr_to_wave(science_hdu['SCI'].header)
        sensitivity_correction = np.interp(science_wavelengths, sensitivity_wavelengths, sensitivity_hdu['SCI'].data)
        # Multiply the science spectrum by the corrections
        science_hdu['SCI'].data *= 10 ** (-0.4 * sensitivity_correction)
        # Normalize to counts per second
        science_hdu['SCI'].data /= float(science_hdu[0].header['EXPTIME'])
        science_hdu.writeto('cxet' + f[:-4] + '.fits')
        if os.path.exists('cxet' + f[:-4] + '.fits'):
            iraf.unlearn(iraf.splot)
            iraf.splot('cxet' + f.replace('.txt', '.fits') + '[sci]')  # just to check
def makesensfunc(scifiles, objname, base_stddir):
    """Create a sensitivity function ('sensred.fits'/'sensblue.fits') from
    any standard-star observations found among *scifiles*."""
    for f in scifiles:
        # Find the standard star file
        standard_file = get_standard_file(objname, base_stddir)
        redorblue = getredorblue(f)
        setupname = file_utils.getsetupname(f)
        # If this is a standard star, run standard
        # Standards will have an observation class of either progCal or partnerCal
        obsclass = fits.getval(f[:-4] + '.fits', 'OBSCLASS')
        if obsclass == 'progCal' or obsclass == 'partnerCal':
            wavelengths_filename = setupname + '.wavelengths.fits'
            specsens('xet' + f[:-4] + '.fits', 'sens' + redorblue + '.fits',
                     standard_file, wavelengths_filename, float(fits.getval(f[:-4] + '.fits', 'EXPTIME')))
def specsens(specfile, outfile, stdfile, wavelengths_filename, exptime=None,
             stdzp=3.68e-20, thresh=8, clobber=True):
    """Derive a sensitivity function from an observed standard star.

    Per detector chip, fits the observed counts as (polynomial+Fourier
    sensitivity) x (telluric model) x (reference standard flux), strips the
    telluric part, and stores the resulting sensitivity in magnitudes in
    extension 2 of *outfile*. Gaps between chips are filled by interpolation.

    NOTE(review): exptime, stdzp, thresh and clobber are accepted but unused
    in this body — presumably kept for interface compatibility; confirm.
    """
    # Read in the reference star spectrum
    standard = ascii.read(stdfile, comment='#')
    # Read in the observed data
    observed_hdu = fits.open(specfile)
    observed_data = observed_hdu[2].data[0]
    observed_wavelengths = fits_utils.fitshdr_to_wave(observed_hdu[2].header)
    telluric_model = lcogtgemini.file_utils.read_telluric_model(observed_hdu[0].header['MASKNAME'])
    # ignored the chip gaps
    good_pixels = observed_data > 0
    # Clip the edges of the detector where craziness happen.
    good_pixels[:10] = False
    good_pixels[-10:] = False
    bad_pixels = combine.find_bad_pixels(observed_data)
    # Keep the telluric bands: sharp features there are real absorption
    in_telluric = np.logical_and(observed_wavelengths >= 6640.0, observed_wavelengths <= 7040.0)
    in_telluric = np.logical_or(in_telluric, np.logical_and(observed_wavelengths >= 7550.0, observed_wavelengths <= 7750.0))
    bad_pixels[in_telluric] = False
    good_pixels = np.logical_and(good_pixels, ~bad_pixels)
    # Fit a combination of the telluric absorption multiplied by a constant + a polynomial-fourier model of
    # sensitivity
    wavelengths_hdu = fits.open(wavelengths_filename)
    chips = utils.get_wavelengths_of_chips(wavelengths_hdu)
    need_to_interplolate = np.ones(observed_hdu[2].data[0].shape, dtype=bool)
    for chip in chips:
        in_chip = np.logical_and(observed_wavelengths >= min(chip), observed_wavelengths <= max(chip))
        # Normalize the reference spectrum for numerical stability of the fit
        standard_scale = np.median(np.interp(observed_wavelengths[in_chip], standard['col1'], standard['col2']))
        standard['col2'] /= standard_scale
        best_fit, n_poly, n_fourier = fit_sensitivity(observed_wavelengths[in_chip], observed_data[in_chip],
                                                      telluric_model['col1'], telluric_model['col2'], standard['col1'], standard['col2'],
                                                      3, 11, float(observed_hdu['SCI'].header['RDNOISE']), good_pixels[in_chip])
        # Strip out the telluric correction (first 6 parameters)
        best_fit['popt'] = best_fit['popt'][6:]
        best_fit['model_function'] = fitting.polynomial_fourier_model(n_poly, n_fourier)
        # Save the sensitivity in magnitudes
        sensitivity = standard_scale / fitting.eval_fit(best_fit, observed_wavelengths[in_chip]) * float(observed_hdu[0].header['EXPTIME'])
        observed_hdu[2].data[0][in_chip] = utils.fluxtomag(sensitivity)
        need_to_interplolate[in_chip] = False
        # Undo the normalization for the next chip
        standard['col2'] *= standard_scale
    # Fill chip gaps by interpolating between the fitted chips
    observed_hdu[2].data = observed_hdu[2].data[0]
    observed_hdu[2].data[need_to_interplolate] = np.interp(observed_wavelengths[need_to_interplolate],
                                                           observed_wavelengths[~need_to_interplolate],
                                                           observed_hdu[2].data[~need_to_interplolate])
    observed_hdu[2].header += observed_hdu[0].header
    observed_hdu[2].header['OBSTYPE'] = 'SENS'
    observed_hdu[2].header['OBSCLASS'] = 'sensitivity'
    observed_hdu[2].writeto(outfile)
def make_sensitivity_model(n_poly, n_fourier, telluric_waves, telluric_correction, std_waves, std_flux,
                           wavelength_min, wavelength_range):
    """Build the model function fit by fit_sensitivity().

    The returned callable evaluates (polynomial+Fourier sensitivity) x
    (shifted/scaled telluric correction) x (shifted/scaled standard flux)
    on wavelengths normalized by (wavelength_min, wavelength_range).
    """
    poly_fourier_model = fitting.polynomial_fourier_model(n_poly, n_fourier)
    normalized_telluric_wavelengths = (telluric_waves - wavelength_min) / wavelength_range
    normalized_standard_wavelengths = (std_waves - wavelength_min) / wavelength_range
    def sensitivity_model(x, *p):
        # p 0, 1, 2 are for telluric fitting.
        # 0 and 1 linear wavelength shift and scale for telluric
        # 2 is power of telluric correction for the O2 A and B bands
        # 3 is the power of the telluric correction for the water bands (the rest of the telluric features)
        shifted_telluric_wavelengths = p[1] * (normalized_telluric_wavelengths - p[0])
        telluric_model = np.interp(x, shifted_telluric_wavelengths, telluric_correction,
                                   left=1.0, right=1.0)
        # O2 A and B band wavelength windows in normalized coordinates
        in_A = np.logical_and(x >= (6821. - wavelength_min) / wavelength_range, x <= (7094. - wavelength_min) / wavelength_range)
        in_B = np.logical_and(x <= (7731. - wavelength_min) / wavelength_range, x >= (7562. - wavelength_min) / wavelength_range)
        in_AB = np.logical_or(in_A, in_B)
        telluric_model[in_AB] **= (p[2] ** 0.55)
        telluric_model[~in_AB] **= (p[3] ** 0.55)
        # p 3, 4 are linear wavelength shift and scale for the standard model
        std_model = np.interp(x, p[5] * (normalized_standard_wavelengths - p[4]), std_flux)
        # Remaining parameters (p[6:]) feed the polynomial+Fourier sensitivity
        return poly_fourier_model(x, *p[6:]) * telluric_model * std_model
    return sensitivity_model
def fit_sensitivity(wavelengths, data, telluric_waves, telluric_correction, std_waves, std_flux, n_poly,
                    n_fourier, readnoise, good_pixels, weight_scale=2.0):
    """Interactively fit the sensitivity model to an observed standard star.

    Runs an IRLS fit of make_sensitivity_model() and repeats, letting the
    user adjust the polynomial order, Fourier order and outlier rejection
    scale, until the user accepts the fit.

    :return: (best_fit dict, n_poly, n_fourier) as finally accepted.
    """
    _, wavelength_min, wavelength_range = fitting.normalize_fitting_coordinate(wavelengths)
    function_to_fit = make_sensitivity_model(n_poly, n_fourier, telluric_waves, telluric_correction, std_waves, std_flux,
                                             wavelength_min, wavelength_range)
    def init_p0(n_poly, n_fourier):
        # 6 telluric/standard parameters + 1 + n_poly polynomial terms
        # + 2 * n_fourier Fourier amplitudes
        p0 = np.zeros(7 + n_poly + 2 * n_fourier)
        p0[0] = 0.0
        p0[1] = 1.0
        p0[2] = 1.0
        p0[3] = 0.1
        p0[4] = 0.0
        p0[5] = 1.0
        p0[6] = 1.0
        return p0
    p0 = init_p0(n_poly, n_fourier)
    errors = np.sqrt(np.abs(data) + readnoise ** 2.0)
    best_fit = fitting.run_fit(wavelengths, data, errors, function_to_fit, p0, weight_scale, good_pixels)
    # Go into a while loop
    while True:
        # Ask if the user is not happy with the fit,
        response = fitting.user_input('Does this fit look good? y or n:', ['y', 'n'], 'y')
        if response == 'y':
            break
        # If not have the user put in new values
        else:
            n_poly = int(fitting.user_input('Order of polynomial to fit:', [str(i) for i in range(100)], n_poly))
            n_fourier = int(fitting.user_input('Order of Fourier terms to fit:', [str(i) for i in range(100)], n_fourier))
            weight_scale = fitting.user_input('Scale for outlier rejection:', default=weight_scale, is_number=True)
            p0 = init_p0(n_poly, n_fourier)
            function_to_fit = make_sensitivity_model(n_poly, n_fourier, telluric_waves, telluric_correction, std_waves,
                                                     std_flux, wavelength_min, wavelength_range)
            best_fit = fitting.run_fit(wavelengths, data, errors, function_to_fit, p0, weight_scale, good_pixels)
    return best_fit, n_poly, n_fourier
| 9,004 | 49.307263 | 139 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/fitting.py | import numpy as np
from scipy import optimize
from statsmodels import robust
from lcogtgemini.utils import mad
from matplotlib import pyplot
def ncor(x, y):
    """Calculate the normalized correlation of two arrays"""
    cross = np.correlate(x, y)
    norm = (np.correlate(x, x) * np.correlate(y, y)) ** 0.5
    return cross / norm
def xcorfun(p, warr, farr, telwarr, telfarr):
    """Objective for matching a telluric spectrum to an observed one.

    Stretches/shifts the telluric wavelengths by p = (scale, shift),
    resamples the telluric flux onto the observed wavelengths, and returns
    the absolute inverse of the normalized correlation (small is better).
    """
    # Resample the telluric spectrum at the observed wavelengths after the
    # linear wavelength transformation
    transformed_waves = p[0] * telwarr + p[1]
    artificial_spectrum = np.interp(warr, transformed_waves, telfarr, left=1.0, right=1.0)
    return np.abs(1.0 / ncor(farr, artificial_spectrum))
def normalize_fitting_coordinate(x):
    """Rescale *x* onto [0, 1]; return (normalized_x, x_min, x_span)."""
    x_min = x.min()
    x_span = x.max() - x_min
    return (x - x_min) / x_span, x_min, x_span
# Iterative reweighting linear least squares
def irls(x, data, errors, model_function, initial_parameter_guess, good_pixels,
         tol=1e-6, weight_function=robust.norms.AndrewWave, weight_scale=2.0, maxiter=10):
    """Iteratively reweighted least squares fit of *model_function*.

    x and y are normalized before fitting; the returned dict carries the
    normalization so eval_fit() can evaluate the model in original units.

    :return: dict with keys 'popt', 'y_scale', 'xmin', 'x_range',
             'model_function'.
    """
    weights_calculator = weight_function(weight_scale)
    #Normalize to make fitting easier
    normalized_x, xmin, x_range = normalize_fitting_coordinate(x)
    y_scale = np.median(data[good_pixels])
    y = data[good_pixels] / y_scale
    scatter = errors[good_pixels] / y_scale
    # Do an initial fit of the model
    # Use 1 / sigma^2 as weights
    best_parameters = optimize.curve_fit(model_function, normalized_x[good_pixels], y, p0=initial_parameter_guess, sigma=scatter)[0]
    notconverged=True
    last_chi2 = np.inf
    iter = 0
    # Until converged
    while notconverged:
        # Update the weights
        residuals = y - model_function(normalized_x[good_pixels], *best_parameters)
        # Save the chi^2 to check for convergence
        chi2 = ((residuals / scatter) ** 2.0).sum()
        # update the scaling (the MAD of the residuals)
        scatter = mad(residuals) * 1.4826 # To convert to standard deviation
        weights = weights_calculator.weights(residuals / scatter).flatten()
        # Convert robust weights back into per-point sigmas for curve_fit;
        # zero-weight points get infinite sigma (effectively excluded)
        fit_errors = np.zeros(weights.shape)
        fit_errors[weights > 0] = weights[weights > 0] ** -2.0
        fit_errors[weights == 0] = np.inf
        # refit
        best_parameters = optimize.curve_fit(model_function, normalized_x[good_pixels], y,
                                             p0=best_parameters, sigma=fit_errors)[0]
        # converged when the change in the chi^2 (or l2 norm or whatever) is
        # less than the tolerance. Hopefully this should converge quickly.
        if iter >= maxiter or np.abs(chi2 - last_chi2) < tol:
            notconverged = False
        else:
            last_chi2 = chi2
            iter += 1
    return {'popt': best_parameters, 'y_scale': y_scale, 'xmin': xmin, 'x_range': x_range,
            'model_function': model_function}
def eval_fit(fit_dict, x):
    """Evaluate a fit produced by irls() at coordinates *x* in original units."""
    normalized_x = (x - fit_dict['xmin']) / fit_dict['x_range']
    model = fit_dict['model_function']
    return model(normalized_x, *fit_dict['popt']) * fit_dict['y_scale']
def polynomial_fourier_model(n_poly, n_fourier):
    """Return f(x, *p): an order-n_poly polynomial plus n_fourier sine/cosine
    harmonics. x is assumed roughly normalized to [0, 1].

    Parameter layout: p[0..n_poly] are polynomial coefficients, followed by
    alternating sine and cosine amplitudes for harmonics 1..n_fourier.
    """
    def model_to_optimize(x, *p):
        result = p[0]
        for power in range(1, n_poly + 1):
            result = result + p[power] * x ** power
        # One full fundamental period across the normalized [0, 1] interval
        omega_t = 2.0 * np.pi * x
        for harmonic in range(1, n_fourier + 1):
            result = result + p[n_poly + 2 * harmonic - 1] * np.sin(harmonic * omega_t)
            result = result + p[n_poly + 2 * harmonic] * np.cos(harmonic * omega_t)
        return result
    return model_to_optimize
def run_polynomal_fourier_fit(x, y, errors, n_poly, n_fourier, weight_scale, good_pixels):
    """Fit a polynomial + Fourier model to (x, y) via IRLS and return the
    best-fit dictionary (the fit is also plotted)."""
    model = polynomial_fourier_model(n_poly, n_fourier)
    # Start from a flat model: constant term 1, everything else 0
    initial_guess = np.zeros(1 + n_poly + 2 * n_fourier)
    initial_guess[0] = 1.0
    return run_fit(x, y, errors, model, initial_guess, weight_scale, good_pixels)
def run_fit(x, y, errors, function_to_fit, p0, weight_scale, good_pixels):
    """Run IRLS with the given model and starting parameters, plot the
    result, and return the best-fit dictionary."""
    best_fit = irls(x, y, errors, function_to_fit, p0, good_pixels,
                    weight_scale=weight_scale)
    # Show the fit so the caller (or user) can judge its quality
    plot_best_fit(x, y, best_fit, good_pixels)
    return best_fit
def fit_polynomial_fourier_model(x, y, errors, n_poly, n_fourier, good_pixels, weight_scale=2.0):
    """Interactively fit a polynomial + Fourier model, letting the user
    adjust orders and rejection scale until the fit is accepted.

    :return: the accepted best-fit dictionary from irls().
    """
    best_fit = run_polynomal_fourier_fit(x, y, errors, n_poly, n_fourier, weight_scale, good_pixels)
    # Go into a while loop
    while True:
        # Ask if the user is not happy with the fit,
        response = user_input('Does this fit look good? y or n:', ['y', 'n'], 'y')
        if response == 'y':
            break
        # If not have the user put in new values
        else:
            n_poly = int(user_input('Order of polynomial to fit:', [str(i) for i in range(100)], n_poly))
            n_fourier = int(user_input('Order of Fourier terms to fit:', [str(i) for i in range(100)], n_fourier))
            weight_scale = user_input('Scale for outlier rejection:', default=weight_scale, is_number=True)
            best_fit = run_polynomal_fourier_fit(x, y, errors, n_poly, n_fourier, weight_scale, good_pixels)
    return best_fit
def user_input(prompt, choices=None, default=None, is_number=False):
    """Prompt on the terminal until a valid response is given.

    An empty response returns *default*. If *choices* is given, the response
    must be one of them; if *is_number* is True, the response is parsed as a
    float. Note: uses Python 2's raw_input.
    """
    while True:
        response = raw_input(prompt + ' [{i}]'.format(i=default))
        if len(response) == 0:
            response = default
            break
        if choices is not None and response in choices:
            break
        elif is_number:
            try:
                response = float(response)
                break
            except:
                print('Input could not be parsed into a number. Please try again.')
        else:
            print('Please select a valid response')
    return response
def plot_best_fit(x, y, best_fit, good_pixels):
    """Plot the data and model (top panel) and residuals at the good pixels
    (bottom panel) in the current matplotlib figure, reusing its axes."""
    fig = pyplot.gcf()
    fig.clf()
    axes = fig.get_axes()
    # Create the two stacked panels only if the figure does not have them yet
    if len(axes) == 0:
        ax1 = pyplot.subplot(211)
        pyplot.subplot(212, sharex=ax1)
        axes = fig.get_axes()
    axes[0].plot(x, y, 'b')
    y_model = eval_fit(best_fit, x)
    axes[0].plot(x, y_model, 'r')
    axes[1].plot(x[good_pixels], y[good_pixels] - y_model[good_pixels], 'o', markersize=1.0)
def fitxcor(warr, farr, telwarr, telfarr):
    """Maximize the normalized cross correlation coefficient for the telluric
    correction
    """
    # Nelder-Mead minimization of the inverse correlation; start from the
    # identity transform (scale 1, shift 0)
    result = optimize.minimize(xcorfun, [1.0, 0.0],
                               args=(warr, farr, telwarr, telfarr),
                               method='Nelder-Mead')
    return result['x']
| 6,446 | 35.630682 | 132 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/telluric.py | import os
import numpy as np
from astropy.io import ascii
from astropy.io import fits
import lcogtgemini.file_utils
from lcogtgemini import combine
from lcogtgemini import fits_utils
from lcogtgemini import fitting
# Taken from the Berkley telluric correction
# telluricWaves = [(2000., 3190.), (3216., 3420.), (5500., 6050.), (6250., 6360.),
# (6450., 6530.), (6840., 7410.), (7550., 8410.), (8800., 9800.)]
# Taken from Moehler et al 2014 (on eg274)
# telluricWaves = [(5855., 5992.), (6261., 6349.), (6438., 6600.), (6821., 7094.),
# (7127., 7434.), (7562., 7731.), (7801., 8613.), (8798., 10338.)]
from lcogtgemini.file_utils import read_telluric_model
telluricWaves = [(5500., 6050.), (6250., 6360.), (6438., 6530.), (6821., 7434.), (7550., 8613.), (8798., 10338.)]
def telluric_correct(input_files):
    """Divide each spectrum by the telluric correction in 'telcor.dat',
    after cross-correlating in the 7500-7800 A band to align wavelengths and
    scaling the correction by airmass. Writes 't'+filename for each input.

    :return: list of output file names.
    """
    output_files = []
    for filename in input_files:
        # Get the standard to use for telluric correction
        hdu = fits.open(filename)
        waves = fits_utils.fitshdr_to_wave(hdu['SCI'].header)
        telluric_correction = ascii.read('telcor.dat')
        telwave = telluric_correction['col1']
        telspec = telluric_correction['col2']
        # Cross-correlate the standard star and the sci spectra
        # to find wavelength shift of standard star.
        w = np.logical_and(waves > 7500., waves < 7800.)
        tw = np.logical_and(telwave > 7500., telwave < 7800.)
        good_pixels = hdu['SCI'].data[0][w] != 0
        if good_pixels.sum() == 0:
            # No usable pixels in the band: fall back to the identity transform
            p = [1.0, 0.0]
        else:
            cleaned_data = np.interp(waves[w], waves[w][good_pixels], hdu['SCI'].data[0][w][good_pixels])
            p = fitting.fitxcor(waves[w],cleaned_data, telwave[tw], telspec[tw])
        # shift and stretch standard star spectrum to match science
        # spectrum.
        telcorr = np.interp(waves, p[0] * telwave + p[1], telspec, left=1.0, right=1.0)
        # Correct for airmass
        airmass = float(hdu[0].header['AIRMASS'])
        telcorr = telcorr ** (airmass ** 0.55)
        # Divide science spectrum by transformed standard star sub-spectrum
        hdu['SCI'].data[0] /= telcorr
        outfile = 't'+filename
        output_files.append(outfile)
        # Copy telluric-corrected data to new file.
        hdu.writeto(outfile)
    return output_files
def mktelluric(filename, objname, base_stddir):
    """Derive a telluric correction from a standard-star spectrum.

    Fits the product of a telluric model and the flux standard to the
    observed standard-star spectrum, then takes the ratio of observation to
    fitted stellar model inside the telluric bands (``telluricWaves``). The
    correction is normalized to airmass 1 and written to 'telcor.dat'.

    Parameters
    ----------
    filename : str
        FITS file of the flux-calibrated standard-star spectrum.
    objname : str
        Sanitized standard-star name used to locate its reference spectrum.
    base_stddir : str
        Subdirectory of the gmisc onedstds library holding the standard.
    """
    observed_hdu = fits.open(filename)
    observed_wavelengths = fits_utils.fitshdr_to_wave(observed_hdu[0].header)
    observed_data = observed_hdu[0].data
    maskname = observed_hdu[0].header['MASKNAME']
    telluric_model = read_telluric_model(maskname)

    # Read in the standard file
    standard_filename = lcogtgemini.file_utils.get_standard_file(objname, base_stddir)
    standard = lcogtgemini.file_utils.read_standard_file(standard_filename, maskname)

    good_pixels = observed_data > 0
    # Clip the edges of the detector where craziness happen.
    good_pixels[:20] = False
    good_pixels[-20:] = False
    bad_pixels = combine.find_bad_pixels(observed_data)
    # Do not flag pixels inside the strong O2/water bands as "bad": the dips
    # there are real telluric absorption, not artifacts.
    in_telluric = np.logical_and(observed_wavelengths >= 6640.0, observed_wavelengths <= 7040.0)
    in_telluric = np.logical_or(in_telluric, np.logical_and(observed_wavelengths >= 7550.0, observed_wavelengths <= 7750.0))
    bad_pixels[in_telluric] = False
    good_pixels = np.logical_and(good_pixels, ~bad_pixels)

    # Normalize the standard to the observed flux level so the fit starts
    # near unit normalization; undone again after the fit.
    standard_scale = np.median(np.interp(observed_wavelengths[good_pixels], standard['col1'], standard['col2']))
    standard['col2'] /= standard_scale
    # Interpolate the standard file onto the science wavelengths
    # Shift the standard star model by the same amount used in the sensitivity function
    # sensitivity
    best_fit = fit_standard(observed_wavelengths, observed_hdu[0].data,
                            telluric_model['col1'], telluric_model['col2'], standard['col1'], standard['col2'],
                            good_pixels)
    standard['col2'] *= standard_scale

    # Apply the fitted wavelength shift/stretch (popt[4], popt[5]) and flux
    # normalization (popt[6]) to the standard model, in the normalized
    # fitting coordinate used by fit_standard.
    normalized_standard_wavelengths = (standard['col1'] - best_fit['xmin']) / best_fit['x_range']
    recaled_standard_wavelengths = best_fit['popt'][5] * (normalized_standard_wavelengths - best_fit['popt'][4])
    standard_wavelengths = recaled_standard_wavelengths * best_fit['x_range'] + best_fit['xmin']
    standard_flux = np.interp(observed_wavelengths, standard_wavelengths, best_fit['popt'][6] * standard['col2'])

    # In the telluric regions
    in_telluric = np.zeros(observed_wavelengths.shape, dtype=bool)
    for region in telluricWaves:
        in_region = np.logical_and(observed_wavelengths >= region[0], observed_wavelengths <= region[1])
        in_telluric[in_region] = True

    # Divide the observed by the standard
    # Fill the rest with ones
    correction = np.ones(observed_wavelengths.shape)
    in_telluric = np.logical_and(observed_hdu[0].data > 0, in_telluric)
    correction[in_telluric] = observed_hdu[0].data[in_telluric] / standard_flux[in_telluric]

    # Raise the whole telluric correction to the airmass ** -0.55 power
    # See matheson's paper. This normalizes things to airmass 1
    correction **= float(observed_hdu[0].header['AIRMASS']) ** -0.55

    ascii.write({'wavelengths': observed_wavelengths, 'telluric': correction}, 'telcor.dat',
                names=['wavelengths', 'telluric'], format='fast_no_header')
def telluric_correction_exists():
    """Return True when a telluric correction ('telcor.dat') is already in the cwd."""
    telcor_filename = 'telcor.dat'
    return os.path.exists(telcor_filename)
def make_standard_model(telluric_waves, telluric_correction, std_waves, std_flux,
                        wavelength_min, wavelength_range):
    """Build the 7-parameter standard-star x telluric model used by fit_standard.

    The returned closure evaluates, on normalized wavelengths ``x``, the
    product: normalization * (shifted/stretched telluric correction raised
    to band-dependent strengths) * (shifted/stretched standard-star model).

    Parameters
    ----------
    telluric_waves, telluric_correction : array-like
        Telluric correction sampled on its own wavelength grid.
    std_waves, std_flux : array-like
        Standard-star model sampled on its own wavelength grid.
    wavelength_min, wavelength_range : float
        Normalization of the fitting coordinate:
        ``x = (wavelength - wavelength_min) / wavelength_range``.

    Returns
    -------
    callable
        ``f(x, *p)`` where p[0]/p[1] are the telluric wavelength
        shift/stretch, p[2]/p[3] the telluric strengths for the O2 (A, B)
        and water bands, p[4]/p[5] the standard-star wavelength
        shift/stretch, and p[6] the overall flux normalization.
    """
    normalized_telluric_wavelengths = (telluric_waves - wavelength_min) / wavelength_range
    normalized_standard_wavelengths = (std_waves - wavelength_min) / wavelength_range

    def standard_mode(x, *p):
        # p 0, 1, 2 are for telluric fitting.
        # 0 and 1 linear wavelength shift and scale for telluric
        # 2 is power of telluric correction for the O2 A and B bands
        # 3 is the power of the telluric correction for the water bands (the rest of the telluric features)
        shifted_telluric_wavelengths = p[1] * (normalized_telluric_wavelengths - p[0])
        telluric_model = np.interp(x, shifted_telluric_wavelengths, telluric_correction,
                                   left=1.0, right=1.0)
        # O2 A band (6821-7094 A) and B band (7562-7731 A), in normalized units.
        in_A = np.logical_and(x >= (6821. - wavelength_min) / wavelength_range, x <= (7094. - wavelength_min) / wavelength_range)
        in_B = np.logical_and(x <= (7731. - wavelength_min) / wavelength_range, x >= (7562. - wavelength_min) / wavelength_range)
        in_AB = np.logical_or(in_A, in_B)
        # The 0.55 exponent matches the airmass scaling used elsewhere in
        # this module (see telluric_correct / mktelluric).
        telluric_model[in_AB] **= (p[2] ** 0.55)
        telluric_model[~in_AB] **= (p[3] ** 0.55)

        # p 4, 5 are linear wavelength shift and scale for the standard model
        std_model = np.interp(x, p[5] * (normalized_standard_wavelengths - p[4]), std_flux)
        return p[6] * telluric_model * std_model
    return standard_mode
def fit_standard(wavelengths, data, telluric_waves, telluric_correction, std_waves, std_flux, good_pixels,
                 weight_scale=20.0):
    """Interactively fit the combined standard-star + telluric model to data.

    Wraps ``fitting.run_fit`` with the 7-parameter model from
    ``make_standard_model`` and loops, letting the user adjust the outlier
    rejection scale until the fit is accepted.

    Parameters
    ----------
    wavelengths, data : array-like
        Observed standard-star spectrum.
    telluric_waves, telluric_correction : array-like
        Telluric model on its own wavelength grid.
    std_waves, std_flux : array-like
        Standard-star reference spectrum on its own wavelength grid.
    good_pixels : array of bool
        Pixels to include in the fit.
    weight_scale : float, default 20.0
        Initial outlier-rejection scale passed to the fitter.

    Returns
    -------
    dict
        Best-fit results from ``fitting.run_fit`` (includes 'popt', 'xmin',
        and 'x_range'; see the usage in ``mktelluric``).
    """
    _, wavelength_min, wavelength_range = fitting.normalize_fitting_coordinate(wavelengths)
    function_to_fit = make_standard_model(telluric_waves, telluric_correction, std_waves, std_flux,
                                          wavelength_min, wavelength_range)

    def init_p0():
        # Identity starting point: no shifts, unit stretches, unit telluric
        # strengths, unit normalization.
        p0 = np.zeros(7)
        p0[0] = 0.0
        p0[1] = 1.0
        p0[2] = 1.0
        p0[3] = 1.0
        p0[4] = 0.0
        p0[5] = 1.0
        p0[6] = 1.0
        return p0

    p0 = init_p0()
    # Error model: sqrt(0.01 * |data|), i.e. a fixed fractional-variance
    # assumption used only to weight the fit.
    errors = np.sqrt(np.abs(data) * 0.01)
    best_fit = fitting.run_fit(wavelengths, data, errors, function_to_fit, p0, weight_scale, good_pixels)
    # Go into a while loop
    while True:
        # Ask if the user is not happy with the fit,
        response = fitting.user_input('Does this fit look good? y or n:', ['y', 'n'], 'y')
        if response == 'y':
            break
        # If not have the user put in new values
        else:
            weight_scale = fitting.user_input('Scale for outlier rejection:', default=weight_scale, is_number=True)
            p0 = init_p0()
            function_to_fit = make_standard_model(telluric_waves, telluric_correction, std_waves,
                                                  std_flux, wavelength_min, wavelength_range)
            best_fit = fitting.run_fit(wavelengths, data, errors, function_to_fit, p0, weight_scale, good_pixels)

    return best_fit
| 8,487 | 45.895028 | 129 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/file_utils.py | import numpy as np
from astropy.convolution import convolve, Gaussian1DKernel
from astropy.io import fits, ascii
import os
from glob import glob
from pyraf import iraf
def getobstypes(fs):
    """Read the OBSTYPE and OBSCLASS header keywords for every file.

    Returns a pair of numpy string arrays parallel to ``fs``.
    """
    header_pairs = [(fits.getval(f, 'OBSTYPE', ext=0), fits.getval(f, 'OBSCLASS', ext=0))
                    for f in fs]
    obstypes = np.array([pair[0] for pair in header_pairs])
    obsclasses = np.array([pair[1] for pair in header_pairs])
    return obstypes, obsclasses
def getobjname(fs, obstypes):
    """Build a sanitized object name from the first OBJECT frame's header.

    Lower-cases the OBJECT keyword, strips '+' signs and all whitespace,
    and shortens the 'ltt' standard-star prefix to just 'l'.
    """
    first_object_frame = fs[obstypes == 'OBJECT'][0]
    raw_name = fits.getval(first_object_frame, 'OBJECT', ext=0).lower()
    cleaned = ''.join(raw_name.replace('+', '').split())
    return cleaned.replace('ltt', 'l')
def getredorblue(f):
    """Return the grating letter ('r' or 'b') encoded in a filename.

    File names look like 'objname.1b450.fits'; the letter follows the
    exposure number in the second dot-separated field.
    """
    setup_field = f.split('.')[1]
    return setup_field[1]
def getsetupname(f, calfile=False):
    """Return the setup base name of a file.

    For calibration files (``calfile=True``) the second dot-separated
    field is kept whole; for science files its leading exposure number is
    dropped.
    """
    name_parts = f.split('.')
    setup_field = name_parts[1] if calfile else name_parts[1][1:]
    return name_parts[0] + '.' + setup_field
def gettxtfiles(fs, objname):
    """Collect the flat, arc, and science list files from the working dir.

    Flats and arcs are located by their '.flat.txt' / '.arc.txt' suffixes.
    Science lists match '<objname>*.txt' but must not themselves be flats
    or arcs.
    """
    flatfiles = np.array(glob('*.flat.txt'))
    # reduce the CuAr arcfiles. Not flat fielded, gaps are not fixpixed
    arcfiles = np.array(glob('*.arc.txt'))
    # Science lists: everything matching the object name that is not a
    # calibration list.
    scifiles = [f for f in glob(objname + '*.txt')
                if 'arc' not in f and 'flat' not in f]
    return flatfiles, arcfiles, np.array(scifiles)
def get_base_name(f):
    """Return the '<objname>.<r|b><centwave>' base name for a raw frame.

    Combines the sanitized object name, the first (lower-cased) letter of
    the GRATING keyword, and the integer central wavelength.

    Parameters
    ----------
    f : str
        Path of the raw FITS frame.
    """
    objname = getobjname(np.array([f]), np.array(['OBJECT']))
    # (Removed a dead local: the basename of ``f`` was computed but never used.)
    # red or blue setting
    redblue = fits.getval(f, 'GRATING')[0].lower()
    # central wavelength
    lamcentral = fits.getval(f, 'CENTWAVE')
    return '%s.%s%i' % (objname, redblue, lamcentral)
def maketxtfiles(fs, obstypes, obsclasses, objname):
    """Sort the raw frames into per-setup text list files.

    Each usable frame is appended to a list file named
    '<objname>.<expnum><r|b><centwave>[.arc|.flat].txt'. Arcs and flats for
    the same setup share one list; each science exposure of a setup gets
    its own list with an incremented exposure number. Bias, BPM,
    acquisition, and sensitivity frames are skipped.

    Parameters
    ----------
    fs : np.ndarray of str
        Paths of the raw frames.
    obstypes, obsclasses : np.ndarray of str
        OBSTYPE / OBSCLASS keywords, parallel to ``fs``.
    objname : str
        Sanitized object name used as the list-file prefix.
    """
    # go through each of the files (Ignore bias and aquisition files)
    goodfiles = np.logical_and(obsclasses != 'acqCal', obsclasses != 'acq')
    goodfiles = np.logical_and(goodfiles, obstypes != 'BIAS')
    goodfiles = np.logical_and(goodfiles, obstypes!='BPM')
    goodfiles = np.logical_and(goodfiles, obsclasses != 'sensitivity')
    # Keep only raw Gemini frames: their basenames start with 'S' (South)
    # or 'N' (North).
    correct_names = np.logical_or([os.path.basename(f)[0] == 'S' for f in fs],
                                  [os.path.basename(f)[0] == 'N' for f in fs])
    goodfiles = np.logical_and(correct_names, goodfiles)
    for f in fs[goodfiles]:
        # put the filename in the correct text file.
        obsstr = ''
        obstype = fits.getval(f, 'OBSTYPE', ext=0)
        if obstype != 'OBJECT':
            # Calibrations carry a '.arc'/'.flat' suffix and no exposure number.
            obsstr = '.' + obstype.lower()
            expnum = ''
        else:
            expnum = 1
        # Drop the raw/
        fname = f.split('/')[-1]
        # red or blue setting
        redblue = fits.getval(f, 'GRATING')[0].lower()
        # central wavelength
        lamcentral = fits.getval(f, 'CENTWAVE')
        txtname = '%s.%s%s%i%s.txt' % (objname, str(expnum), redblue, lamcentral, obsstr)
        # If more than one arc or flat, append to the text file
        if os.path.exists(txtname):
            if obsstr == '.flat' or obsstr == '.arc':
                # write to a text file
                txtfile = open(txtname, 'a')
            else:
                # We need to increment the exposure number
                moreimages = True
                expnum += 1
                while moreimages:
                    txtname = '%s.%s%s%i%s.txt' % (objname, str(expnum), redblue, lamcentral, obsstr)
                    if not os.path.exists(txtname):
                        txtfile = open(txtname, 'w')
                        moreimages = False
                    else:
                        expnum += 1
        else:
            txtfile = open(txtname, 'w')
        txtfile.write(fname + '\n')
        txtfile.close()
def get_images_from_txt_file(filename):
    """Return the lines of a list file, with line endings stripped."""
    with open(filename) as listfile:
        return listfile.read().splitlines()
def get_standard_file(objname, base_stddir):
    """Locate the reference spectrum for a standard star.

    A local '<objname>.std.dat' in the working directory takes precedence;
    otherwise fall back to the gmisc onedstds library shipped with IRAF.
    """
    local_standard = objname + '.std.dat'
    if os.path.exists(local_standard):
        return local_standard
    return os.path.join(iraf.osfn('gmisc$lib/onedstds/'), base_stddir, objname + '.dat')
def read_standard_file(filename, maskname):
    """Read a flux-standard table and smooth its flux to the slit resolution."""
    standard_table = ascii.read(filename)
    standard_table['col2'] = smooth(maskname, standard_table['col2'])
    return standard_table
def read_telluric_model(maskname):
    """Load 'telluric_model.dat' and smooth it to the slit resolution."""
    telluric_table = ascii.read('telluric_model.dat')
    telluric_table['col2'] = smooth(maskname, telluric_table['col2'])
    return telluric_table
def smooth(maskname, data):
    """Gaussian-smooth a spectrum to the resolution set by the slit width.

    A 1-arcsec slit corresponds to a measured 5 A FWHM; dividing by 2.355
    converts FWHM to the Gaussian sigma.
    """
    slit_arcsec = float(maskname.split('arc')[0])
    sigma = 5.0 * slit_arcsec / 2.355
    return convolve(data, Gaussian1DKernel(stddev=sigma))
#!/usr/bin/env python
'''
Created on Nov 7, 2014
@author: cmccully
'''
import os
from pyraf import iraf

# Run IRAF in the current working directory and load the packages needed
# for GMOS long-slit reductions.
iraf.cd(os.getcwd())
iraf.gemini()
iraf.gmos()
iraf.twodspec()
iraf.apextract()
iraf.onedspec()

# Blue wavelength cutoff in Angstroms.
bluecut = 3450

iraf.gmos.logfile = "log.txt"
iraf.gmos.mode = 'h'
# Always overwrite existing output files.
iraf.set(clobber='yes')

iraf.set(stdimage='imtgmos')

# Package-level reduction switches. NOTE(review): presumably reconfigured
# at runtime by the pipeline driver -- confirm where they are set.
dooverscan = False
is_GS = False        # presumably True when reducing Gemini-South data
do_qecorr = False    # apply the quantum-efficiency correction step
dobias = False
dodq = False

# Per-chip alignment corrections and geometry for the GMOS detector mosaic.
xchip_shifts = [0.0, 0.0, 0.0]
ychip_shifts = [0.0, 0.0, 0.0]
chip_rotations = [0.0, 0.0, 0.0]
chip_gap_size = 0.0
namps = 0
nchips = 3
| 542 | 13.289474 | 32 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/sky.py | import lcogtgemini
from pyraf import iraf
def skysub(scifiles, rawpath):
    """Sky-subtract each transformed ('t'-prefixed) science frame with gsskysub.

    Parameters
    ----------
    scifiles : iterable of str
        Science '.txt' list-file names; the corresponding transformed FITS
        frames are 't<name>' (without the '.txt' suffix).
    rawpath : str
        Path to the raw-data directory (not referenced here; kept for
        interface consistency with the other reduction steps).
    """
    for f in scifiles:
        # sky subtraction
        # output has an s prefixed on the front
        # This step is currently quite slow for Gemini-South data
        iraf.unlearn(iraf.gsskysub)
        iraf.gsskysub('t' + f[:-4], long_sample='*', fl_inter='no', fl_vardq=lcogtgemini.dodq,
                      naverage=-10, order=1, low_reject=2.0, high_reject=2.0,
                      niterate=10, mode='h')
import os
from glob import glob

import numpy as np
from astropy.io import fits
from pyraf import iraf

import lcogtgemini
def makebias(fs, obstypes, rawpath):
    """Create (or copy into place) the master bias frames.

    If a premade master bias is among the input files it is copied into the
    working directory. Otherwise the raw BIAS frames are stacked with
    gbias, producing one master bias per CCD binning.

    Parameters
    ----------
    fs : np.ndarray of str
        Paths of all input files.
    obstypes : np.ndarray of str
        OBSTYPE keyword for each file in ``fs``.
    rawpath : str
        Directory holding the raw frames.
    """
    for f in fs:
        # A premade master bias: copy it in under the canonical name.
        if f.endswith('_bias.fits'):
            iraf.cp(f, 'bias.fits')
        elif 'bias' in f:
            iraf.cp(f, './')

    if len(glob('bias*.fits')) == 0:
        bias_files = fs[obstypes == 'BIAS']
        # CCDSUM is e.g. '2 2'; normalize to '2x2' for the file names.
        binnings = [fits.getval(f, 'CCDSUM', 1).replace(' ', 'x') for f in bias_files]
        for binning in set(binnings):
            # Write the list of raw bias frames for this binning.
            list_filename = 'bias{binning}.txt'.format(binning=binning)
            with open(list_filename, 'w') as bias_list:
                for f in bias_files[np.array(binnings) == binning]:
                    bias_list.write(f.split('/')[-1] + '\n')
            # Stack the frames with gbias; it needs an absolute path to the
            # list file. (Bug fix: the original built this path with
            # ``os.getcwd()`` without importing ``os``, raising NameError.)
            iraf.gbias('@' + os.path.join(os.getcwd(), list_filename),
                       'bias{binning}'.format(binning=binning), rawpath=rawpath,
                       fl_over=lcogtgemini.dooverscan, fl_vardq=lcogtgemini.dodq)
lcogtgemini | lcogtgemini-master/lcogtgemini/qe.py | import lcogtgemini
import os
from pyraf import iraf
def make_qecorrection(arcfiles):
    """Create quantum-efficiency correction images from the arc frames.

    For every arc list file, read the (first) arc image name it contains
    and run gqecorr in compute-only mode (fl_correct=False, fl_keep=True)
    to produce a '<setup>.qe.fits' correction image. Setups that already
    have one are skipped.

    Parameters
    ----------
    arcfiles : iterable of str
        Names of the '*.arc.txt' list files, one per setup.
    """
    for f in arcfiles:
        #read in the arcfile name
        with open(f) as txtfile:
            arcimage = txtfile.readline()
            # Strip off the newline character
            # NOTE(review): the 'g' prefix presumably points at the
            # gprepare'd copy of the arc -- confirm against the arc step.
            arcimage = 'g' + arcimage.split('\n')[0]
        if not os.path.exists(f[:-8] +'.qe.fits'):
            iraf.gqecorr(arcimage, refimages=f[:-4]+'.arc.fits', fl_correct=False, fl_keep=True,
                         corrimages=f[:-8] +'.qe.fits', verbose=True, fl_vardq=lcogtgemini.dodq)
| 562 | 36.533333 | 96 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/extinction.py | from astropy.io import ascii, fits
from lcogtgemini import fits_utils
import numpy as np
def correct_for_extinction(scifiles, extfile):
    """Correct the 'et*' science spectra for atmospheric extinction.

    Reads a two-column extinction table (wavelength, extinction in
    magnitudes per unit airmass), converts it to a per-airmass flux ratio,
    interpolates it onto each spectrum's wavelength grid, raises it to the
    frame's airmass, and divides it out. Results are written with an 'xet'
    prefix.

    Parameters
    ----------
    scifiles : iterable of str
        Science '.txt' list-file names; the corresponding FITS files are
        'et<name>.fits'.
    extfile : str
        Path to the extinction table.
    """
    # Read in the extinction file
    extinction_correction = ascii.read(extfile)
    # Convert the extinction to a flux ratio at airmass 1:
    # 10**(-0.4 * k(lambda)).
    extinction_correction['col2'] = 10**(-0.4 * extinction_correction['col2'])

    for f in scifiles:
        # read in the science file
        hdu = fits.open('et' + f.replace('.txt', '.fits'))
        airmass = float(hdu[0].header['AIRMASS'])
        wavelengths = fits_utils.fitshdr_to_wave(hdu['SCI'].header)
        # Linearly interpolate the extinction correction to the wavelengths of the spectrum
        corrections = np.interp(wavelengths, extinction_correction['col1'], extinction_correction['col2'])
        # Extinction scales with airmass: the total attenuation is
        # 10**(-0.4 * k(lambda) * X) = (per-airmass factor) ** X.
        # (Bug fix: the original multiplied by the wavelength-independent
        # factor 10**(-0.4 * X) instead, which applies only 1 airmass of
        # k(lambda) plus a grey offset.)
        corrections = corrections ** airmass
        # Divide the spectrum by the extinction correction
        hdu['SCI'].data /= corrections
        # Save the file to an extinction corrected file
        hdu.writeto('xet' + f.replace('.txt', '.fits'))
| 1,028 | 40.16 | 106 | py |
lcogtgemini | lcogtgemini-master/lcogtgemini/integration.py | import numpy as np
class integrate:
    """2D integration helpers for (sub-)sampled image data.

    The trapezoidal and Simpson weight arrays are cached on the instance
    and rebuilt only when the input shape changes, keeping repeated
    integrations over same-shaped grids cheap.
    """

    def __init__(self):
        # Cached weight arrays, lazily (re)built when input shapes change.
        self.trapweights = np.zeros((1, 1))
        self.simp2dweights = np.zeros((1, 1))
        self.simp4dweights = np.zeros((1, 1, 1, 1))

    def sum4d(self, d, binx, biny):
        """Collapse a (ny, nx, suby, subx) array to (ny, nx) by direct
        summation, normalized by the binning factors."""
        return d.sum(axis=3).sum(axis=2) / binx / biny

    # Define a 2D trapezoidal rule for integration
    # If we subsample, this should be good enough and much faster than double quad
    def trap2d(self, d, dx, dy):
        """2D trapezoidal-rule integral of ``d`` with grid steps dx, dy.

        ``d`` is the 2D data array; the y coordinate is the first index.
        """
        if self.trapweights.shape != d.shape:
            # Interior weight 4, edges 2, corners 1; overall factor 1/4 below.
            self.trapweights = 4.0 * np.ones(d.shape)
            self.trapweights[:, 0] = 2.0
            self.trapweights[0, :] = 2.0
            self.trapweights[-1, :] = 2.0
            self.trapweights[:, -1] = 2.0
            self.trapweights[0, 0] = 1.0
            self.trapweights[-1, -1] = 1.0
            self.trapweights[0, -1] = 1.0
            self.trapweights[-1, 0] = 1.0
        result = self.trapweights * d
        return 0.25 * dx * dy * result.sum()

    # Simpson's rule for 2d integration
    def simp2d(self, d, dx, dy):
        """2D Simpson's-rule integral of ``d`` with grid steps dx, dy.

        Requires an odd number of samples (even number of intervals) per
        axis for the weight pattern to be exact.
        """
        if self.simp2dweights.shape != d.shape:
            # Tensor product of the 1D Simpson pattern 1, 4, 2, 4, ..., 4, 1.
            self.simp2dweights = np.ones(d.shape)
            self.simp2dweights[1::2, 1:-1:2] = 16
            self.simp2dweights[1::2, 2:-1:2] = 8
            self.simp2dweights[2::2, 1:-1:2] = 8
            self.simp2dweights[2::2, 2:-1:2] = 4
            self.simp2dweights[0, 1::2] = 4
            self.simp2dweights[0, 2::2] = 2
            self.simp2dweights[-1, 1::2] = 4
            self.simp2dweights[-1, 2::2] = 2
            self.simp2dweights[1::2, 0] = 4
            self.simp2dweights[2::2, 0] = 2
            self.simp2dweights[1::2, -1] = 4
            self.simp2dweights[2::2, -1] = 2
            self.simp2dweights[0, 0] = 1
            self.simp2dweights[0, -1] = 1
            self.simp2dweights[-1, 0] = 1
            self.simp2dweights[-1, -1] = 1
        result = d * self.simp2dweights
        return dx * dy / 9.0 * result.sum()

    # Integrate a 4D array into a 2D image using Simpson's Rule
    def simp4d(self, d, dx, dy):
        """Simpson-integrate each (suby, subx) cell of a 4D array down to a
        (ny, nx) image.

        NOTE(review): this scales ``d`` in place (no copy is made), so the
        caller's array is modified -- presumably intentional to avoid a
        large temporary; confirm before reusing ``d`` afterwards.
        """
        # Bug fix: compare against the per-cell shape d.shape[2:]; the old
        # comparison against the full 4D shape could never match, so the
        # weights were rebuilt on every call.
        if self.simp4dweights.shape != d.shape[2:]:
            self.simp4dweights = np.ones(d.shape[2:])
            self.simp4dweights[1::2, 1:-1:2] = 16
            self.simp4dweights[1::2, 2:-1:2] = 8
            self.simp4dweights[2::2, 1:-1:2] = 8
            self.simp4dweights[2::2, 2:-1:2] = 4
            self.simp4dweights[0, 1::2] = 4
            self.simp4dweights[0, 2::2] = 2
            self.simp4dweights[-1, 1::2] = 4
            self.simp4dweights[-1, 2::2] = 2
            self.simp4dweights[1::2, 0] = 4
            self.simp4dweights[2::2, 0] = 2
            self.simp4dweights[1::2, -1] = 4
            self.simp4dweights[2::2, -1] = 2
            self.simp4dweights[0, 0] = 1
            self.simp4dweights[0, -1] = 1
            self.simp4dweights[-1, 0] = 1
            self.simp4dweights[-1, -1] = 1
        result = d
        result[:, :] *= self.simp4dweights
        return dx * dy / 9.0 * result.sum(axis=3).sum(axis=2)

    def make4d(self, d, nx, ny, subsampx, subsampy):
        """Reshape a subsampled 2D grid into a 4D array of integration cells.

        ``d`` has shape (ny * subsampy + 1, nx * subsampx + 1); output cell
        (iy, ix) holds its (subsampy + 1, subsampx + 1) patch of ``d``,
        with adjacent cells sharing their boundary row/column -- exactly
        the layout simp4d integrates over.
        """
        # This converts a 2d array into a 4d array that is easy to integrate
        # Time for some Python reshape magic
        result4d = np.zeros((ny, nx, subsampy + 1, subsampx + 1))
        # Interior of each cell (everything but the shared right/bottom edges).
        result4d[:, :, :-1, :-1] = d[:-1, :-1].reshape((ny, subsampy, nx, subsampx)).swapaxes(1, 2)
        # Right edge column of each cell (shared with the next cell over).
        result4d[:, :, :-1, -1] = d[:-1, subsampx::subsampx].reshape(ny, subsampy, nx).swapaxes(1, 2)
        # Bottom edge row of each cell. (Bug fix: the reshape previously used
        # the undefined name ``subsamy`` -- a NameError; the trailing
        # dimension is subsampx, the number of columns in the edge row.)
        result4d[:, :, -1, :-1] = d[subsampy::subsampy, :-1].reshape(ny, nx, subsampx)
        # Bottom-right corner point of each cell.
        result4d[:, :, -1, -1] = d[subsampy::subsampy, subsampx::subsampx]
        return result4d
anonymeter | anonymeter-main/src/anonymeter/__init__.py | 0 | 0 | 0 | py | |
anonymeter | anonymeter-main/src/anonymeter/neighbors/mixed_types_kneighbors.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
"""Nearest neighbor search for mixed type data."""
import logging
from math import fabs, isnan
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from numba import float64, int64, jit
from anonymeter.preprocessing.transformations import mixed_types_transform
from anonymeter.preprocessing.type_detection import detect_consistent_col_types
logger = logging.getLogger(__name__)
@jit(nopython=True, nogil=True)
def gower_distance(r0: np.ndarray, r1: np.ndarray, cat_cols_index: int) -> float64:
    r"""Distance between two records inspired by the Gower distance [1].

    To handle mixed type data, the distance is specialized for numerical (continuous)
    and categorical data. For numerical records, we use the L1 norm,
    computed after the columns have been normalized so that :math:`d(a_i, b_i)\leq 1`
    for every :math:`a_i`, :math:`b_i`. For categorical, :math:`d(a_i, b_i)` is 1,
    if the entries :math:`a_i`, :math:`b_i` differ, else, it is 0.

    Notes
    -----
    To keep the balance between numerical and categorical values, the input records
    have to be properly normalized. Their numerical part need to be scaled so that
    the difference between any two values of a column (from both dataset) is *at most* 1.

    References
    ----------
    [1]. `Gower (1971) "A general coefficient of similarity and some of its properties.
    <https://www.jstor.org/stable/2528823?seq=1>`_

    Parameters
    ----------
    r0 : np.array
        Input array of shape (D,).
    r1 : np.array
        Input array of shape (D,).
    cat_cols_index : int
        Index delimiting the categorical columns in r0/r1 if present. For example,
        ``r0[:cat_cols_index]`` are the numerical columns, and ``r0[cat_cols_index:]`` are
        the categorical ones. For a fully numerical dataset, use ``cat_cols_index =
        len(r0)``. For a fully categorical one, set ``cat_cols_index`` to 0.

    Returns
    -------
    float
        distance between the records.
    """
    dist = 0.0
    for i in range(len(r0)):
        # Two missing values are treated as maximally different (unit
        # distance). NOTE(review): if only ONE of the two entries is NaN, a
        # numerical column contributes NaN to the total via fabs(NaN - x);
        # presumably the preprocessing guarantees matched missingness --
        # confirm upstream.
        if isnan(r0[i]) and isnan(r1[i]):
            dist += 1
        else:
            if i < cat_cols_index:
                # Numerical part: L1 distance (columns pre-scaled to [0, 1]).
                dist += fabs(r0[i] - r1[i])
            else:
                # Categorical part: 0/1 mismatch distance.
                if r0[i] != r1[i]:
                    dist += 1
    return dist
@jit(nopython=True, nogil=True)
def _nearest_neighbors(queries, candidates, cat_cols_index, n_neighbors):
    r"""For every element of ``queries``, find its nearest neighbors in ``candidates``.

    Parameters
    ----------
    queries : np.ndarray
        Input array of shape (Nx, D).
    candidates : np.ndarray
        Input array of shape (Ny, D).
    n_neighbors : int
        Determines the number of closest neighbors per entry to be returned.
    cat_cols_index : int
        Index delimiting the categorical columns in queries/candidates, if present.

    Returns
    -------
    idx : np.ndarray[int]
        Array of shape (Nx, n_neighbors). For each element in ``queries``,
        this array contains the indices of the closest neighbors in
        ``candidates``. That is, ``candidates[idx[i]]`` are the elements of
        ``candidates`` that are closer to ``queries[i]``.
    dists : np.ndarray[float]
        Array of shape (Nx, n_neighbors). This array containing the distances
        between the record pairs identified by idx.
    """
    idx = np.zeros((queries.shape[0], n_neighbors), dtype=int64)
    dists = np.zeros((queries.shape[0], n_neighbors), dtype=float64)
    for ix in range(queries.shape[0]):
        # Brute force: compute the Gower distance to every candidate...
        dist_ix = np.zeros((candidates.shape[0]), dtype=float64)
        for iy in range(candidates.shape[0]):
            dist_ix[iy] = gower_distance(r0=queries[ix], r1=candidates[iy], cat_cols_index=cat_cols_index)
        # ...then keep the n_neighbors smallest ones.
        close_match_idx = dist_ix.argsort()[:n_neighbors]
        idx[ix] = close_match_idx
        dists[ix] = dist_ix[close_match_idx]
    return idx, dists
class MixedTypeKNeighbors:
    """Nearest neighbor algorithm for mixed type data.

    To handle mixed type data, we use a distance function inspired by the Gower similarity.
    The distance is specialized for numerical (continuous) and categorical data. For
    numerical records, we use the L1 norm, computed after the columns have been
    normalized so that :math:`d(a_i, b_i) <= 1` for every :math:`a_i`, :math:`b_i`.
    For categorical, :math:`d(a_i, b_i)` is 1, if the entries :math:`a_i`, :math:`b_i`
    differ, else, it is 0.

    References
    ----------
    [1]. `Gower (1971) "A general coefficient of similarity and some of its properties.
    <https://www.jstor.org/stable/2528823?seq=1>`_

    Parameters
    ----------
    n_neighbors : int, default is 5
        Determines the number of closest neighbors per entry to be returned.
    n_jobs : int, default is -2
        Number of jobs to use. It follows joblib convention, so that ``n_jobs = -1``
        means all available cores.
    """

    def __init__(self, n_neighbors: int = 5, n_jobs: int = -2):
        self._n_neighbors = n_neighbors
        self._n_jobs = n_jobs

    def fit(self, candidates: pd.DataFrame, ctypes: Optional[Dict[str, List[str]]] = None):
        """Prepare for nearest neighbor search.

        The data are stored as-is; type detection and scaling happen lazily
        in ``kneighbors``, where both datasets are available.

        Parameters
        ----------
        candidates : pd.DataFrame
            Dataset containing the records one would find the neighbors in.
        ctypes : dict, optional.
            Dictionary specifying which columns in X should be treated as
            continuous and which should be treated as categorical. For example,
            ``ctypes = {'num': ['distance'], 'cat': ['color']}`` specify the types
            of a two column dataset.
        """
        self._candidates = candidates
        self._ctypes = ctypes
        return self

    def kneighbors(
        self, queries: pd.DataFrame, n_neighbors: Optional[int] = None, return_distance: bool = False
    ) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
        """Find the nearest neighbors for a set of query points.

        Note
        ----
        The search is performed in a brute-force fashion. For large datasets
        or large number of query points, the search for nearest neighbor will
        become very slow.

        Parameters
        ----------
        queries : pd.DataFrame
            Query points for the nearest neighbor searches.
        n_neighbors : int, default is None
            Number of neighbors required for each sample.
            The default is the value passed to the constructor.
        return_distance : bool, default is False
            Whether or not to return the distances of the neigbors or
            just the indexes.

        Returns
        -------
        np.narray of shape (df.shape[0], n_neighbors)
            Array with the indexes of the elements of the fit dataset closer to
            each element in the query dataset.
        np.narray of shape (df.shape[0], n_neighbors)
            Array with the distances of the neighbors pairs. This is optional and
            it is returned only if ``return_distances`` is ``True``
        """
        if n_neighbors is None:
            n_neighbors = self._n_neighbors

        # Cannot return more neighbors than there are candidate records.
        if n_neighbors > self._candidates.shape[0]:
            logger.warning(
                f"Parameter ``n_neighbors``={n_neighbors} cannot be "
                f"larger than the size of the training data {self._candidates.shape[0]}."
            )
            n_neighbors = self._candidates.shape[0]

        # Infer consistent column types across both frames if none were given.
        if self._ctypes is None:
            self._ctypes = detect_consistent_col_types(df1=self._candidates, df2=queries)

        # Scale/encode both datasets together so the Gower distance stays
        # balanced between numerical and categorical columns.
        candidates, queries = mixed_types_transform(
            df1=self._candidates, df2=queries, num_cols=self._ctypes["num"], cat_cols=self._ctypes["cat"]
        )

        # Numerical columns first: gower_distance splits numeric/categorical
        # on the index len(self._ctypes["num"]).
        cols = self._ctypes["num"] + self._ctypes["cat"]
        queries = queries[cols].values
        candidates = candidates[cols].values

        # One job per query row. The jitted search releases the GIL
        # (nogil=True), so the threading backend gives real parallelism
        # without copying the candidate matrix.
        with Parallel(n_jobs=self._n_jobs, backend="threading") as executor:
            res = executor(
                delayed(_nearest_neighbors)(
                    queries=queries[ii : ii + 1],
                    candidates=candidates,
                    cat_cols_index=len(self._ctypes["num"]),
                    n_neighbors=n_neighbors,
                )
                for ii in range(queries.shape[0])
            )

        indexes, distances = zip(*res)
        indexes, distances = np.vstack(indexes), np.vstack(distances)

        if return_distance:
            return distances, indexes
        return indexes
| 8,813 | 35.878661 | 106 | py |
anonymeter | anonymeter-main/src/anonymeter/neighbors/__init__.py | 0 | 0 | 0 | py | |
anonymeter | anonymeter-main/src/anonymeter/evaluators/linkability_evaluator.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
"""Privacy evaluator that measures the linkability risk."""
import logging
from typing import Dict, List, Optional, Set, Tuple, cast
import numpy as np
import pandas as pd
from anonymeter.neighbors.mixed_types_kneighbors import MixedTypeKNeighbors
from anonymeter.stats.confidence import EvaluationResults, PrivacyRisk
logger = logging.getLogger(__name__)
class LinkabilityIndexes:
    """Utility class to store indexes from linkability attack.

    Parameters
    ----------
    idx_0 : np.ndarray
        Array containing the result of the nearest neighbor search
        between the first original dataset and the synthetic data.
        Rows correspond to original records and the i-th column
        contains the index of the i-th closest synthetic record.
    idx_1 : np.ndarray
        Array containing the result of the nearest neighbor search
        between the second original dataset and the synthetic data.
        Rows correspond to original records and the i-th column
        contains the index of the i-th closest synthetic record.
    """

    def __init__(self, idx_0: np.ndarray, idx_1: np.ndarray):
        self._idx_0 = idx_0
        self._idx_1 = idx_1

    def find_links(self, n_neighbors: int) -> Dict[int, Set[int]]:
        """Return synthetic records that link originals in the split datasets.

        Parameters
        ----------
        n_neighbors : int
            Number of neighbors considered for the link search.

        Returns
        -------
        Dict[int, Set[int]]
            Dictionary mapping the index of the attacked original record
            to the indices of the synthetic records that link its two halves.

        Raises
        ------
        ValueError
            If ``n_neighbors`` is smaller than 1.
        """
        # Cap n_neighbors at the number of neighbors actually stored per
        # record. (Bug fix: this previously compared against shape[0], the
        # number of attacked records, instead of shape[1], so the slice
        # below could silently truncate or the cap could wrongly trigger.)
        if n_neighbors > self._idx_0.shape[1]:
            logger.warning(f"Neighbors too large ({n_neighbors}, using {self._idx_0.shape[1]}) instead.")
            n_neighbors = self._idx_0.shape[1]

        if n_neighbors < 1:
            raise ValueError(f"Invalid neighbors value ({n_neighbors}): must be positive.")

        links = {}
        for ii, (row0, row1) in enumerate(zip(self._idx_0, self._idx_1)):
            # A link exists when some synthetic record is a close neighbor
            # of both halves of the same original record.
            joined = set(row0[:n_neighbors]) & set(row1[:n_neighbors])
            if len(joined) > 0:
                links[ii] = joined

        return links

    def count_links(self, n_neighbors: int) -> int:
        """Count successfully linked records.

        Parameters
        ----------
        n_neighbors : int
            Number of neighbors considered for the link search.

        Returns
        -------
        int
            Number of target records for which the synthetic dataset
            has provided the attacker wth means to link them.
        """
        links = self.find_links(n_neighbors=n_neighbors)
        return _count_links(links)
def _count_links(links: Dict[int, Set[int]]) -> int:
"""Count links."""
linkable: Set[int] = set()
for ori_idx in links.keys():
linkable = linkable | {ori_idx}
return len(linkable)
def _random_links(n_synthetic: int, n_attacks: int, n_neighbors: int) -> np.ndarray:
rng = np.random.default_rng()
return np.array([rng.choice(n_synthetic, size=n_neighbors, replace=False) for _ in range(n_attacks)])
def _random_linkability_attack(n_synthetic: int, n_attacks: int, n_neighbors: int) -> LinkabilityIndexes:
    """Model a baseline attacker who guesses the neighbor indices at random."""
    return LinkabilityIndexes(
        idx_0=_random_links(n_synthetic=n_synthetic, n_attacks=n_attacks, n_neighbors=n_neighbors),
        idx_1=_random_links(n_synthetic=n_synthetic, n_attacks=n_attacks, n_neighbors=n_neighbors),
    )
def _find_nn(syn: pd.DataFrame, ori: pd.DataFrame, n_jobs: int, n_neighbors: int) -> np.ndarray:
    """Indices of the ``n_neighbors`` synthetic records closest to each original one."""
    # A single auxiliary column arrives as a Series: promote to a frame.
    syn_frame = syn.to_frame() if syn.ndim == 1 else syn
    ori_frame = ori.to_frame() if ori.ndim == 1 else ori
    knn_model = MixedTypeKNeighbors(n_jobs=n_jobs, n_neighbors=n_neighbors).fit(syn_frame)
    return cast(np.ndarray, knn_model.kneighbors(ori_frame, return_distance=False))
def _linkability_attack(
    ori: pd.DataFrame,
    syn: pd.DataFrame,
    n_attacks: int,
    aux_cols: Tuple[List[str], List[str]],
    n_neighbors: int,
    n_jobs: int,
) -> LinkabilityIndexes:
    """Run the linkability attack: search neighbors of each split separately."""
    targets = ori.sample(n_attacks, replace=False)
    cols_0, cols_1 = aux_cols
    return LinkabilityIndexes(
        idx_0=_find_nn(syn=syn[cols_0], ori=targets[cols_0], n_neighbors=n_neighbors, n_jobs=n_jobs),
        idx_1=_find_nn(syn=syn[cols_1], ori=targets[cols_1], n_neighbors=n_neighbors, n_jobs=n_jobs),
    )
class LinkabilityEvaluator:
r"""Measure the linkability risk created by a synthetic dataset.
The linkability risk is measured from the success of a linkability attack.
The attack is modeled along the following scenario. The attacker posesses
two datasets, both of which share some columns with the *original* dataset
that was used to generate the synthetic data. Those columns will be
referred to as *auxiliary columns*. The attacker's aim is then to use the
information contained in the synthetic data to connect these two datasets,
i.e. to find records that belong to the same individual.
To model this attack, the original dataset is split vertically into two
parts. Then we try to reconnect the two parts using the synthetic data
by looking for the closest neighbors of the split original records in
the synthetic data. If both splits of an original record have the same
closest synthetic neighbor, they are linked together. The more original
records get relinked in this manner the more successful the attack.
Parameters
----------
ori : pd.DataFrame
Dataframe containing original data.
syn : pd.DataFrame
Dataframe containing synthetic data. It has to have
the same columns as df_ori.
aux_cols : tuple of two lists of strings or tuple of int, optional
Features of the records that are given to the attacker as auxiliary
information.
n_attacks : int, default is 500.
Number of records to attack. If None each record in the original
dataset will be attacked.
n_neighbors : int, default is 1
The number of closest neighbors to include in the analysis. The
default of 1 means that the linkability attack is considered
successful only if the two original record split have the same
synthetic record as closest neighbor.
control : pd.DataFrame (optional)
Independent sample of original records **not** used to create the
synthetic dataset. This is used to evaluate the excess privacy risk.
"""
    def __init__(
        self,
        ori: pd.DataFrame,
        syn: pd.DataFrame,
        aux_cols: Tuple[List[str], List[str]],
        n_attacks: Optional[int] = 500,
        n_neighbors: int = 1,
        control: Optional[pd.DataFrame] = None,
    ):
        self._ori = ori
        self._syn = syn
        # If n_attacks is None, every original record is attacked.
        self._n_attacks = n_attacks if n_attacks is not None else ori.shape[0]
        self._aux_cols = aux_cols
        self._n_neighbors = n_neighbors
        self._control = control
        # Set by evaluate(); results() refuses to run before then.
        self._evaluated = False
def evaluate(self, n_jobs: int = -2) -> "LinkabilityEvaluator":
"""Run the linkability attack.
Parameters
----------
n_jobs : int, default is -2
The number of parallel jobs to run for neighbors search.
Returns
-------
self
The evaluated ``LinkabilityEvaluator`` object.
"""
self._baseline_links = _random_linkability_attack(
n_synthetic=self._syn.shape[0], n_attacks=self._n_attacks, n_neighbors=self._n_neighbors
)
self._attack_links = _linkability_attack(
ori=self._ori,
syn=self._syn,
n_attacks=self._n_attacks,
aux_cols=self._aux_cols,
n_neighbors=self._n_neighbors,
n_jobs=n_jobs,
)
self._control_links = (
None
if self._control is None
else _linkability_attack(
ori=self._control,
syn=self._syn,
n_attacks=self._n_attacks,
aux_cols=self._aux_cols,
n_neighbors=self._n_neighbors,
n_jobs=n_jobs,
)
)
self._evaluated = True
return self
def results(self, confidence_level: float = 0.95, n_neighbors: Optional[int] = None) -> EvaluationResults:
"""Raw evaluation results.
Parameters
----------
confidence_level : float, default is 0.95
Confidence level for the error bound calculation.
n_neighbors : int, default is None
The number of closest neighbors to include in the analysis.
If `None` (the default), the number used it the one
given by the constructor. The value of this parameter must
be smaller of equal to what has been used to initialize this
evaluator.
Returns
-------
EvaluationResults
Object containing the success rates for the various attacks.
"""
if not self._evaluated:
raise RuntimeError("The linkability evaluator wasn't evaluated yet. Please, run `evaluate()` first.")
if n_neighbors is None:
n_neighbors = self._n_neighbors
if n_neighbors > self._n_neighbors:
raise ValueError(
f"Cannot compute linkability results for `n_neighbors` "
f"({n_neighbors}) larger than value used by constructor "
f"({self._n_neighbors}. Using `n_neighbors == {self._n_neighbors}`"
)
n_control = None if self._control_links is None else self._control_links.count_links(n_neighbors=n_neighbors)
return EvaluationResults(
n_attacks=self._n_attacks,
n_success=self._attack_links.count_links(n_neighbors=n_neighbors),
n_baseline=self._baseline_links.count_links(n_neighbors=n_neighbors),
n_control=n_control,
confidence_level=confidence_level,
)
def risk(
self, confidence_level: float = 0.95, baseline: bool = False, n_neighbors: Optional[int] = None
) -> PrivacyRisk:
"""Compute linkability risk.
The linkability risk reflects how easy linkability attacks are.
A linkability risk of 1 means that every single attacked record
could be successfully linked together. A linkability risk of 0
means that no links were found at all.
Parameters
----------
confidence_level : float, default is 0.95
Confidence level for the error bound calculation.
baseline : bool, default is False
If True, return the baseline risk computed from a random guessing
attack. If False (default) return the risk from the real attack.
n_neighbors : int, default is None
The number of closest neighbors to include in the analysis.
If `None` (the default), the number used it the one
given by the constructor. The value of this parameter must
be smaller of equal to what has been used to initialize this
evaluator.
Returns
-------
PrivacyRisk
Estimate of the linkability risk and its confidence interval.
"""
results = self.results(confidence_level=confidence_level, n_neighbors=n_neighbors)
return results.risk(baseline=baseline)
| 11,666 | 35.688679 | 117 | py |
anonymeter | anonymeter-main/src/anonymeter/evaluators/__init__.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
"""Tools to evaluate privacy risks along the directives of the Article 29 WGP."""
from anonymeter.evaluators.inference_evaluator import InferenceEvaluator
from anonymeter.evaluators.linkability_evaluator import LinkabilityEvaluator
from anonymeter.evaluators.singling_out_evaluator import SinglingOutEvaluator
__all__ = ["SinglingOutEvaluator", "LinkabilityEvaluator", "InferenceEvaluator"]
| 590 | 58.1 | 83 | py |
anonymeter | anonymeter-main/src/anonymeter/evaluators/inference_evaluator.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
"""Privacy evaluator that measures the inference risk."""
from typing import List, Optional
import numpy as np
import pandas as pd
from anonymeter.neighbors.mixed_types_kneighbors import MixedTypeKNeighbors
from anonymeter.stats.confidence import EvaluationResults, PrivacyRisk
def _run_attack(
    target: pd.DataFrame,
    syn: pd.DataFrame,
    n_attacks: int,
    aux_cols: List[str],
    secret: str,
    n_jobs: int,
    naive: bool,
    regression: Optional[bool],
) -> int:
    """Run one inference attack and return the number of correct guesses."""
    if regression is None:
        # Autodetect the task: numeric secrets are treated as regression.
        regression = pd.api.types.is_numeric_dtype(target[secret])

    # Records whose secret the attacker tries to infer.
    targets = target.sample(n_attacks, replace=False)

    if not naive:
        # Guess the secret of the synthetic record closest in the aux columns.
        model = MixedTypeKNeighbors(n_jobs=n_jobs, n_neighbors=1).fit(candidates=syn[aux_cols])
        matches = model.kneighbors(queries=targets[aux_cols])
        guesses = syn.iloc[matches.flatten()][secret]
    else:
        # Baseline attacker: draw guesses from random synthetic records.
        guesses = syn.sample(n_attacks)[secret]

    correct = evaluate_inference_guesses(guesses=guesses, secrets=targets[secret], regression=regression)
    return correct.sum()
def evaluate_inference_guesses(
    guesses: pd.Series, secrets: pd.Series, regression: bool, tolerance: float = 0.05
) -> np.ndarray:
    """Evaluate the success of an inference attack.

    An attack is successful when the attacker made a correct guess. For
    regression tasks (continuous secrets) a guess is correct when its
    relative difference from the true value is within ``tolerance``; for
    classification tasks the guess must match the secret exactly. A pair
    of missing values also counts as a correct guess.

    Parameters
    ----------
    guesses : pd.Series
        Attacker guesses for each of the targets.
    secrets : pd.Series
        True values of the secret for each of the targets.
    regression : bool
        Whether the secret is treated as a continuous (True) or
        categorical/discrete (False) variable.
    tolerance : float, default is 0.05
        Maximum relative difference between guess and secret for a
        regression guess to be considered correct.

    Returns
    -------
    np.ndarray
        Boolean array flagging the correctness of each guess.
    """
    guess_values = guesses.values
    secret_values = secrets.values

    if regression:
        # Relative error is measured w.r.t. the guess; the small epsilon
        # avoids a division by an exactly-zero guess.
        relative_error = np.abs(guess_values - secret_values) / (guess_values + 1e-12)
        value_match = relative_error <= tolerance
    else:
        value_match = guess_values == secret_values

    # Guessing "missing" for a missing secret is also a correct guess.
    both_missing = np.logical_and(pd.isnull(guess_values), pd.isnull(secret_values))
    return np.logical_or(both_missing, value_match)
class InferenceEvaluator:
    """Privacy evaluator that measures the inference risk.

    The attacker's goal is to use the synthetic dataset to learn about
    some (potentially all) attributes of a target record from the
    original database. Starting from partial knowledge of the target
    (the auxiliary information AUX), the attacker picks the synthetic
    record that best matches AUX and uses it to guess the value of the
    unknown secret attribute. The success of the attack is compared to
    the baseline scenario of the trivial attacker, who guesses at random.

    .. note::
        For a thorough interpretation of the attack result, it is recommended
        to set aside a small portion of the original dataset to use as a
        *control* dataset for the Inference Attack. These control records
        should **not** have been used to generate the synthetic dataset.
        For good statistical accuracy on the attack results, 500 to 1000
        control records are usually enough.

        Comparing how successful the attack is when targeting the *training*
        and *control* dataset allows for a more sensitive measure of eventual
        information leak during the training process. If, using the synthetic
        data as a base, the attack is more successful against the original
        records in the training set than it is when targeting the control
        data, this indicates that specific information about some records
        have been transferred to the synthetic dataset.

    Parameters
    ----------
    ori : pd.DataFrame
        Dataframe with the target records whose secrets the attacker
        will try to guess. This is the private dataframe from which
        the synthetic one has been derived.
    syn : pd.DataFrame
        Dataframe with the synthetic records. It is assumed to be
        fully available to the attacker.
    aux_cols : list of str
        Features of the records that are given to the attacker as
        auxiliary information.
    secret : str
        Secret attribute of the targets that is unknown to the attacker.
        This is what the attacker will try to guess.
    regression : bool, optional
        Whether the target of the inference attack is quantitative
        (regression = True) or categorical (regression = False). If None
        (default), this is deduced from the type of the secret column.
    n_attacks : int, default is 500
        Number of attack attempts.
    control : pd.DataFrame (optional)
        Independent sample of original records **not** used to
        create the synthetic dataset. This is used to evaluate
        the excess privacy risk.
    """

    def __init__(
        self,
        ori: pd.DataFrame,
        syn: pd.DataFrame,
        aux_cols: List[str],
        secret: str,
        regression: Optional[bool] = None,
        n_attacks: int = 500,
        control: Optional[pd.DataFrame] = None,
    ):
        self._ori = ori
        self._syn = syn
        self._control = control
        self._aux_cols = aux_cols
        self._secret = secret
        self._regression = regression
        self._n_attacks = n_attacks
        # Set by evaluate(); results()/risk() refuse to run before that.
        self._evaluated = False

    def _attack(self, target: pd.DataFrame, naive: bool, n_jobs: int) -> int:
        # Thin wrapper that fills in the evaluator configuration.
        return _run_attack(
            target=target,
            syn=self._syn,
            n_attacks=self._n_attacks,
            aux_cols=self._aux_cols,
            secret=self._secret,
            n_jobs=n_jobs,
            naive=naive,
            regression=self._regression,
        )

    def evaluate(self, n_jobs: int = -2) -> "InferenceEvaluator":
        r"""Run the inference attack.

        Parameters
        ----------
        n_jobs : int, default is -2
            The number of jobs to run in parallel.

        Returns
        -------
        self
            The evaluated ``InferenceEvaluator`` object.
        """
        # Random-guessing baseline, then the real attack on the training
        # records, then (optionally) the same attack on the control records.
        self._n_baseline = self._attack(target=self._ori, naive=True, n_jobs=n_jobs)
        self._n_success = self._attack(target=self._ori, naive=False, n_jobs=n_jobs)
        if self._control is None:
            self._n_control = None
        else:
            self._n_control = self._attack(target=self._control, naive=False, n_jobs=n_jobs)
        self._evaluated = True
        return self

    def results(self, confidence_level: float = 0.95) -> EvaluationResults:
        """Raw evaluation results.

        Parameters
        ----------
        confidence_level : float, default is 0.95
            Confidence level for the error bound calculation.

        Returns
        -------
        EvaluationResults
            Object containing the success rates for the various attacks.
        """
        if not self._evaluated:
            raise RuntimeError("The inference evaluator wasn't evaluated yet. Please, run `evaluate()` first.")
        return EvaluationResults(
            n_attacks=self._n_attacks,
            n_success=self._n_success,
            n_baseline=self._n_baseline,
            n_control=self._n_control,
            confidence_level=confidence_level,
        )

    def risk(self, confidence_level: float = 0.95, baseline: bool = False) -> PrivacyRisk:
        """Compute the inference risk from the success of the attacker.

        This measures how much an attack on training data outperforms
        an attack on control data. An inference risk of 0 means that
        the attack had no advantage on the training data (no inference
        risk), while a value of 1 means that the attack exploited the
        maximally possible advantage.

        Parameters
        ----------
        confidence_level : float, default is 0.95
            Confidence level for the error bound calculation.
        baseline : bool, default is False
            If True, return the baseline risk computed from a random guessing
            attack. If False (default) return the risk from the real attack.

        Returns
        -------
        PrivacyRisk
            Estimate of the inference risk and its confidence interval.
        """
        return self.results(confidence_level=confidence_level).risk(baseline=baseline)
| 9,078 | 35.757085 | 111 | py |
anonymeter | anonymeter-main/src/anonymeter/evaluators/singling_out_evaluator.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
"""Privacy evaluator that measures the singling out risk."""
import logging
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import numpy as np
import pandas as pd
from pandas.api.types import is_bool_dtype, is_categorical_dtype, is_numeric_dtype
from scipy.optimize import curve_fit
from anonymeter.stats.confidence import EvaluationResults, PrivacyRisk
rng = np.random.default_rng()
logger = logging.getLogger(__name__)
def _escape_quotes(string: str) -> str:
    """Backslash-escape single and double quotes for use inside query strings."""
    escaped = string.replace('"', '\\"')
    return escaped.replace("'", "\\'")
def _query_expression(col: str, val: Any, dtype: np.dtype) -> str:
    """Generate a type-aware equality query expression for ``DataFrame.query``.

    Datetime and string values are quoted (strings are escaped first);
    other values are compared verbatim.
    """
    query: str = ""
    if pd.api.types.is_datetime64_any_dtype(dtype):
        # Quote datetimes so DataFrame.query parses them as timestamps.
        # (This assignment was missing: the f-string used to be a bare,
        # discarded expression, leaving the query empty for datetimes.)
        query = f"{col} == '{val}'"
    elif isinstance(val, str):
        query = f"{col} == '{_escape_quotes(val)}'"
    else:
        query = f"{col} == {val}"
    return query
def _query_from_record(record: pd.Series, dtypes: pd.Series, columns: List[str], medians: Optional[pd.Series]) -> str:
    """Build a query that describes ``record`` on the given columns."""
    expressions = []

    for col in columns:
        val = record[col]

        if pd.isna(val):
            item = ".isna()"
        elif is_bool_dtype(dtypes[col]):
            item = f"== {val}"
        elif is_numeric_dtype(dtypes[col]):
            # One-sided comparison for numbers: point away from the median,
            # or pick a random direction when no medians are available.
            if medians is None:
                operator = rng.choice([">=", "<="])
            else:
                operator = ">=" if val > medians[col] else "<="
            item = f"{operator} {val}"
        elif is_categorical_dtype(dtypes[col]) and is_numeric_dtype(dtypes[col].categories.dtype):
            item = f"=={val}"
        elif isinstance(val, str):
            item = f"== '{_escape_quotes(val)}'"
        else:
            item = f'== "{val}"'

        expressions.append(f"{col}{item}")

    return " & ".join(expressions)
def _random_operator(data_type: str) -> str:
    """Draw a random operator suitable for the given kind of data."""
    operators = {
        "categorical": ["==", "!="],
        "boolean": ["", "not "],
        "numerical": ["==", "!=", ">", "<", ">=", "<="],
    }
    if data_type not in operators:
        raise ValueError(f"Unknown `data_type`: {data_type}")
    return rng.choice(operators[data_type])
def _random_query(unique_values: Dict[str, List[Any]], cols: List[str]):
    """Generate one random query over the given columns.

    For every column a value is drawn from its observed unique values and
    combined with an operator matching the column's data type.
    """
    expressions = []

    for col in cols:
        values = unique_values[col]
        val = rng.choice(values)

        if pd.isna(val):
            expr = f"{_random_operator('boolean')}{col}.isna()"
        elif is_bool_dtype(values):
            expr = f"{_random_operator('boolean')}{col}"
        elif is_categorical_dtype(values):
            expr = f"{col} {_random_operator('categorical')} {val}"
        elif is_numeric_dtype(values):
            expr = f"{col} {_random_operator('numerical')} {val}"
        elif isinstance(val, str):
            expr = f"{col} {_random_operator('categorical')} '{_escape_quotes(val)}'"
        else:
            expr = f"{col} {_random_operator('categorical')} '{val}'"

        expressions.append(expr)

    return " & ".join(expressions)
def _random_queries(df: pd.DataFrame, n_queries: int, n_cols: int) -> List[str]:
    """Create ``n_queries`` random queries, each over ``n_cols`` random columns."""
    # Draw all the column subsets first, then generate the queries, so the
    # random number generator is consumed in the same order as before.
    column_sets = []
    for _ in range(n_queries):
        column_sets.append(rng.choice(df.columns, size=n_cols, replace=False).tolist())

    uniques = {col: df[col].unique() for col in df.columns}
    return [_random_query(unique_values=uniques, cols=cols) for cols in column_sets]
def safe_query_counts(query: str, df: pd.DataFrame) -> Optional[int]:
    """Return how many rows of ``df`` satisfy ``query``, or None if it fails."""
    try:
        matches = df.query(query, engine="python")
    except Exception as ex:
        # Broken queries are expected (randomly generated); report and move on.
        logger.debug(f"Query {query} failed with {ex}.")
        return None
    return len(matches)
def singling_out_probability_integral(n: int, w_min: float, w_max: float) -> float:
    """Integral of the singling out probability within a given range.

    The probability that a query singles out in a population of size n is
    determined by the query "weight" w, i.e. the chance that the query
    matches a random row drawn from the data generating distribution:
    P(w, n) = n*w * (1 - w)**(n - 1). See Cohen and Nissim 2020 [1]
    for more details.

    References
    ----------
    [1] - https://arxiv.org/abs/1904.06009

    Parameters
    ----------
    n : int
        Size of the population.
    w_min : float
        Lower extreme of integration. Must be between 0 and 1.
    w_max : float
        Higher extreme of integration. Must be between w_min and 1.

    Returns
    -------
    float
        The integral of the singling out probability in the given range.

    Raises
    ------
    ValueError
        If ``w_min`` or ``w_max`` fall outside their allowed ranges.
    """
    if w_min < 0 or w_min > 1:
        raise ValueError(f"Parameter `w_min` must be between 0 and 1. Got {w_min} instead.")
    if w_max < w_min or w_max > 1:
        raise ValueError(
            f"Parameter `w_max` must be greater than w_min ({w_min}) and smaller than 1. Got {w_max} instead."
        )

    # Closed-form antiderivative of P(w, n), evaluated at both extremes.
    lower_term = (n * w_min + 1) * (1 - w_min) ** n
    upper_term = (n * w_max + 1) * (1 - w_max) ** n
    return (lower_term - upper_term) / (n + 1)
def _measure_queries_success(
    df: pd.DataFrame, queries: List[str], n_repeat: int, n_meas: int
) -> Tuple[np.ndarray, np.ndarray]:
    """Count successful queries on random subsamples of increasing size."""
    sample_sizes: List[int] = []
    n_successes: List[int] = []

    smallest = min(1000, len(df))
    # n_meas sample sizes between `smallest` and the full dataframe,
    # each measured n_repeat times on fresh random subsamples.
    for size in np.linspace(smallest, len(df), n_meas).astype(int):
        for _ in range(n_repeat):
            subsample = df.sample(size, replace=False)
            n_successes.append(len(_evaluate_queries(df=subsample, queries=queries)))
            sample_sizes.append(size)

    return np.array(sample_sizes), np.array(n_successes)
def _model(x, w_eff, norm):
    """Expected number of singling-out successes for a population of size ``x``."""
    integral = singling_out_probability_integral(n=x, w_min=0, w_max=w_eff)
    return norm * integral
def _fit_model(sizes: np.ndarray, successes: np.ndarray) -> Callable:
    """Fit ``_model`` to the measurements and return the fitted curve."""
    # Initial guesses: effective weight of order 1/N, with the norm
    # scaled so that the model matches one success at the largest size.
    w_eff_guess = 1 / np.max(sizes)
    norm_guess = 1 / singling_out_probability_integral(n=np.max(sizes), w_min=0, w_max=w_eff_guess)

    popt, _ = curve_fit(_model, xdata=sizes, ydata=successes, bounds=(0, (1, np.inf)), p0=(w_eff_guess, norm_guess))

    def fitted(x):
        return _model(x, *popt)

    return fitted
def fit_correction_term(df: pd.DataFrame, queries: List[str]) -> Callable:
    """Fit a correction for the different size of the control dataset.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe on which the queries needs to be evaluated.
    queries : list of strings
        Singling out queries to evaluate on the data.

    Returns
    -------
    callable
        Model of how the number of queries that single out depends
        on the size of the dataset.
    """
    measured_sizes, measured_successes = _measure_queries_success(df=df, queries=queries, n_repeat=5, n_meas=10)
    return _fit_model(sizes=measured_sizes, successes=measured_successes)
class UniqueSinglingOutQueries:
    """Ordered collection of unique queries that single out in a DataFrame."""

    def __init__(self):
        # Canonical (character-sorted) forms seen so far, for O(1) dedup.
        self._seen: Set[str] = set()
        # Accepted queries, in insertion order.
        self._accepted: List[str] = []

    def check_and_append(self, query: str, df: pd.DataFrame):
        """Add ``query`` to the collection if it singles out in ``df``.

        A query is accepted when it matches exactly one record of the
        dataframe and no equivalent query (same characters in any order)
        has been stored before.

        Parameters
        ----------
        query : str
            Query expression to be added.
        df : pd.DataFrame
            Dataframe on which the query needs to single out.
        """
        canonical = "".join(sorted(query))
        if canonical in self._seen:
            return

        # safe_query_counts returns None on failure; only exact singletons pass.
        if safe_query_counts(query=query, df=df) == 1:
            self._seen.add(canonical)
            self._accepted.append(query)

    def __len__(self):
        """Number of singling out queries stored."""
        return len(self._accepted)

    @property
    def queries(self) -> List[str]:
        """Queries that are present in the collection."""
        return self._accepted
def univariate_singling_out_queries(df: pd.DataFrame, n_queries: int) -> List[str]:
    """Generate singling out queries based on rare attribute values.

    Parameters
    ----------
    df: pd.DataFrame
        Input dataframe from which queries will be generated.
    n_queries: int
        Number of queries to generate.

    Returns
    -------
    List[str]
        The singling out queries.
    """
    candidates = []

    for col in df.columns:
        # A single missing value can be singled out via `isna()`.
        if df[col].isna().sum() == 1:
            candidates.append(f"{col}.isna()")

        # The extremes of a numerical column are promising candidates.
        if pd.api.types.is_numeric_dtype(df.dtypes[col]):
            values = df[col].dropna().sort_values()
            if len(values) > 0:
                candidates.append(f"{col} <= {values.iloc[0]}")
                candidates.append(f"{col} >= {values.iloc[-1]}")

        # Values that appear exactly once single out by construction.
        counts = df[col].value_counts()
        for val in counts[counts == 1].index:
            candidates.append(_query_expression(col=col, val=val, dtype=df.dtypes[col]))

    rng.shuffle(candidates)

    selected = UniqueSinglingOutQueries()
    for query in candidates:
        selected.check_and_append(query, df=df)
        if len(selected) == n_queries:
            break

    return selected.queries
def multivariate_singling_out_queries(df: pd.DataFrame, n_queries: int, n_cols: int) -> List[str]:
    """Generate singling out queries from combinations of attributes.

    Parameters
    ----------
    df: pd.DataFrame
        Input dataframe from which queries will be generated.
    n_queries: int
        Number of queries to generate.
    n_cols: float
        Number of columns that the attacker uses to create the
        singling out queries.

    Returns
    -------
    List[str]
        The singling out queries.
    """
    selected = UniqueSinglingOutQueries()
    medians = df.median(numeric_only=True)

    while len(selected) < n_queries:
        # Describe a random record through a random subset of its columns.
        row = df.iloc[rng.integers(df.shape[0])]
        cols = rng.choice(df.columns, size=n_cols, replace=False).tolist()
        candidate = _query_from_record(record=row, dtypes=df.dtypes, columns=cols, medians=medians)
        selected.check_and_append(query=candidate, df=df)

    return selected.queries
def _evaluate_queries(df: pd.DataFrame, queries: List[str]) -> List[str]:
    """Return the queries that single out exactly one record of ``df``.

    Queries that fail to execute are counted as NaN and reported
    in a warning.
    """
    # Failed queries come back as None from safe_query_counts -> NaN here.
    counts = np.array([safe_query_counts(query=q, df=df) for q in queries], dtype=float)

    # Idiom fix: the original tested `np.any(np.isnan(counts)) > 0`,
    # comparing a boolean to 0. Count the failures explicitly instead.
    n_failed = int(np.sum(np.isnan(counts)))
    if n_failed > 0:
        logger.warning(
            f"Found {n_failed} failed queries "
            f"out of {len(queries)}. Check DEBUG messages for more details."
        )

    success = counts == 1
    return [q for iq, q in enumerate(queries) if success[iq]]
def _generate_singling_out_queries(df: pd.DataFrame, mode: str, n_attacks: int, n_cols: int) -> List[str]:
    """Generate singling out queries with the requested attack mode."""
    if mode not in ("univariate", "multivariate"):
        raise RuntimeError(f"Parameter `mode` can be either `univariate` or `multivariate`. Got {mode} instead.")

    if mode == "univariate":
        queries = univariate_singling_out_queries(df=df, n_queries=n_attacks)
    else:
        queries = multivariate_singling_out_queries(df=df, n_queries=n_attacks, n_cols=n_cols)

    # Fewer queries than attacks weakens the attack, hence the warning.
    if len(queries) < n_attacks:
        logger.warning(
            f"Attack `{mode}` could generate only {len(queries)} "
            f"singling out queries out of the requested {n_attacks}. "
            "This can probably lead to an underestimate of the "
            "singling out risk."
        )
    return queries
class SinglingOutEvaluator:
    """Privacy evaluator that measures the singling out risk.

    Singling out happens when the attacker can determine that
    there is a single individual in the dataset that has certain
    attributes (for example "zip_code == XXX and first_name == YYY")
    with high enough confidence. According to the Article 29 WGP [2],
    singling out is one of the three risks (together with
    linkability and inference) that a successful anonymization technique
    must protect from.

    See [1] for the definition of some of the concepts used here.

    - [1]: https://arxiv.org/abs/1904.06009
    - [2]: https://ec.europa.eu/justice/article-29/documentation/\
opinion-recommendation/files/2014/wp216_en.pdf

    Parameters
    ----------
    ori : pd.DataFrame
        Original dataframe on which the success of the singling out
        attacker will be evaluated.
    syn : pd.DataFrame
        Synthetic dataframe used to generate the singling out queries.
    n_attacks : int, default is 500
        Number of singling out attacks to attempt.
    n_cols : int, default is 3
        Number of columns that the attacker uses to create the singling
        out queries.
    control : pd.DataFrame (optional)
        Independent sample of original records **not** used to create the
        synthetic dataset. This is used to evaluate the excess privacy risk.
    """

    def __init__(
        self,
        ori: pd.DataFrame,
        syn: pd.DataFrame,
        n_attacks: int = 500,
        n_cols: int = 3,
        control: Optional[pd.DataFrame] = None,
    ):
        # Duplicated rows can never be singled out (a query matching them
        # matches at least two records), so they are dropped upfront.
        self._ori = ori.drop_duplicates()
        self._syn = syn.drop_duplicates()
        self._n_attacks = n_attacks
        self._n_cols = n_cols
        self._control = None if control is None else control.drop_duplicates()
        self._queries: List[str] = []
        self._random_queries: List[str] = []
        self._evaluated = False

    def queries(self, baseline: bool = False) -> List[str]:
        """Successful singling out queries.

        Parameters
        ----------
        baseline: bool, default is False.
            If True, return the queries used by the baseline attack (i.e.
            created at random). If False (default) return the queries used
            by the "real" attack.

        Returns
        -------
        List[str]:
            successful singling out queries.
        """
        return self._random_queries if baseline else self._queries

    def evaluate(self, mode: str = "multivariate") -> "SinglingOutEvaluator":
        """Run the attack and evaluate the guesses on the original dataset.

        Parameters
        ----------
        mode : str, default is "multivariate"
            Name of the algorithm used to generate the singling out queries.
            Could be either `multivariate` or `univariate`.

        Returns
        -------
        self
            The evaluated singling out evaluator.

        Raises
        ------
        ValueError
            If `mode` is neither "multivariate" nor "univariate".
        """
        if mode == "multivariate":
            n_cols = self._n_cols
        elif mode == "univariate":
            n_cols = 1
        else:
            raise ValueError(f"mode must be either 'multivariate' or 'univariate', got {mode} instead.")

        # Baseline attack: random queries generated from the synthetic data.
        # Bug fix: store the successful baseline queries in ``_random_queries``
        # (the attribute that ``queries(baseline=True)`` reads); previously
        # they went to an unused ``_baseline_queries`` attribute and
        # ``queries(baseline=True)`` always returned an empty list.
        baseline_queries = _random_queries(df=self._syn, n_queries=self._n_attacks, n_cols=n_cols)
        self._random_queries = _evaluate_queries(df=self._ori, queries=baseline_queries)
        self._n_baseline = len(self._random_queries)

        # Real attack: queries crafted to single out synthetic records.
        queries = _generate_singling_out_queries(
            df=self._syn, n_attacks=self._n_attacks, n_cols=self._n_cols, mode=mode
        )
        self._queries = _evaluate_queries(df=self._ori, queries=queries)
        self._n_success = len(self._queries)

        if self._control is None:
            self._n_control = None
        else:
            self._n_control = len(_evaluate_queries(df=self._control, queries=queries))
            # correct the number of success against the control set
            # to account for different dataset sizes.
            if len(self._control) != len(self._ori):
                # fit the model to the data:
                fitted_model = fit_correction_term(df=self._control, queries=queries)
                correction = fitted_model(len(self._ori)) / fitted_model(len(self._control))
                self._n_control *= correction

        self._evaluated = True
        return self

    def results(self, confidence_level: float = 0.95) -> EvaluationResults:
        """Raw evaluation results.

        Parameters
        ----------
        confidence_level : float, default is 0.95
            Confidence level for the error bound calculation.

        Returns
        -------
        EvaluationResults
            Object containing the success rates for the various attacks.

        Raises
        ------
        RuntimeError
            If ``evaluate()`` has not been called yet.
        """
        if not self._evaluated:
            raise RuntimeError("The singling out evaluator wasn't evaluated yet. Please, run `evaluate()` first.")
        return EvaluationResults(
            n_attacks=self._n_attacks,
            n_success=self._n_success,
            n_baseline=self._n_baseline,
            n_control=self._n_control,
            confidence_level=confidence_level,
        )

    def risk(self, confidence_level: float = 0.95, baseline: bool = False) -> PrivacyRisk:
        """Estimate the singling out risk.

        The risk is estimated comparing the number of successful singling out
        queries to the desired number of attacks (``n_attacks``).

        Parameters
        ----------
        confidence_level : float
            Confidence level for the reported error on the singling out risk.
        baseline : bool, default is False
            If True, return the baseline risk computed from a random guessing
            attack. If False (default) return the risk from the real attack.

        Returns
        -------
        PrivacyRisk
            Estimate of the singling out risk and its confidence interval.
        """
        results = self.results(confidence_level=confidence_level)
        return results.risk(baseline=baseline)
| 18,133 | 32.273394 | 118 | py |
anonymeter | anonymeter-main/src/anonymeter/stats/__init__.py | 0 | 0 | 0 | py | |
anonymeter | anonymeter-main/src/anonymeter/stats/confidence.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
"""Functions for estimating rates and errors in privacy attacks."""
import warnings
from math import sqrt
from typing import NamedTuple, Optional, Tuple
from scipy.stats import norm
class PrivacyRisk(NamedTuple):
    """Measure of a privacy risk.

    Parameters
    ----------
    value : float
        Best estimate of the privacy risk.
    ci : (float, float)
        Confidence interval on the best estimate.
    """
    value: float  # point estimate of the risk
    ci: Tuple[float, float]  # (lower, upper) bounds around the estimate
class SuccessRate(NamedTuple):
    """Estimate of the success rate of a privacy attack.

    Parameters
    ----------
    value : float
        Best estimate of the success rate of the attacker.
    error : float
        Error on the best estimate.
    """
    value: float  # point estimate of the success rate
    error: float  # symmetric error bound on the estimate
    def to_risk(self) -> PrivacyRisk:
        """Convert attacker success rate to `PrivacyRisk`."""
        # bind_value clamps the estimate and its interval to [0, 1].
        return bind_value(point_estimate=self.value, error_bound=self.error)
def probit(confidence_level: float) -> float:
    """Return the standard-normal quantile matching a two-sided confidence level."""
    upper_tail = 0.5 * (1.0 + confidence_level)
    return norm.ppf(upper_tail)
def success_rate(n_total: int, n_success: int, confidence_level: float) -> SuccessRate:
    """Estimate the success rate in a Bernoulli-distributed sample.

    Attack scores follow a Bernoulli distribution (success/failure with
    rates p/1-p). The Wilson score interval is a frequentist estimator
    for the success rate and its confidence interval that stays robust
    in problematic cases (extreme p, small samples). The estimate is a
    weighted average between the MLE result and 0.5; at the sample sizes
    used in privacy attacks the difference from the MLE is negligible.

    Parameters
    ----------
    n_total : int
        Size of the sample.
    n_success : int
        Number of successful trials in the sample.
    confidence_level : float
        Confidence level for the error estimation.

    Returns
    -------
    float
        Point estimate for the success rate.
    float
        Error bound of the point-estimated rate for the requested
        confidence level.

    Notes
    -----
    E.B. WILSON
    Probable inference, the law of succession, and statistical inference
    Journal of the American Statistical Association 22, 209-212 (1927)
    DOI 10.1080/01621459.1927.10502953
    """
    if confidence_level > 1 or confidence_level < 0:
        raise ValueError(f"Parameter `confidence_level` must be between 0 and 1. Got {confidence_level} instead.")

    z = probit(confidence_level)
    z2 = z * z

    # Wilson score: center shrunk towards 0.5, symmetric error bound.
    variance = n_success * (n_total - n_success) / n_total
    denominator = n_total + z2
    center = (n_success + 0.5 * z2) / denominator
    half_width = (z / denominator) * sqrt(variance + 0.25 * z2)

    return SuccessRate(value=center, error=half_width)
def residual_success(
    attack_rate: SuccessRate,
    control_rate: SuccessRate,
) -> SuccessRate:
    """Compute the residual success of a privacy attack.

    Residual success is the excess of the training attack success over
    the control attack success, normalized by the margin of improvement
    (the unsuccessful attacks on control).

    Parameters
    ----------
    attack_rate : SuccessRate
        Success rate on training data.
    control_rate : SuccessRate
        Success rate on control data.

    Returns
    -------
    SuccessRate
        Residual success score without sign correction (negative when
        the control set is more attack-able than the training set).
        A sign correction would yield ``0 <= score <= 1`` (zero for a
        negative uncorrected score). The error is the propagated error
        bound of the residual success rate.
    """
    excess = attack_rate.value - control_rate.value
    residual = excess / (1.0 - control_rate.value)

    # Propagate the errors: dF = sqrt[(dF/dx)^2 dx^2 + (dF/dy)^2 dy^2]
    d_wrt_attack = 1 / abs(1 - control_rate.value)
    d_wrt_control = (attack_rate.value - 1) / (1 - control_rate.value) ** 2
    error = sqrt((attack_rate.error * d_wrt_attack) ** 2 + (control_rate.error * d_wrt_control) ** 2)

    return SuccessRate(value=residual, error=error)
def bind_value(point_estimate: float, error_bound: float) -> PrivacyRisk:
    """Clamp a point estimate and its symmetric error interval to [0, 1].

    Parameters
    ----------
    point_estimate : float
        Point estimate of a rate or risk value.
    error_bound : float
        Symmetric error around the point estimate.

    Returns
    -------
    PrivacyRisk
        The clamped estimate together with its (possibly asymmetric,
        after clamping) confidence interval.
    """
    def clamp(x: float) -> float:
        # Restrict x to the unit interval.
        return min(max(x, 0.0), 1.0)

    value = clamp(point_estimate)
    lower = clamp(point_estimate - error_bound)
    upper = clamp(point_estimate + error_bound)
    return PrivacyRisk(value=value, ci=(lower, upper))
class EvaluationResults:
    """Results of a privacy evaluator.

    Computes the attacker's success rates and the estimate of the
    corresponding privacy risk.

    Parameters
    ----------
    n_attacks : int
        Total number of attacks performed.
    n_success : int
        Number of successful attacks.
    n_baseline : int
        Number of successful attacks for the
        baseline (i.e. random-guessing) attacker.
    n_control : int, default is None
        Number of successful attacks against the
        control dataset. If this parameter is not None
        the privacy risk will be measured relative to
        the attacker success on the control set.
    confidence_level : float, default is 0.95
        Desired confidence level for the confidence
        intervals on the risk.
    """

    def __init__(
        self,
        n_attacks: int,
        n_success: int,
        n_baseline: int,
        n_control: Optional[int] = None,
        confidence_level: float = 0.95,
    ):
        # Wilson-score rates for the main, baseline, and control attacks.
        self.attack_rate = success_rate(n_total=n_attacks, n_success=n_success, confidence_level=confidence_level)
        self.baseline_rate = success_rate(n_total=n_attacks, n_success=n_baseline, confidence_level=confidence_level)
        if n_control is None:
            self.control_rate = None
        else:
            self.control_rate = success_rate(
                n_total=n_attacks, n_success=n_control, confidence_level=confidence_level
            )
        self.n_attacks = n_attacks
        self.n_success = n_success
        self.n_baseline = n_baseline
        self.n_control = n_control
        self._sanity_check()

    def _sanity_check(self):
        # Warn when the attack offers no advantage over random guessing.
        if self.attack_rate.value <= self.baseline_rate.value:
            warnings.warn(
                "Attack is as good or worse as baseline model. "
                f"Estimated rates: attack = {self.attack_rate.value}, "
                f"baseline = {self.baseline_rate.value}. "
                "Analysis results cannot be trusted.",
                stacklevel=2,
            )
        # A saturated control attack leaves no margin to measure residuals.
        if self.control_rate is not None and self.control_rate.value == 1:
            warnings.warn("Success of control attack is 100%. Cannot measure residual privacy risk.", stacklevel=2)

    def risk(self, baseline: bool = False) -> PrivacyRisk:
        """Estimate the privacy risk."""
        if baseline:
            return self.baseline_rate.to_risk()
        if self.control_rate is not None:
            return residual_success(attack_rate=self.attack_rate, control_rate=self.control_rate).to_risk()
        return self.attack_rate.to_risk()
| 7,791 | 31.60251 | 117 | py |
anonymeter | anonymeter-main/src/anonymeter/preprocessing/transformations.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
"""Data pre-processing and transformations for the privacy evaluators."""
import logging
from typing import List, Tuple
import pandas as pd
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
def _encode_categorical(
    df1: pd.DataFrame,
    df2: pd.DataFrame,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Label-encode two categorical dataframes with a shared label map.

    The frames are stacked before encoding so that identical categorical
    values receive the same integer label in both outputs.
    """
    stacked = pd.concat((df1, df2), keys=["df1", "df2"])
    for column in stacked.columns:
        # fit on the union of both frames' values so labels stay consistent
        stacked[column] = LabelEncoder().fit_transform(stacked[column])
    return stacked.loc["df1"], stacked.loc["df2"]
def _scale_numerical(df1: pd.DataFrame, df2: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Scale dataframes with *only* numerical values."""
df1_min, df1_max = df1.min(), df1.max()
df2_min, df2_max = df2.min(), df2.max()
mins = df1_min.where(df1_min < df2_min, df2_min)
maxs = df1_max.where(df1_max > df2_max, df2_max)
ranges = maxs - mins
if any(ranges == 0):
cnames = ", ".join(ranges[ranges == 0].index.values)
logger.debug(
f"Numerical column(s) {cnames} have a null-range: all elements "
"have the same value. These column(s) won't be scaled."
)
ranges[ranges == 0] = 1
df1_scaled = df1.apply(lambda x: x / ranges[x.name])
df2_scaled = df2.apply(lambda x: x / ranges[x.name])
return df1_scaled, df2_scaled
def mixed_types_transform(
    df1: pd.DataFrame, df2: pd.DataFrame, num_cols: List[str], cat_cols: List[str]
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Jointly preprocess two mixed-type dataframes.

    Numerical columns are divided by their range across both datasets, so
    that the difference between any two values within a column is at most
    one.  Categorical columns are label encoded with a label map shared
    between the two frames.

    Parameters
    ----------
    df1: pd.DataFrame.
        First input DataFrame.
    df2: pd.DataFrame.
        Second input DataFrame.
    num_cols: list[str].
        Names of the numerical columns to be processed.
    cat_cols: list[str].
        Names of the categorical columns to be processed.

    Returns
    -------
    trans_df1: pd.DataFrame.
        Transformed df1.
    trans_df2: pd.DataFrame.
        Transformed df2.

    Raises
    ------
    ValueError
        If the two frames have different columns, or if ``num_cols`` and
        ``cat_cols`` together do not cover exactly the frames' columns.
    """
    if set(df1.columns) != set(df2.columns):
        raise ValueError(f"Input dataframes have different columns. df1: {df1.columns}, df2: {df2.columns}.")
    if set(num_cols + cat_cols) != set(df1.columns):
        raise ValueError(
            f"Dataframes columns {df1.columns} do not match "
            "with `num_cols` and `cat_cols`.\n"
            f"num_cols: {num_cols}\n"
            f"cat_cols: {cat_cols}"
        )
    num1, num2 = pd.DataFrame(), pd.DataFrame()
    if num_cols:
        num1, num2 = _scale_numerical(df1[num_cols], df2[num_cols])
    cat1, cat2 = pd.DataFrame(), pd.DataFrame()
    if cat_cols:
        cat1, cat2 = _encode_categorical(df1[cat_cols], df2[cat_cols])
    # reassemble in the original column order
    out1 = pd.concat([num1, cat1], axis=1)[df1.columns]
    out2 = pd.concat([num2, cat2], axis=1)[df2.columns]
    return out1, out2
| 3,730 | 34.198113 | 109 | py |
anonymeter | anonymeter-main/src/anonymeter/preprocessing/type_detection.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
from typing import Dict, List
import pandas as pd
def detect_col_types(df: pd.DataFrame) -> Dict[str, List[str]]:
    """Split the dataframe's columns into numerical and categorical names.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe whose columns are inspected.

    Returns
    -------
    Dict[str, List[str]]
        Mapping with keys ``"num"`` (numerical columns) and ``"cat"``
        (all remaining, i.e. categorical, columns). Both lists are sorted.
    """
    numerical = list(df.select_dtypes("number").columns.values)
    categorical = [name for name in df.columns.values if name not in numerical]
    return {"num": sorted(numerical), "cat": sorted(categorical)}
def detect_consistent_col_types(df1: pd.DataFrame, df2: pd.DataFrame):
    """Detect column types of two dataframes and require them to agree.

    Parameters
    ----------
    df1 : pandas.DataFrame
        Input dataframe
    df2 : pandas.DataFrame
        Input dataframe

    Returns
    -------
    Dict[str: List[str]]
        Column names keyed by type ('num' for numerical, 'cat' for
        categorical), as returned by ``detect_col_types``.

    Raises
    ------
    RuntimeError
        If the two dataframes disagree on column names or types.
    """
    types1 = detect_col_types(df1)
    types2 = detect_col_types(df2)
    if types1 != types2:
        raise RuntimeError("Input dataframes have different column names/types.")
    return types1
| 1,658 | 29.722222 | 83 | py |
anonymeter | anonymeter-main/src/anonymeter/preprocessing/__init__.py | 0 | 0 | 0 | py | |
anonymeter | anonymeter-main/tests/test_transformations.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
import numpy as np
import pandas as pd
import pytest
from scipy.spatial.distance import pdist, squareform
from anonymeter.preprocessing.transformations import mixed_types_transform
rng = np.random.default_rng()
def test_scaling_numerical():
    # After joint scaling, the overall min and max of the combined column
    # must be exactly one apart; all other pairs are strictly closer.
    df_ori = pd.DataFrame({"c": rng.random(5)})
    df_syn = pd.DataFrame({"c": rng.random(5)})
    tdf_ori, tdf_syn = mixed_types_transform(df_ori, df_syn, num_cols=["c"], cat_cols=[])
    # values are scaled so that abs(difference) is between 0 and 1.
    # since this is a square distance matrix, there will be two elements with d=1
    vals = pd.concat([tdf_ori, tdf_syn])["c"].values
    dm = squareform(pdist(vals[:, np.newaxis], "cityblock"))
    assert np.sum(np.isclose(dm, 1)) == 2
    assert np.amin(dm) == 0
@pytest.mark.parametrize(
"df1, df2, exp1, exp2",
[
(
pd.DataFrame({"c": ["a", "b", "c", "d"]}),
pd.DataFrame({"c": ["a", "b", "c", "c"]}),
pd.DataFrame({"c": [0, 1, 2, 3]}),
pd.DataFrame({"c": [0, 1, 2, 2]}),
),
(
pd.DataFrame({"c": ["a", "b", "c", None]}),
pd.DataFrame({"c": ["a", "b", "c", "c"]}),
pd.DataFrame({"c": [0, 1, 2, 3]}),
pd.DataFrame({"c": [0, 1, 2, 2]}),
),
(
pd.DataFrame({"c": ["a", "b", "c", "d"]}),
pd.DataFrame({"c": ["a", "b", None, "c"]}),
pd.DataFrame({"c": [0, 1, 2, 3]}),
pd.DataFrame({"c": [0, 1, 4, 2]}),
),
],
)
def test_encoding_categorical(df1, df2, exp1, exp2):
enc1, enc2 = mixed_types_transform(df1=df1, df2=df2, cat_cols=["c"], num_cols=[])
pd.testing.assert_frame_equal(enc1, exp1)
pd.testing.assert_frame_equal(enc2, exp2)
@pytest.mark.parametrize(
"df1, df2, exp1, exp2",
[
(
pd.DataFrame({"c": ["a", "b", "c"]}),
pd.DataFrame({"c": ["a", "b", "d"]}),
pd.DataFrame({"c": [0, 1, 2]}),
pd.DataFrame({"c": [0, 1, 3]}),
),
(
pd.DataFrame({"c": ["a", "b", "c"]}),
pd.DataFrame({"c": ["a", "b", None]}),
pd.DataFrame({"c": [0, 1, 2]}),
pd.DataFrame({"c": [0, 1, 3]}),
),
(
pd.DataFrame({"c": [None, "b", "c"]}),
pd.DataFrame({"c": ["a", "b", None]}),
pd.DataFrame({"c": [3, 1, 2]}),
pd.DataFrame({"c": [0, 1, 3]}),
),
],
)
def test_encoding_categorical_new_values(df1, df2, exp1, exp2):
enc1, enc2 = mixed_types_transform(df1=df1, df2=df2, cat_cols=["c"], num_cols=[])
pd.testing.assert_frame_equal(enc1, exp1)
pd.testing.assert_frame_equal(enc2, exp2)
| 2,885 | 34.195122 | 89 | py |
anonymeter | anonymeter-main/tests/test_type_detection.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
import numpy as np
import pandas as pd
import pytest
from anonymeter.preprocessing.type_detection import detect_col_types, detect_consistent_col_types
rng = np.random.default_rng()
@pytest.mark.parametrize(
"df, expected",
[
(pd.DataFrame({"num": rng.random(5), "cat": list("abcde")}), {"cat": ["cat"], "num": ["num"]}),
(pd.DataFrame({"num1": rng.random(5), "num2": [1, 2, 3, 4, 5]}), {"cat": [], "num": ["num1", "num2"]}),
(
pd.DataFrame({"num1": rng.random(5), "num2": [1, 2, 3, 4, 5]}).astype("object"),
{"cat": ["num1", "num2"], "num": []},
),
(
pd.DataFrame({"cat1": list("abcde"), "cat2": ["1", "2", "3", "4", "5"]}),
{"cat": ["cat1", "cat2"], "num": []},
),
],
)
def test_detect_col_types(df, expected):
ctypes = detect_col_types(df=df)
assert ctypes == expected
def test_detect_col_types_consistent():
    # identical num/cat layout in both frames -> detection succeeds
    df1 = pd.DataFrame({"num": rng.random(5), "cat": list("abcde")})
    df2 = pd.DataFrame({"num": rng.random(5), "cat": list("fghil")})
    assert detect_consistent_col_types(df1, df2) == {"cat": ["cat"], "num": ["num"]}
def test_detect_col_types_consistent_raises():
    # "num" is numeric in df1 but string-typed in df2 -> type mismatch
    df1 = pd.DataFrame({"num": rng.random(5), "cat": list("abcde")})
    df2 = pd.DataFrame({"num": [str(_) for _ in rng.random(5)], "cat": list("fghil")})
    with pytest.raises(RuntimeError):
        detect_consistent_col_types(df1, df2)
| 1,643 | 36.363636 | 111 | py |
anonymeter | anonymeter-main/tests/test_linkability_evaluator.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
import numpy as np
import pandas as pd
import pytest
from anonymeter.evaluators.linkability_evaluator import LinkabilityEvaluator, LinkabilityIndexes
from tests.fixtures import get_adult
rng = np.random.default_rng(seed=42)
@pytest.mark.parametrize("n_attacks", [4, None])
@pytest.mark.parametrize(
"n_neighbors, confidence_level, expected_risk, expected_ci",
[
(1, 0, 0.25, (0.25, 0.25)),
(2, 0, 1, (1.0, 1.0)),
(3, 0, 1, (1.0, 1.0)),
(4, 0, 1, (1.0, 1.0)),
(1, 0.95, 0.3725, (0.045587, 0.699358)),
(2, 0.95, 0.7551, (0.5102, 1.0)),
],
)
def test_linkability_evaluator(n_neighbors, confidence_level, expected_risk, expected_ci, n_attacks):
ori = pd.DataFrame({"col0": [0, 0, 4, 0], "col1": [0, 1, 9, 4]})
syn = pd.DataFrame({"col0": [0, 1, 4, 9], "col1": [0, 1, 4, 9]})
evaluator = LinkabilityEvaluator(
ori=ori, syn=syn, n_attacks=n_attacks, n_neighbors=n_neighbors, aux_cols=(["col0"], ["col1"])
)
evaluator.evaluate(n_jobs=1)
risk, ci = evaluator.risk(confidence_level=confidence_level)
np.testing.assert_allclose(risk, expected_risk, atol=1e-4)
np.testing.assert_allclose(ci, expected_ci, atol=1e-4)
@pytest.mark.parametrize("n_attacks", [4, None])
@pytest.mark.parametrize(
"n_neighbors, confidence_level, expected_risk, expected_ci",
[
(1, 0, 0.25, (0.25, 0.25)),
(2, 0, 1, (1.0, 1.0)),
(3, 0, 1, (1.0, 1.0)),
(4, 0, 1, (1.0, 1.0)),
(1, 0.95, 0.3725, (0.045587, 0.699358)),
(2, 0.95, 0.7551, (0.5102, 1.0)),
],
)
def test_linkability_evaluator_neighbors(n_neighbors, confidence_level, expected_risk, expected_ci, n_attacks):
# see comment in the test_linkability_evaluator to understand
# the ground truth on which this test is based.
ori = pd.DataFrame({"col0": [0, 0, 4, 0], "col1": [0, 1, 9, 4]})
syn = pd.DataFrame({"col0": [0, 1, 4, 9], "col1": [0, 1, 4, 9]})
evaluator = LinkabilityEvaluator(
ori=ori, syn=syn, n_attacks=n_attacks, n_neighbors=4, aux_cols=(["col0"], ["col1"])
)
evaluator.evaluate(n_jobs=1)
risk, ci = evaluator.risk(confidence_level=confidence_level, n_neighbors=n_neighbors)
np.testing.assert_allclose(risk, expected_risk, atol=1e-4)
np.testing.assert_allclose(ci, expected_ci, atol=1e-4)
@pytest.mark.parametrize("n_neighbors, fails", [(1, False), (2, False), (3, False), (4, False), (5, True), (45, True)])
def test_linkability_evaluator_neighbors_fails(n_neighbors, fails):
ori = pd.DataFrame({"col0": [0, 0, 4, 0], "col1": [0, 1, 9, 4]})
syn = pd.DataFrame({"col0": [0, 1, 4, 9], "col1": [0, 1, 4, 9]})
evaluator = LinkabilityEvaluator(ori=ori, syn=syn, n_attacks=4, n_neighbors=4, aux_cols=(["col0"], ["col1"]))
evaluator.evaluate(n_jobs=1)
if fails:
with pytest.raises(ValueError):
evaluator.risk(n_neighbors=n_neighbors)
else:
evaluator.risk(n_neighbors=n_neighbors)
@pytest.mark.parametrize("n_neighbors, expected_risk", [(1, 0.25), (2, 5 / 6), (3, 1), (4, 1)])
def test_baseline(n_neighbors, expected_risk):
# note that for the baseline attack, it does not really matter
# what's inside the synthetic or the original dataframe.
ori = pd.DataFrame(rng.choice(["a", "b"], size=(400, 2)), columns=["c0", "c1"])
syn = pd.DataFrame([["a", "a"], ["b", "b"], ["a", "a"], ["a", "a"]], columns=["c0", "c1"])
evaluator = LinkabilityEvaluator(ori=ori, syn=syn, n_attacks=None, n_neighbors=n_neighbors, aux_cols=("c0", "c1"))
evaluator.evaluate(n_jobs=1)
baseline_risk, _ = evaluator.risk(confidence_level=0.95, baseline=True)
np.testing.assert_allclose(baseline_risk, expected_risk, atol=5e-2)
@pytest.mark.parametrize(
"n_neighbors, idx_0, idx_1, expected, n_expected",
[
(1, [[0], [1], [2], [3]], [[4], [5], [6], [7]], {}, 0),
(1, [[0], [1], [2], [3]], [[4], [1], [6], [7]], {1: {1}}, 1),
(1, [[0], [1], [2], [3]], [[4], [1], [6], [7]], {1: {1}}, 1),
(1, [[0], [1], [6], [3]], [[4], [1], [6], [7]], {1: {1}, 2: {6}}, 2),
(1, [[0, 1], [2, 3]], [[1, 0], [3, 2]], {}, 0),
(2, [[0, 1], [2, 3]], [[1, 0], [3, 2]], {0: {0, 1}, 1: {2, 3}}, 2),
],
)
def test_find_links(n_neighbors, idx_0, idx_1, expected, n_expected):
indexes = LinkabilityIndexes(idx_0=np.array(idx_0), idx_1=np.array(idx_1))
links = indexes.find_links(n_neighbors=n_neighbors)
n_links = indexes.count_links(n_neighbors=n_neighbors)
assert links == expected
assert n_links == n_expected
@pytest.mark.parametrize("confidence_level", [0.5, 0.68, 0.95, 0.99])
def test_linkability_risk(confidence_level):
ori = get_adult("ori", n_samples=10)
col_sample = rng.choice(ori.columns, size=4, replace=False)
evaluator = LinkabilityEvaluator(
ori=ori, syn=ori, n_attacks=10, n_neighbors=5, aux_cols=(col_sample[:2], col_sample[2:])
)
evaluator.evaluate(n_jobs=1)
risk, ci = evaluator.risk(confidence_level=confidence_level)
np.testing.assert_allclose(ci[1], 1.0)
def test_evaluator_not_evaluated():
evaluator = LinkabilityEvaluator(ori=pd.DataFrame(), syn=pd.DataFrame(), aux_cols=[])
with pytest.raises(RuntimeError):
evaluator.risk()
| 5,463 | 41.030769 | 119 | py |
anonymeter | anonymeter-main/tests/test_inference_evaluator.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
import numpy as np
import pandas as pd
import pytest
from anonymeter.evaluators.inference_evaluator import InferenceEvaluator, evaluate_inference_guesses
from tests.fixtures import get_adult
@pytest.mark.parametrize(
"guesses, secrets, expected",
[
(("a", "b"), ("a", "b"), (True, True)),
((np.nan, "b"), (np.nan, "b"), (True, True)),
((np.nan, np.nan), (np.nan, np.nan), (True, True)),
((np.nan, "b"), ("a", np.nan), (False, False)),
(("a", "b"), ("a", "c"), (True, False)),
(("b", "b"), ("a", "c"), (False, False)),
((1, 0), (2, 0), (False, True)),
],
)
def test_evaluate_inference_guesses_classification(guesses, secrets, expected):
out = evaluate_inference_guesses(guesses=pd.Series(guesses), secrets=pd.Series(secrets), regression=False)
np.testing.assert_equal(out, expected)
@pytest.mark.parametrize(
"guesses, secrets, expected",
[
((1.0, 1.0), (1.0, 1.0), (True, True)),
((1.01, 1.0), (1.0, 1.01), (True, True)),
((1.0, 1.0), (2.0, 1.01), (False, True)),
((1.0, 2.0), (2.0, 1.01), (False, False)),
],
)
def test_evaluate_inference_guesses_regression(guesses, secrets, expected):
out = evaluate_inference_guesses(guesses=pd.Series(guesses), secrets=pd.Series(secrets), regression=True)
np.testing.assert_equal(out, expected)
@pytest.mark.parametrize(
"guesses, secrets, tolerance, expected",
[
((1.0, 1.0), (1.05, 1.06), 0.05, (True, False)),
((1.0, 1.0), (1.05, 1.06), 0.06, (True, True)),
((1.0, np.nan), (1.05, np.nan), 0.06, (True, True)),
((np.nan, np.nan), (np.nan, np.nan), 0.06, (True, True)),
((1, np.nan), (np.nan, 1.06), 0.06, (False, False)),
((1.0, 1.0), (1.05, 1.06), 0.04, (False, False)),
((1.0, 1.0), (1.25, 1.26), 0.2, (False, False)),
((1.0, 1.0), (1.26, 1.25), 0.25, (False, True)),
],
)
def test_evaluate_inference_guesses_regression_tolerance(guesses, secrets, tolerance, expected):
out = evaluate_inference_guesses(
guesses=pd.Series(guesses), secrets=pd.Series(secrets), tolerance=tolerance, regression=True
)
np.testing.assert_equal(out, expected)
@pytest.mark.parametrize(
"ori, syn, expected",
[
([["a", "b"], ["c", "d"]], [["a", "b"], ["c", "d"]], 1),
([["a", "b"], ["c", "d"]], [["a", "b"], ["c", "e"]], 0.5),
([["a", "b"], ["c", "d"]], [["a", "h"], ["c", "g"]], 0.0),
],
)
def test_inference_evaluator_rates(ori, syn, expected):
ori = pd.DataFrame(ori, columns=["c0", "c1"])
syn = pd.DataFrame(syn, columns=["c0", "c1"])
evaluator = InferenceEvaluator(ori=ori, syn=syn, control=ori, aux_cols=["c0"], secret="c1", n_attacks=2).evaluate(
n_jobs=1
)
results = evaluator.results(confidence_level=0)
np.testing.assert_equal(results.attack_rate, (expected, 0))
np.testing.assert_equal(results.control_rate, (expected, 0))
@pytest.mark.parametrize(
"aux_cols",
[
["type_employer", "capital_loss", "hr_per_week", "age"],
["education_num", "marital", "capital_loss"],
["age", "type_employer", "race"],
],
)
@pytest.mark.parametrize("secret", ["education", "marital", "capital_gain"])
def test_inference_evaluator_leaks(aux_cols, secret):
ori = get_adult("ori", n_samples=10)
evaluator = InferenceEvaluator(ori=ori, syn=ori, control=ori, aux_cols=aux_cols, secret=secret, n_attacks=10)
evaluator.evaluate(n_jobs=1)
results = evaluator.results(confidence_level=0)
np.testing.assert_equal(results.attack_rate, (1, 0))
np.testing.assert_equal(results.control_rate, (1, 0))
def test_evaluator_not_evaluated():
evaluator = InferenceEvaluator(
ori=pd.DataFrame(), syn=pd.DataFrame(), control=pd.DataFrame(), aux_cols=[], secret=""
)
with pytest.raises(RuntimeError):
evaluator.risk()
| 4,103 | 37 | 118 | py |
anonymeter | anonymeter-main/tests/test_singling_out_evaluator.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
import numpy as np
import pandas as pd
import pytest
from scipy import integrate
from anonymeter.evaluators.singling_out_evaluator import (
SinglingOutEvaluator,
UniqueSinglingOutQueries,
multivariate_singling_out_queries,
safe_query_counts,
singling_out_probability_integral,
univariate_singling_out_queries,
)
from tests.fixtures import get_adult
@pytest.mark.parametrize("mode", ["univariate", "multivariate"])
def test_so_general(mode):
    """Smoke-test both singling-out modes: every generated query must match
    exactly one record in both the original and the synthetic data."""
    ori = get_adult("ori", n_samples=10)
    syn = get_adult("syn", n_samples=10)
    soe = SinglingOutEvaluator(ori=ori, syn=syn, n_attacks=5).evaluate(mode=mode)
    for q in soe.queries():
        # BUG FIX: the parenthesis was misplaced — `len(df.query(q) == 1)`
        # is just the number of matched rows, so the assert only checked
        # that the query matched *some* rows, not exactly one.
        assert len(syn.query(q)) == 1
        assert len(ori.query(q)) == 1
def test_singling_out_queries_unique():
df = pd.DataFrame({"c1": [1], "c2": [2]})
queries = UniqueSinglingOutQueries()
q1, q2 = "c1 == 1", "c2 == 2"
queries.check_and_append(q1, df=df)
queries.check_and_append(q1, df=df)
assert queries.queries == [q1]
queries.check_and_append(q2, df=df)
assert queries.queries == [q1, q2]
queries = UniqueSinglingOutQueries()
q3, q4 = f"{q1} and {q2}", f"{q2} and {q1}"
queries.check_and_append(q3, df=df)
queries.check_and_append(q4, df=df)
assert queries.queries == [q3]
def test_singling_out_queries():
df = pd.DataFrame({"c1": [1, 1], "c2": [2, 3]})
queries = UniqueSinglingOutQueries()
queries.check_and_append("c1 == 1", df=df) # does not single out
assert len(queries) == 0
queries.check_and_append("c1 == 1 and c2 == 3", df=df) # does single out
assert len(queries) == 1
@pytest.mark.parametrize(
"query, result", [("c1 == 0 and c2 == 'a'", 2), ("c3 == 'fuffa'", None), ("c1 == 2 and c2 == 'c'", 1)]
)
def test_safe_query_counts(query, result):
df = pd.DataFrame({"c1": [0, 0, 2], "c2": ["a", "a", "c"]})
assert safe_query_counts(query=query, df=df) == result
def test_univariate_singling_out_queries():
df = pd.DataFrame({"col1": ["a", "b", "c", "d"]})
queries = univariate_singling_out_queries(df=df, n_queries=10)
expected_queries = ["col1 == 'a'", "col1 == 'b'", "col1 == 'c'", "col1 == 'd'"]
assert sorted(queries) == sorted(expected_queries)
def test_singling_out_query_generator():
df = pd.DataFrame({"c0": ["a", "b"], "c1": [1.23, 9.87]})
queries = multivariate_singling_out_queries(df=df, n_queries=2, n_cols=2)
possible_queries = [
"c1<= 1.23 & c1>= 9.87",
"c1<= 1.23 & c0== 'b'",
"c1<= 1.23 & c0== 'a'",
"c1>= 9.87 & c1<= 1.23",
"c1>= 9.87 & c0== 'b'",
"c1>= 9.87 & c0== 'a'",
"c0== 'b' & c1<= 1.23",
"c0== 'b' & c1>= 9.87",
"c0== 'b' & c0== 'a'",
"c0== 'a' & c1<= 1.23",
"c0== 'a' & c1>= 9.87",
"c0== 'a' & c0== 'b'",
]
for query in queries:
assert query in possible_queries
@pytest.mark.parametrize("confidence_level", [0.5, 0.68, 0.95, 0.99])
@pytest.mark.parametrize("mode", ["univariate", "multivariate"])
def test_singling_out_risk_estimate(confidence_level, mode):
ori = get_adult("ori", 10)
soe = SinglingOutEvaluator(ori=ori, syn=ori, n_attacks=5)
soe.evaluate(mode=mode)
risk, ci = soe.risk(confidence_level=confidence_level)
np.testing.assert_allclose(ci[1], 1.0)
def test_evaluator_not_evaluated():
soe = SinglingOutEvaluator(ori=pd.DataFrame(), syn=pd.DataFrame())
with pytest.raises(RuntimeError):
soe.risk()
@pytest.mark.parametrize("n", [100, 4242, 11235])
@pytest.mark.parametrize("w_min, w_max", [(0, 1), (1 / 10000, 1 / 1000), (0.0013414, 0.2314)])
def test_probability_integral(n, w_min, w_max):
def _so_probability(n: int, w: float):
return n * w * ((1 - w) ** (n - 1))
desired, _ = integrate.quad(lambda x: _so_probability(w=x, n=n), a=w_min, b=w_max)
integral = singling_out_probability_integral(n=n, w_min=w_min, w_max=w_max)
np.testing.assert_almost_equal(desired, integral)
| 4,203 | 32.632 | 106 | py |
anonymeter | anonymeter-main/tests/test_mixed_types_kneigbors.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
import numpy as np
import pandas as pd
import pytest
from anonymeter.neighbors.mixed_types_kneighbors import MixedTypeKNeighbors, gower_distance
from tests.fixtures import get_adult
rng = np.random.default_rng()
def test_mixed_type_kNN():
df = get_adult("ori", n_samples=10)
nn = MixedTypeKNeighbors().fit(df)
shuffled_idx = rng.integers(10, size=10)
dist, ids = nn.kneighbors(df.iloc[shuffled_idx], n_neighbors=1, return_distance=True)
np.testing.assert_equal(ids.flatten(), shuffled_idx)
np.testing.assert_equal(dist, 0)
def test_mixed_type_kNN_numerical():
ori = pd.DataFrame([[0.0, "a"], [0.2, "a"], [0.15, "a"], [0.1, "a"]])
syn = pd.DataFrame([[0.01, "a"]])
nn = MixedTypeKNeighbors().fit(ori)
ids = nn.kneighbors(syn, n_neighbors=4, return_distance=False)
np.testing.assert_equal(ids, [[0, 3, 2, 1]])
def test_mixed_type_kNN_numerical_scaling():
ori = pd.DataFrame([[0.0, "a"], [0.2, "a"], [0.15, "a"], [0.1, "a"]])
# this is equal to the min value in the fitted dataframe.
# The distance to the 2nd record in ori will be maximal.
syn = pd.DataFrame([[0.0, "a"]])
nn = MixedTypeKNeighbors().fit(ori)
dist, ids = nn.kneighbors(syn, n_neighbors=4, return_distance=True)
np.testing.assert_equal(ids, [[0, 3, 2, 1]])
np.testing.assert_equal(dist[ids == 1], 1)
@pytest.mark.parametrize("n_neighbors, n_queries", [(1, 10), (3, 5)])
def test_mixed_type_kNN_shape(n_neighbors, n_queries):
df = get_adult("ori", n_samples=10)
nn = MixedTypeKNeighbors(n_neighbors=n_neighbors).fit(df)
ids = nn.kneighbors(df.head(n_queries))
assert ids.shape == (n_queries, n_neighbors)
nn = MixedTypeKNeighbors().fit(df)
ids = nn.kneighbors(df.head(n_queries), n_neighbors=n_neighbors)
assert ids.shape == (n_queries, n_neighbors)
@pytest.mark.parametrize(
"r0, r1, expected",
[
([0, 1, 0, 0], [0, 1, 0, 0], 0),
([1, 1, 0, 0], [0, 1, 0, 0], 1),
([1, 1, 1, 0], [0, 1, 0, 0], 2),
([1, 0, 1, 0], [1, 1, 0, 1], 3),
([1, 0, 1, 0], [0, 1, 0, 1], 4),
],
)
def test_gower_distance(r0, r1, expected):
r0, r1 = np.array(r0), np.array(r1)
dist = gower_distance(r0=r0, r1=r1, cat_cols_index=0)
np.testing.assert_equal(dist, expected)
# numerical and categorical should behave the same
dist = gower_distance(r0=r0, r1=r1, cat_cols_index=4)
np.testing.assert_equal(dist, expected)
def test_gower_distance_numerical():
    # assumes cat_cols_index == len(r0) means "no categorical entries", so
    # the Gower distance reduces to the L1 (Manhattan) distance — TODO
    # confirm against the gower_distance definition.
    r0, r1 = rng.random(size=10), rng.random(size=10)
    dist = gower_distance(r0=r0, r1=r1, cat_cols_index=10)
    np.testing.assert_almost_equal(dist, np.sum(np.abs(r0 - r1)))
| 2,859 | 35.202532 | 91 | py |
anonymeter | anonymeter-main/tests/test_confidence.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details.
import numpy as np
import pytest
from anonymeter.stats.confidence import (
EvaluationResults,
SuccessRate,
bind_value,
probit,
residual_success,
success_rate,
)
def test_probit():
    # the two-sided 95% z-score should be the familiar 1.96
    assert np.round(probit(0.95), decimals=2) == 1.96
@pytest.mark.parametrize(
"n_success, expected_risk, expected_error",
[
(850, 0.849, 0.022),
(0, 0.002, 0.002),
(1000, 0.998, 0.002),
],
)
def test_success_rate(n_success, expected_risk, expected_error):
rate, error = success_rate(n_total=1000, n_success=n_success, confidence_level=0.95)
assert np.round(rate, decimals=3) == expected_risk
assert np.round(error, decimals=3) == expected_error
@pytest.mark.parametrize(
"attack_rate, control_rate, expected",
[
(SuccessRate(0.9, 0.0), SuccessRate(0.8, 0.0), SuccessRate(0.5, 0.0)),
(SuccessRate(0.9, 0.02), SuccessRate(0.85, 0.02), SuccessRate(0.333, 0.16)),
],
)
def test_residual_success(attack_rate, control_rate, expected):
residual = residual_success(attack_rate=attack_rate, control_rate=control_rate)
np.testing.assert_equal(np.round(residual, decimals=3), expected)
@pytest.mark.parametrize(
"point_estimate, error_bound, expected",
[
(0.1, 0.3, (0.1, 0.0, 0.4)),
(1.1, 0.5, (1.0, 0.6, 1.0)),
(-0.1, 0.2, (0.0, 0.0, 0.1)),
],
)
def test_bind_value(point_estimate, error_bound, expected):
risk = bind_value(point_estimate, error_bound)
np.testing.assert_almost_equal(np.array([risk.value, risk.ci[0], risk.ci[1]]), expected)
@pytest.mark.parametrize(
"n_attacks, n_success, n_baseline",
[(100, 100, 0), (100, 23, 11), (111, 84, 42), (100, 0, 100)],
)
def test_evaluation_results_simple(n_attacks, n_success, n_baseline):
results = EvaluationResults(
n_attacks=n_attacks,
n_success=n_success,
n_baseline=n_baseline,
n_control=None,
confidence_level=0,
)
risk = results.risk()
baseline_risk = results.risk(baseline=True)
assert results.control_rate is None
assert results.attack_rate.value == n_success / n_attacks
assert results.baseline_rate.value == n_baseline / n_attacks
assert risk.value == n_success / n_attacks
assert baseline_risk.value == n_baseline / n_attacks
assert risk.ci == (risk.value, risk.value)
assert baseline_risk.ci == (baseline_risk.value, baseline_risk.value)
@pytest.mark.parametrize(
"n_attacks, n_success, n_baseline, n_control, confidence_level, expected_rate, expected_baseline",
[
(
100,
100,
0,
None,
0.95,
SuccessRate(value=0.9815032508965071, error=0.01849674910349284),
SuccessRate(value=0.01849674910349284, error=0.01849674910349284),
),
(
100,
100,
0,
None,
0.68,
SuccessRate(value=0.9951036894831882, error=0.004896310516811869),
SuccessRate(value=0.0048963105168118685, error=0.004896310516811869),
),
(
100,
23,
11,
None,
0.95,
SuccessRate(value=0.23998824451588613, error=0.08155558571285167),
SuccessRate(value=0.1244274643007244, error=0.06188550073007873),
),
],
)
def test_evaluation_results_confidence(
n_attacks,
n_success,
n_baseline,
n_control,
confidence_level,
expected_rate,
expected_baseline,
):
results = EvaluationResults(
n_attacks=n_attacks,
n_success=n_success,
n_baseline=n_baseline,
n_control=n_control,
confidence_level=confidence_level,
)
np.testing.assert_equal(results.attack_rate, expected_rate)
np.testing.assert_equal(results.baseline_rate, expected_baseline)
np.testing.assert_equal(results.risk(baseline=False), expected_rate.to_risk())
np.testing.assert_equal(results.risk(baseline=True), expected_baseline.to_risk())
def test_evaluation_results_warns_baseline():
with pytest.warns(UserWarning):
EvaluationResults(
n_attacks=100,
n_success=49,
n_baseline=50,
n_control=None,
confidence_level=0.95,
)
def test_evaluation_results_warns_control():
with pytest.warns(UserWarning):
EvaluationResults(n_attacks=100, n_success=49, n_baseline=0, n_control=100, confidence_level=0)
@pytest.mark.parametrize("confidence_level", [-0.1, 1.2])
def test_confidence_exception(confidence_level):
with pytest.raises(ValueError):
EvaluationResults(
n_attacks=100,
n_success=49,
n_baseline=0,
n_control=None,
confidence_level=confidence_level,
)
| 5,026 | 29.283133 | 103 | py |
anonymeter | anonymeter-main/tests/__init__.py | 0 | 0 | 0 | py | |
anonymeter | anonymeter-main/tests/fixtures.py | # This file is part of Anonymeter and is released under BSD 3-Clause Clear License.
# Copyright (c) 2022 Anonos IP LLC.
# See https://github.com/statice/anonymeter/blob/main/LICENSE.md for details..
import os
from typing import Optional
import pandas as pd
TEST_DIR_PATH = os.path.dirname(os.path.realpath(__file__))
def get_adult(which: str, n_samples: Optional[int] = None) -> pd.DataFrame:
    """Fixture for the adult dataset.

    For details see:
    https://archive.ics.uci.edu/ml/datasets/adult

    Parameters
    ----------
    which : str, in ['ori', 'syn']
        Whether to return the "original" or "synthetic" samples.
    n_samples : int
        Number of sample records to return.
        If `None` - return all samples.

    Returns
    -------
    df : pd.DataFrame
        Adult dataframe.

    Raises
    ------
    ValueError
        If ``which`` is neither 'ori' nor 'syn'.
    """
    if which == "ori":
        fname = "adults_ori.csv"
    elif which == "syn":
        fname = "adults_syn.csv"
    else:
        # BUG FIX: the exception was previously *returned* instead of raised,
        # so an invalid `which` silently handed callers a ValueError instance.
        raise ValueError(f"Invalid value {which} for parameter `which`. Available are: 'ori' or 'syn'.")
    return pd.read_csv(os.path.join(TEST_DIR_PATH, "datasets", fname), nrows=n_samples)
| 1,143 | 26.902439 | 105 | py |
DCN | DCN-master/SC_MNIST.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 13 14:03:37 2016
Try out SC on MNIST
@author: yang4173
"""
from sklearn.cluster import SpectralClustering
import scipy.io as sio
from sklearn import metrics
from sklearn.neighbors import kneighbors_graph
from sklearn.manifold import spectral_embedding
from sklearn.cluster import KMeans
import gzip
import cPickle
import numpy
# Load the pickled MNIST splits (train/valid/test) from the working directory.
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
# perform SC on the test set
data_x, data_y = test_set
# k: number of neighbours for the kNN affinity graph;
# nClass: number of k-means clusters run on the embedding.
k = 12
nClass = 500
# Spectral embedding of the kNN graph (10 components, keep the first
# eigenvector), then shift so all coordinates are non-negative.
A = kneighbors_graph(data_x, k)
V = spectral_embedding(A, n_components = 10, drop_first = False)
V = V + numpy.absolute(numpy.min(V))
#V = V/numpy.amax(V)
#
# Cluster the embedded points and score against the true digit labels.
km_model = KMeans(n_clusters = nClass)
ypred = km_model.fit_predict(V)
nmi = metrics.normalized_mutual_info_score(data_y, ypred)
print('The NMI is: %.4f'%nmi)
#
# Persist the (embedding, labels) pair for downstream experiments.
V = numpy.float32(V)
f = gzip.open('EVD-test500.pkl.gz', 'wb')
cPickle.dump([(V, data_y), 0, 0], f, protocol = 2)
f.close()
#sio.savemat('V_train_10.mat', {'train_x': V, 'train_y': data_y})
#sc = SpectralClustering(n_clusters = nClass, affinity = 'nearest_neighbors', n_neighbors = k)
#sc = SpectralClustering(n_clusters = 10, affinity = 'rbf',gamma = 1, n_neighbors = 10)
#data_x = data_x[0:1000]
#data_y = data_y[0:1000]
#ypred = sc.fit_predict(data_x)
#nmi = metrics.normalized_mutual_info_score(data_y, ypred)
#ari = metrics.adjusted_rand_score(data_y, ypred)
#print 'NMI is: %.4f' %nmi
#print 'ARI is: %.4f' %ari | 1,492 | 24.741379 | 94 | py |
DCN | DCN-master/convolutional_ae.py | """This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
- LeNet5 was not fully-connected convolutions at second layer
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
from __future__ import print_function
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import pool
from theano.tensor.nnet import conv2d
from sklearn.cluster import KMeans
from sklearn import metrics
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
class LeNetConvPoolLayer(object):
    """Pool Layer of a convolutional network """

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
                   numpy.prod(poolsize))
        # initialize weights with random weights
        # (Glorot-style uniform bound derived from fan_in/fan_out)
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            input_shape=image_shape
        )

        # pool each feature map individually, using maxpooling
        pooled_out = pool.pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        # (a tanh non-linearity is applied after pooling + bias)
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input
#def evaluate_lenet5(learning_rate=0.1, n_epochs=100,
# dataset='mnist.pkl.gz',
# nkerns=[20, 50], batch_size=500):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
# Hyper-parameters of the convolutional auto-encoder experiment.
# nClass: number of k-means clusters used for the final evaluation;
# nkerns: number of filters in each of the two conv-pool layers.
learning_rate=0.01
n_epochs=100
nClass = 10
dataset='mnist.pkl.gz'
nkerns=[20, 50]
batch_size=500

rng = numpy.random.RandomState(23455)

datasets = load_data(dataset)

train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]

# Ground-truth training labels, only used to score the clustering (NMI).
label_true = train_set_y.eval()

# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches //= batch_size
n_valid_batches //= batch_size
n_test_batches //= batch_size

# allocate symbolic variables for the data
index = T.lscalar()  # index to a [mini]batch

# start-snippet-1
x = T.matrix('x')   # the data is presented as rasterized images
y = T.ivector('y')  # the labels are presented as 1D vector of
                    # [int] labels

######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')

# Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (28, 28) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 28, 28))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = LeNetConvPoolLayer(
    rng,
    input=layer0_input,
    image_shape=(batch_size, 1, 28, 28),
    filter_shape=(nkerns[0], 1, 5, 5),
    poolsize=(2, 2)
)

# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
layer1 = LeNetConvPoolLayer(
    rng,
    input=layer0.output,
    image_shape=(batch_size, nkerns[0], 12, 12),
    filter_shape=(nkerns[1], nkerns[0], 5, 5),
    poolsize=(2, 2)
)

# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)

# construct a fully-connected sigmoidal layer
# (12-dimensional bottleneck used later as the clustering representation)
layer2 = HiddenLayer(
    rng,
    input=layer2_input,
    n_in=nkerns[1] * 4 * 4,
    n_out=12,
    activation=T.nnet.sigmoid
)

# Decoder: map the bottleneck back to pixel space for reconstruction.
layer3 = HiddenLayer(
    rng,
    input=layer2.output,
    n_in = 12,
    n_out= 28*28,
    activation=T.nnet.sigmoid
)

z = layer3.output
# Mean cross-entropy reconstruction loss between input x and reconstruction z.
cost = -T.mean(T.sum(x*T.log(z) + (1-x)*T.log(1-z), axis = 1))
# classify the values of the fully-connected sigmoidal layer
#layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
#
## the cost we minimize during training is the NLL of the model
#cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
#test_model = theano.function(
# [index],
# layer3.errors(y),
# givens={
# x: test_set_x[index * batch_size: (index + 1) * batch_size],
# y: test_set_y[index * batch_size: (index + 1) * batch_size]
# }
#)
#
#validate_model = theano.function(
# [index],
# layer3.errors(y),
# givens={
# x: valid_set_x[index * batch_size: (index + 1) * batch_size],
# y: valid_set_y[index * batch_size: (index + 1) * batch_size]
# }
#)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params

# create a list of gradients for all model parameters
grads = T.grad(cost, params)

# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
    (param_i, param_i - learning_rate * grad_i)
    for param_i, grad_i in zip(params, grads)
]

# One SGD step on a mini-batch; returns the reconstruction cost.
train_model = theano.function(
    [index],
    cost,
    updates=updates,
    givens={
        x: train_set_x[index * batch_size: (index + 1) * batch_size]
    }
)
# end-snippet-1

###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
# NOTE(review): the early-stopping logic that uses these is commented out
# below; they are currently unused.
patience = 10000  # look as this many examples regardless
patience_increase = 2  # wait this much longer when a new best is
                       # found
improvement_threshold = 0.995  # a relative improvement of this much is
                               # considered significant
validation_frequency = min(n_train_batches, patience // 2)
                              # go through this many
                              # minibatche before checking the network
                              # on the validation set; in this case we
                              # check every epoch

best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()

epoch = 0
done_looping = False

# Function returning the 12-d bottleneck representation of a mini-batch.
out = layer2.output
get_hidden = theano.function(
        [index],
        outputs = out,
        givens = {x: train_set_x[index * batch_size: (index + 1) * batch_size]}
        )
km = KMeans(n_clusters = nClass)
# Plain SGD training over the reconstruction loss; `done_looping` is never
# set because the early-stopping block below is commented out.
while (epoch < n_epochs) and (not done_looping):
    epoch = epoch + 1
    for minibatch_index in range(n_train_batches):

        iter = (epoch - 1) * n_train_batches + minibatch_index

        if iter % 100 == 0:
            print('training @ iter = ', iter)
        cost_ij = train_model(minibatch_index)
        print('Mini-batch cost: %.4f'%cost_ij)

#        if (iter + 1) % validation_frequency == 0:
#
#            # compute zero-one loss on validation set
#            validation_losses = [validate_model(i) for i
#                                 in range(n_valid_batches)]
#            this_validation_loss = numpy.mean(validation_losses)
#            print('epoch %i, minibatch %i/%i, validation error %f %%' %
#                  (epoch, minibatch_index + 1, n_train_batches,
#                   this_validation_loss * 100.))
#
#            # if we got the best validation score until now
#            if this_validation_loss < best_validation_loss:
#
#                #improve patience if loss improvement is good enough
#                if this_validation_loss < best_validation_loss *  \
#                   improvement_threshold:
#                    patience = max(patience, iter * patience_increase)
#
#                # save best validation score and iteration number
#                best_validation_loss = this_validation_loss
#                best_iter = iter
#
#                # test it on the test set
#                test_losses = [
#                    test_model(i)
#                    for i in range(n_test_batches)
#                ]
#                test_score = numpy.mean(test_losses)
#                print(('     epoch %i, minibatch %i/%i, test error of '
#                       'best model %f %%') %
#                      (epoch, minibatch_index + 1, n_train_batches,
#                       test_score * 100.))
#
#        if patience <= iter:
#            done_looping = True
#            break

# After training: collect bottleneck features for all training batches,
# flatten (n_batches, batch_size, dim) -> (n_samples, dim), then cluster
# with k-means and score against the true labels.
hidden_val = []
for batch_index in xrange(n_train_batches):
     hidden_val.append(get_hidden(batch_index))

hidden_array = numpy.asarray(hidden_val)
hidden_size = hidden_array.shape
hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))

km_idx = km.fit_predict(hidden_array)
nmi = metrics.adjusted_mutual_info_score(label_true, km_idx)

end_time = timeit.default_timer()
print('Optimization complete.')
print('NMI is: %.4f'%nmi)
#print('Best validation score of %f %% obtained at iteration %i, '
# 'with test performance %f %%' %
# (best_validation_loss * 100., best_iter + 1, test_score * 100.))
# print(('The code for file ' +
# os.path.split(__file__)[1] +
# ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
#if __name__ == '__main__':
# evaluate_lenet5()
def experiment(state, channel):
    # Jobman-style entry point: run the experiment with hyper-parameters
    # taken from `state`.
    # NOTE(review): `evaluate_lenet5` is commented out in this file, so this
    # call would raise NameError -- confirm before using this hook.
    evaluate_lenet5(state.learning_rate, dataset=state.dataset)
| 13,137 | 32.430025 | 94 | py |
DCN | DCN-master/dA_init.py | """
This tutorial introduces denoising auto-encoders (dA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denosing autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import numpy
from six.moves import cPickle
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import load_data
from utils import tile_raster_images
from dA import dA
try:
import PIL.Image as Image
except ImportError:
import Image
#class dA(object):
# """Denoising Auto-Encoder class (dA)
#
# A denoising autoencoders tries to reconstruct the input from a corrupted
# version of it by projecting it first in a latent space and reprojecting
# it afterwards back in the input space. Please refer to Vincent et al.,2008
# for more details. If x is the input then equation (1) computes a partially
# destroyed version of x by means of a stochastic mapping q_D. Equation (2)
# computes the projection of the input into the latent space. Equation (3)
# computes the reconstruction of the input, while equation (4) computes the
# reconstruction error.
#
# .. math::
#
# \tilde{x} ~ q_D(\tilde{x}|x) (1)
#
# y = s(W \tilde{x} + b) (2)
#
# x = s(W' y + b') (3)
#
# L(x,z) = -sum_{k=1}^d [x_k \log z_k + (1-x_k) \log( 1-z_k)] (4)
#
# """
#
# def __init__(
# self,
# numpy_rng,
# theano_rng=None,
# input=None,
# n_visible=784,
# n_hidden=500,
# W=None,
# bhid=None,
# bvis=None
# ):
# """
# Initialize the dA class by specifying the number of visible units (the
# dimension d of the input ), the number of hidden units ( the dimension
# d' of the latent or hidden space ) and the corruption level. The
# constructor also receives symbolic variables for the input, weights and
# bias. Such a symbolic variables are useful when, for example the input
# is the result of some computations, or when weights are shared between
# the dA and an MLP layer. When dealing with SdAs this always happens,
# the dA on layer 2 gets as input the output of the dA on layer 1,
# and the weights of the dA are used in the second stage of training
# to construct an MLP.
#
# :type numpy_rng: numpy.random.RandomState
# :param numpy_rng: number random generator used to generate weights
#
# :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
# :param theano_rng: Theano random generator; if None is given one is
# generated based on a seed drawn from `rng`
#
# :type input: theano.tensor.TensorType
# :param input: a symbolic description of the input or None for
# standalone dA
#
# :type n_visible: int
# :param n_visible: number of visible units
#
# :type n_hidden: int
# :param n_hidden: number of hidden units
#
# :type W: theano.tensor.TensorType
# :param W: Theano variable pointing to a set of weights that should be
# shared belong the dA and another architecture; if dA should
# be standalone set this to None
#
# :type bhid: theano.tensor.TensorType
# :param bhid: Theano variable pointing to a set of biases values (for
# hidden units) that should be shared belong dA and another
# architecture; if dA should be standalone set this to None
#
# :type bvis: theano.tensor.TensorType
# :param bvis: Theano variable pointing to a set of biases values (for
# visible units) that should be shared belong dA and another
# architecture; if dA should be standalone set this to None
#
#
# """
# self.n_visible = n_visible
# self.n_hidden = n_hidden
#
# # create a Theano random generator that gives symbolic random values
# if not theano_rng:
# theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
#
# # note : W' was written as `W_prime` and b' as `b_prime`
# if not W:
# # W is initialized with `initial_W` which is uniformely sampled
# # from -4*sqrt(6./(n_visible+n_hidden)) and
# # 4*sqrt(6./(n_hidden+n_visible))the output of uniform if
# # converted using asarray to dtype
# # theano.config.floatX so that the code is runable on GPU
# initial_W = numpy.asarray(
# numpy_rng.uniform(
# low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
# high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
# size=(n_visible, n_hidden)
# ),
# dtype=theano.config.floatX
# )
# W = theano.shared(value=initial_W, name='W', borrow=True)
#
# if not bvis:
# bvis = theano.shared(
# value=numpy.zeros(
# n_visible,
# dtype=theano.config.floatX
# ),
# borrow=True
# )
#
# if not bhid:
# bhid = theano.shared(
# value=numpy.zeros(
# n_hidden,
# dtype=theano.config.floatX
# ),
# name='b',
# borrow=True
# )
#
# self.W = W
# # b corresponds to the bias of the hidden
# self.b = bhid
# # b_prime corresponds to the bias of the visible
# self.b_prime = bvis
# # tied weights, therefore W_prime is W transpose
# self.W_prime = self.W.T
# self.theano_rng = theano_rng
# # if no input is given, generate a variable representing the input
# if input is None:
# # we use a matrix because we expect a minibatch of several
# # examples, each example being a row
# self.x = T.dmatrix(name='input')
# else:
# self.x = input
#
#
# self.params = [self.W, self.b, self.b_prime]
#
# def get_corrupted_input(self, input, corruption_level):
# """This function keeps ``1-corruption_level`` entries of the inputs the
# same and zero-out randomly selected subset of size ``coruption_level``
# Note : first argument of theano.rng.binomial is the shape(size) of
# random numbers that it should produce
# second argument is the number of trials
# third argument is the probability of success of any trial
#
# this will produce an array of 0s and 1s where 1 has a
# probability of 1 - ``corruption_level`` and 0 with
# ``corruption_level``
#
# The binomial function return int64 data type by
# default. int64 multiplicated by the input
# type(floatX) always return float64. To keep all data
# in floatX when floatX is float32, we set the dtype of
# the binomial to floatX. As in our case the value of
# the binomial is always 0 or 1, this don't change the
# result. This is needed to allow the gpu to work
# correctly as it only support float32 for now.
#
# """
# return self.theano_rng.binomial(size=input.shape, n=1,
# p=1 - corruption_level,
# dtype=theano.config.floatX) * input
#
# def get_hidden_values(self, input):
# """ Computes the values of the hidden layer """
# return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
#
# def get_reconstructed_input(self, hidden):
# """Computes the reconstructed input given the values of the
# hidden layer
#
# """
# return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
#
# def get_cost_updates(self, corruption_level, learning_rate):
# """ This function computes the cost and the updates for one trainng
# step of the dA """
#
# tilde_x = self.get_corrupted_input(self.x, corruption_level)
# y = self.get_hidden_values(tilde_x)
# z = self.get_reconstructed_input(y)
# # note : we sum over the size of a datapoint; if we are using
# # minibatches, L will be a vector, with one entry per
# # example in minibatch
# L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
# # note : L is now a vector, where each element is the
# # cross-entropy cost of the reconstruction of the
# # corresponding example of the minibatch. We need to
# # compute the average of all these to get the cost of
# # the minibatch
# cost = T.mean(L)
#
# # compute the gradients of the cost of the `dA` with respect
# # to its parameters
# gparams = T.grad(cost, self.params)
# # generate the list of updates
# updates = [
# (param, param - learning_rate * gparam)
# for param, gparam in zip(self.params, gparams)
# ]
#
# return (cost, updates)
def test_dA(learning_rate=0.1, training_epochs=15,
            dataset='mnist.pkl.gz',
            batch_size=20, output_folder='dA_plots'):

    """
    This demo is tested on MNIST

    Trains two denoising auto-encoders (0% and 30% input corruption) on the
    MNIST training set, saves a PNG of each learned filter bank and pickles
    the trained parameters to 'no_corruption.save' / 'corruption.save' in the
    current working directory. Returns nothing. (Python 2 / Theano code.)

    :type learning_rate: float
    :param learning_rate: learning rate used for training the DeNosing
                          AutoEncoder

    :type training_epochs: int
    :param training_epochs: number of epochs used for training

    :type dataset: string
    :param dataset: path to the picked dataset

    """
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]

    # compute number of minibatches for training, validation and testing
    # (Python 2 integer division)
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

    # start-snippet-2
    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    # end-snippet-2

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
#    os.chdir(output_folder)

    ####################################
    # BUILDING THE MODEL NO CORRUPTION #
    ####################################

    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    da = dA(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=x,
        n_visible=28 * 28,
        n_hidden=500
    )

    cost, updates = da.get_cost_updates(
        corruption_level=0.,
        learning_rate=learning_rate
    )

    # One SGD step on a mini-batch; returns the reconstruction cost.
    train_da = theano.function(
        inputs = [index],
        outputs = cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size]
        }
    )

    start_time = timeit.default_timer()

    ############
    # TRAINING #
    ############

    # go through training epochs
    for epoch in xrange(training_epochs):
        # go through trainng set
        c = []
        for batch_index in xrange(n_train_batches):
            c.append(train_da(batch_index))

        print 'Training epoch %d, cost ' % epoch, numpy.mean(c)

    end_time = timeit.default_timer()

    training_time = (end_time - start_time)

    print >> sys.stderr, ('The no corruption code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((training_time) / 60.))
    # Visualize the learned filters (rows of W) as a tiled image.
    image = Image.fromarray(
        tile_raster_images(X=da.W.get_value(borrow=True).T,
                           img_shape=(28, 28), tile_shape=(10, 10),
                           tile_spacing=(1, 1)))
    image.save('filters_corruption_0.png')

    # Save the trained model
    f = open('no_corruption.save', 'wb')
    cPickle.dump([param.get_value() for param in da.params], f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()

    # start-snippet-3
    #####################################
    # BUILDING THE MODEL CORRUPTION 30% #
    #####################################

    # Same architecture and seed as above, but with 30% input corruption.
    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    da = dA(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=x,
        n_visible=28 * 28,
        n_hidden=500
    )

    cost, updates = da.get_cost_updates(
        corruption_level=0.3,
        learning_rate=learning_rate
    )

    train_da = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size]
        }
    )

    start_time = timeit.default_timer()

    ############
    # TRAINING #
    ############

    # go through training epochs
    for epoch in xrange(training_epochs):
        # go through trainng set
        c = []
        for batch_index in xrange(n_train_batches):
            c.append(train_da(batch_index))

        print 'Training epoch %d, cost ' % epoch, numpy.mean(c)

    end_time = timeit.default_timer()

    training_time = (end_time - start_time)

    print >> sys.stderr, ('The 30% corruption code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % (training_time / 60.))
    # end-snippet-3

    f = open('corruption.save', 'wb')
    cPickle.dump([param.get_value() for param in da.params], f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()

    # start-snippet-4
    image = Image.fromarray(tile_raster_images(
        X=da.W.get_value(borrow=True).T,
        img_shape=(28, 28), tile_shape=(10, 10),
        tile_spacing=(1, 1)))
    image.save('filters_corruption_30.png')
    # end-snippet-4

    #os.chdir('../')
if __name__ == '__main__':
    # Train both auto-encoder variants with default hyper-parameters.
    test_dA()
| 15,364 | 34.899533 | 98 | py |
DCN | DCN-master/nystrom.py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 5 21:53:08 2016
Perform Nystrom Spectral Clustering
ref: Fowlkes, Charless, et al. "Spectral grouping using the Nystrom method."
IEEE transactions on pattern analysis and machine intelligence 26.2 (2004): 214-225.
@author: bo
"""
import numpy as np
from sklearn import metrics
#from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
import cPickle, gzip
import sys
def nystrom(data, K):
    """Nystrom approximation of the spectral embedding of ``data``.

    The (implicit) affinity matrix is the linear kernel W = data.dot(data.T).
    K landmark rows are sampled uniformly at random without replacement and
    the eigenvectors of W are extrapolated from them (Fowlkes et al., 2004).

    Parameters
    ----------
    data : ndarray, shape (N, D)
        Input data matrix, with size n-by-d.
    K : int
        Sample (landmark) size, K <= N.

    Returns
    -------
    V : ndarray, shape (N, K)
        Approximate eigenvectors of the affinity matrix, one row per input
        point *in the original row order*, with orthonormal columns sorted
        by decreasing approximate eigenvalue.
    """
    N, _ = data.shape
    assert K <= N

    # Uniform landmark sample; `np.bool_` instead of the removed `np.bool`.
    pos = np.random.choice(N, size=K, replace=False)
    idx = np.zeros(N, dtype=np.bool_)
    idx[pos] = True

    # A: affinities among sampled points; B: sampled vs. unsampled points.
    # Constructed this way, A is guaranteed to be PSD.
    A = np.dot(data[idx], data[idx].T)
    B = np.dot(data[idx], data[np.logical_not(idx)].T)

    # Bug fix: A and S are symmetric, so use eigh (guaranteed real output,
    # eigenvalues in ascending order) instead of the general eig, which may
    # return complex values and unordered eigenpairs.
    eigs, U = np.linalg.eigh(A)
    # Small ridge (1e-8) improves numerical stability of the inverse sqrt;
    # the paper uses a pseudo-inverse here instead.
    A_neg_half = np.dot(U * (eigs + 1e-8) ** (-0.5), U.T)

    S = A + np.dot(np.dot(A_neg_half, np.dot(B, B.T)), A_neg_half)
    eigs_S, U_S = np.linalg.eigh(S)

    tmp = np.dot(np.concatenate((A, B.T), axis=0), A_neg_half)
    V = np.dot(np.dot(tmp, U_S), np.diag((eigs_S + 1e-8) ** (-0.5)))

    # Order columns by decreasing eigenvalue so the leading columns are the
    # dominant eigenvectors (callers take V[:, 0:k]).
    order = np.argsort(eigs_S)[::-1]
    V = V[:, order]

    # Bug fix: the rows of V are stacked as [sampled; unsampled], which no
    # longer aligns with the input (and with any label vector). Restore the
    # original row order so V[i] corresponds to data[i].
    perm = np.concatenate((np.where(idx)[0], np.where(~idx)[0]))
    V_ordered = np.empty_like(V)
    V_ordered[perm] = V
    return V_ordered
if __name__ == '__main__':
    # M: number of Nystrom landmarks; K: number of embedding dims / clusters.
    M = 3000;
    K = 4
    dataset = 'data-0.pkl.gz'
    path = '/home/bo/Data/RCV1/Processed/'
    # Load pickled RCV1 data: data[0] is a sparse feature matrix,
    # data[1] the label vector.
    f = gzip.open(path + dataset, 'rb')
    data = cPickle.load(f)
    f.close()
    train_x = data[0].toarray()
    train_x = train_x.astype(np.float32)
    train_y = np.asarray(data[1], dtype = np.int32)
    train_y = np.reshape(train_y, (train_y.shape[0], 1))
    # Shuffle features and labels together by concatenating the label as a
    # trailing column, then splitting again.
    dim = train_x.shape[1]
    data = np.concatenate((train_x, train_y), axis = 1)
    np.random.shuffle(data)
    train_x = data[:][:, 0:dim]
    train_y = np.int32(np.squeeze(data[:][:, -1]))
    # Embed, keep the first K columns, cluster, and score against labels.
    V = nystrom(train_x, M)
    V = V[:][:, 0:K]
    km = KMeans(n_clusters = 4)
    ypred = km.fit_predict(V)
    nmi = metrics.normalized_mutual_info_score(train_y, ypred)
    print >> sys.stderr, ('NMI for deep clustering: %.2f' % (nmi))
DCN | DCN-master/run_pre_mnist.py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 10 08:50:00 2016
Experiments on pre-processed MNIST
@author: bo
"""
import sys
import gzip
import cPickle
import numpy as np
from sklearn import metrics
from sklearn.cluster import KMeans
from multi_layer_km import test_SdC
from cluster_acc import acc
# K: number of clusters; trials: Monte-Carlo repetitions per method.
K = 10
trials = 10
filename = 'pre_mnist.pkl.gz'
path = '/home/bo/Data/MNIST/'
dataset = path+filename
with gzip.open(dataset, 'rb') as f:
    train_x, train_y = cPickle.load(f)

np.random.seed(seed = 1)
# perform KM
# Baseline: k-means directly on the raw features; collect
# (NMI, ARI, ACC) per trial.
km_model = KMeans(n_clusters = K, n_init = 1)
results_KM = np.zeros((trials, 3))
for i in range(trials):
    ypred = km_model.fit_predict(train_x)
    nmi = metrics.normalized_mutual_info_score(train_y, ypred)
    ari = metrics.adjusted_rand_score(train_y, ypred)
    ac = acc(ypred, train_y)
    results_KM[i] = np.array([nmi, ari, ac])

KM_mean = np.mean(results_KM, axis = 0)
KM_std = np.std(results_KM, axis = 0)

# perform DCN
# Hyper-parameters forwarded to test_SdC (see multi_layer_km).
config = {'Init': '',
          'lbd': 0.1,
          'beta': 1,
          'output_dir': 'MNIST_results',
          'save_file': 'mnist_ssc.pkl.gz',
          'pretraining_epochs': 10,
          'pretrain_lr': 0.01,
          'mu': 0.9,
          'finetune_lr': 0.01,
          'training_epochs': 50,
          'dataset': dataset,
          'batch_size': 20,
          'nClass': K,
          'hidden_dim': [50, 20, 5],
          'diminishing': False}

results = []
for i in range(trials):
    res_metrics = test_SdC(**config)
    results.append(res_metrics)

# Row 0 of each result is the SAE+KM (pre-training only) score; row N is the
# score after `training_epochs` of joint training (one entry every 5 epochs).
results_SAEKM = np.zeros((trials, 3))
results_DCN = np.zeros((trials, 3))
N = config['training_epochs']/5
for i in range(trials):
    results_SAEKM[i] = results[i][0]
    results_DCN[i] = results[i][N]
SAEKM_mean = np.mean(results_SAEKM, axis = 0)
SAEKM_std = np.std(results_SAEKM, axis = 0)
DCN_mean = np.mean(results_DCN, axis = 0)
DCN_std = np.std(results_DCN, axis = 0)

print >> sys.stderr, ('KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(KM_mean[0],
                      KM_mean[1], KM_mean[2]) )
print >> sys.stderr, ('SAE+KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SAEKM_mean[0],
                      SAEKM_mean[1], SAEKM_mean[2]) )
print >> sys.stderr, ('DCN avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(DCN_mean[0],
                      DCN_mean[1], DCN_mean[2]) )
| 2,424 | 27.197674 | 102 | py |
DCN | DCN-master/MC.py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 5 13:17:42 2016
Perform Monto-Calro simulations of: KM, SC, SNMF, DCN (deep clustering network) and NJ-DCN (non-joint, SAE + KM)
The experiment with SNMF is done by saving the data files, and run SNMF with MATLAB.
@author: yang4173
"""
import os
import numpy as np
import gzip
import cPickle
import matplotlib.pyplot as plt
from scipy.io import savemat
import sys
from sklearn.cluster import SpectralClustering
from sklearn.cluster import KMeans
from sklearn import metrics
from sklearn.decomposition import NMF
from multi_layer_km import test_SdC
from multi_layer_km_nj import test_SdC_NJ
def sigmoid(x):
    """Elementwise logistic function, 1 / (1 + e^-x)."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
# number of trials
# number of trials
N = 5
# nClass Gaussian blobs of `num` points each, generated in 2-D around the
# centroids `c`, then lifted nonlinearly to `dim` dimensions.
nClass = 4;
num = 1000;
sigma = 2
dim = 100
c = 10*np.array([[1,1], [0,0], [1, 0], [0, 1]])

# Per-trial scores for k-means (km), spectral clustering (sc),
# non-joint SAE+KM (nj) and the proposed DCN (dc).
nmi_km = np.zeros(N)
ari_km = np.zeros(N)
nmi_sc = np.zeros(N)
ari_sc = np.zeros(N)
nmi_nj = np.zeros(N)
ari_nj = np.zeros(N)
nmi_dc = np.zeros(N)
ari_dc = np.zeros(N)

data_folder = 'data'
for n in range(N):
    lowD_x = np.zeros((nClass*num, 2))
    train_y = np.zeros((nClass*num, 1))
    for i in xrange(nClass):
        lowD_x[i*num: (i+1) *num] = np.tile(c[i,:], (num,1)) + sigma*np.random.randn(num, 2)
        # Class lables: 0, 1, 2...
        train_y[i*num: (i+1) *num] = i*np.ones((num, 1))
    train_y0 = train_y
    # Nonlinear lift: random projection to 100-D, squashed and squared.
    W = np.random.randn(100, 2)
    train_x = np.power(sigmoid(np.dot(lowD_x, W.T)), 2)
#    W = np.random.randn(100, 2)
#    train_x = np.tanh(sigmoid(np.dot(lowD_x, W.T)))
    # Shuffle samples and labels together.
    data = np.concatenate((train_x, train_y), axis = 1)
    np.random.shuffle(data)
    train_x = data[:][:,0:dim]
    train_y = np.int32(data[:][:, -1])
    # save the data
    # (as .mat so the SNMF experiment can be run separately in MATLAB)
    os.chdir(data_folder)
    savemat('data_'+str(n)+'.mat', {'train_x':train_x, 'train_y': train_y})
    os.chdir('../')
    ## Perform KMeans
    km = KMeans(n_clusters= nClass, init='k-means++', n_init=10)
    ypred = km.fit_predict(train_x)
    nmi_km[n] = metrics.adjusted_mutual_info_score(train_y, ypred)
    ari_km[n] = metrics.adjusted_rand_score(train_y, ypred)
    ## Perform spectral clustering
    sc = SpectralClustering(n_clusters= nClass, n_init=10, gamma=0.1, affinity='rbf', assign_labels='kmeans')
    ypred = sc.fit_predict(train_x)
    nmi_sc[n] = metrics.adjusted_mutual_info_score(train_y, ypred)
    ari_sc[n] = metrics.adjusted_rand_score(train_y, ypred)
    # Pickle the trial data in the (train, valid, test) layout the deep
    # models expect; the same split is reused for all three.
    train_set = train_x, train_y
    dataset = [train_set, train_set, train_set]
    f = gzip.open('toy.pkl.gz','wb')
    cPickle.dump(dataset, f, protocol=2)
    f.close()
    ## Perform non-joint SAE+KM
    nmi_nj[n], ari_nj[n] = test_SdC_NJ(lbd = 0, finetune_lr= .01, mu = 0.9, pretraining_epochs=50,
                     pretrain_lr=.01, training_epochs=100,
                     dataset='toy.pkl.gz', batch_size=20, nClass = nClass, hidden_dim = [100, 50, 10, 2])
    ## Perform proposed
    nmi_dc[n], ari_dc[n] = test_SdC(lbd = 0.2, finetune_lr= .01, mu = 0.9, pretraining_epochs=50,
                     pretrain_lr=0.01, training_epochs=100,
                     dataset='toy.pkl.gz', batch_size=20, nClass = nClass,
                     hidden_dim = [100, 50, 10, 2])

# Average each metric over the N trials and pickle the 8-element summary
# (km, sc, nj, dc) x (NMI, ARI).
result = np.concatenate((np.mean(nmi_km, keepdims=True), np.mean(ari_km, keepdims=True), np.mean(nmi_sc, keepdims=True),
                         np.mean(ari_sc, keepdims=True), np.mean(nmi_nj, keepdims=True), np.mean(ari_nj, keepdims=True),
                         np.mean(nmi_dc, keepdims=True), np.mean(ari_dc, keepdims=True)) )

f = gzip.open('MC_results.pkl.gz','wb')
cPickle.dump(result, f, protocol=2)
f.close()
| 3,641 | 28.609756 | 120 | py |
DCN | DCN-master/load_network.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 13 09:29:43 2016
@author: yang4173
This script loads a saved network, calculate the learned representation and save for future use.
"""
import cPickle, gzip
import os, sys
from multi_layer_km import SdC, load_rcv
from deepclustering import load_data
import numpy
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from mnist_loader import MNIST
from cluster_acc import acc
from sklearn import svm
import theano.tensor as T
import theano
# Script body: load a previously trained DCN network, recompute the learned
# hidden representation batch-by-batch, and evaluate it with a linear SVM.
# The large commented regions below are alternative data sources (raw MNIST,
# infimnist) and alternative evaluations (KMeans clustering, 2-D scatter
# plot) kept for reference.
#saved_network = 'deepclus.save'
#dataset='mnist.pkl.gz'
os.chdir('./MNIST_results/Finalized/')
#saved_network = 'deepclus_'+str(nClass)+ '_clusters.pkl.gz'
saved_network = 'deepclus_10_clusters.pkl.gz'
# The saved package is a dict; 'network' holds the flat parameter list
# (5 entries per layer: W, bhid, bvis, gamma, beta) used to rebuild SdC.
with gzip.open(saved_network, 'rb') as f:
    saved_result = cPickle.load(f)
param_init = saved_result['network']
# NOTE(review): hidden_dim and lbd are hard-coded here and must match the
# architecture of the saved network; the saved config (commented out below)
# would be the safer source — confirm they agree.
hidden_dim = [2000, 1000, 1000, 1000, 50]
lbd = 1
#hidden_dim = saved_result['config']['hidden_dim']
#lbd = saved_result['config']['lbd']
## MNIST data
#datapath = '/home/bo/Data/MNIST/'
#filename = 'mnist.pkl.gz'
#with gzip.open(datapath + filename, 'rb') as f:
#    train_set, test_set, valid_set = cPickle.load(f)
#train_x = numpy.concatenate((train_set[0], test_set[0], valid_set[0]), axis = 0)
#train_y = numpy.concatenate((train_set[1], test_set[1], valid_set[1]), axis = 0)
#inDim = train_x.shape[1]
## infimnist data
#datapath = '/home/bo/Data/infimnist/'
#path_img = datapath + 'mnist500k-images-idx1-ubyte'
#path_lbl = datapath + 'mnist500k-labels-idx1-ubyte'
#train_x, train_y = MNIST.load(path_img, path_lbl)
#
#data = train_x
#label_true = train_y
#inDim = data.shape[1]
#datasets = load_data(dataset)
#train_set_x, train_set_y = datasets[0]
#data = train_set_x.get_value()
#label_true = train_set_y.get_value()
#
#inDim = data.shape[1]
## find a better way to save and load model params
#lbd = 1
#hidden_dim = [1000, 500, 250, 2]
## RCV1
datapath = '/home/bo/Data/RCV1/Processed/'
filename = 'data-0.pkl.gz'
batch_size = 100
# NOTE(review): load_rcv is imported from multi_layer_km, which (in this
# version of the repo) does not define it — confirm this script still runs.
datasets = load_rcv(datapath + filename, batch_size)
train_set_x, train_set_y = datasets[0]
train_y = numpy.squeeze(train_set_y.get_value())
inDim = train_set_x.get_value().shape[1]
numpy_rng = numpy.random.RandomState(125)
x = T.matrix('x')
index = T.lscalar()
# Rebuild the network with the saved parameters spliced in.
sdc = SdC(
    numpy_rng=numpy_rng,
    n_ins=inDim,
    lbd = lbd,
    input = x,
    hidden_layers_sizes= hidden_dim,
    Param_init = param_init
)
# Compile a function mapping minibatch index -> deepest hidden activations.
out = sdc.get_output()
out_sdc = theano.function(
    [index],
    outputs = out,
    givens = {x: train_set_x[index * batch_size: (index + 1) * batch_size]}
)
hidden_val = []
N = train_set_x.get_value(borrow=True).shape[0]
# Python 2 integer division: any remainder samples are silently dropped.
n_train_batches = N/batch_size
for batch_index in xrange(n_train_batches):
    hidden_val.append(out_sdc(batch_index))
# Stack the per-batch (batch, dim) outputs into one (N, dim) array.
hidden_array = numpy.asarray(hidden_val)
hidden_size = hidden_array.shape
hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
### Train a SVM classifier
# NOTE(review): train_pct = 0.8 is unused; the split below hard-codes 0.9.
# Also numpy.floor returns a float, which only works as a slice index on
# old numpy versions — confirm or cast with int().
train_pct = 0.8
train_num = numpy.floor(N*0.9)
svm_train_x = hidden_array[0:train_num]
svm_test_x = hidden_array[train_num:]
svm_train_y = train_y[0:train_num]
svm_test_y = train_y[train_num:]
svm_model = svm.SVC(kernel = 'linear')
svm_model.fit(svm_train_x, svm_train_y)
ypred = svm_model.predict(svm_test_x)
# Plain accuracy: fraction of held-out labels predicted exactly.
ac = 1.0*numpy.count_nonzero(numpy.equal(ypred, svm_test_y))/svm_test_y.shape[0]
print >> sys.stderr, ('Acc for classification is: %.2f' % (ac))
### Do a Kmeans clustering
#km = KMeans(n_clusters = nClass)
#ypred = km.fit_predict(Output)
#
#nmi = metrics.normalized_mutual_info_score(train_y, ypred)
#print >> sys.stderr, ('NMI for deep clustering: %.2f' % (nmi))
#
#ari = metrics.adjusted_rand_score(train_y, ypred)
#print >> sys.stderr, ('ARI for deep clustering: %.2f' % (ari))
#
#try:
#    ac = acc(ypred, train_y)
#except AssertionError:
#    ac = 0
#    print('Number of predicted cluster mismatch with ground truth.')
#
#print >> sys.stderr, ('Acc for deep clustering: %.2f' % (ac))
#f = open('LearnedRep.save', 'wb')
#cPickle.dump(Output, f, protocol=cPickle.HIGHEST_PROTOCOL)
#f.close()
#
#print 'Done'
#
#color = ['b', 'g', 'r', 'm', 'k', 'b', 'g', 'r', 'm', 'k']
#marker = ['o', '+','o', '+','o', '+','o', '+','o', '+']
#
## Take 500 samples to plot
#data_to_plot = Output[0:1999]
#label_plot = label_true[0:1999]
#
#x = data_to_plot[:, 0]
#y = data_to_plot[:, 1]
#
#for i in xrange(nClass):
#    idx_x = x[numpy.nonzero(label_plot == i)]
#    idx_y = y[numpy.nonzero(label_plot == i)]
#    plt.figure(3)
#    plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
#
#plt.legend()
#plt.show()
| 4,673 | 26.333333 | 96 | py |
DCN | DCN-master/multi_layer_km.py | # -*- coding: utf-8 -*-
"""
@author: bo
Multiple-layers Deep Clustering
"""
import os
import sys
import timeit
import scipy
import numpy
import cPickle
import gzip
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from theano.tensor.shared_randomstreams import RandomStreams
from cluster_acc import acc
from sklearn import metrics
from sklearn.cluster import KMeans
from dA import dA
class dA2(dA):
    """Denoising-autoencoder layer with ReLU units and (mean-only) batch
    normalization, used as one building block of the SdC network.

    Overrides the tutorial `dA` class: the hidden activation is
    relu(batchnorm(Wx + b)) instead of a sigmoid, and the momentum buffers
    in ``self.delta`` support Nesterov-type accelerated SGD updates.
    ``self.params`` is exactly [W, b, b_prime, gamma, beta]; SdC relies on
    this 5-per-layer layout when saving/restoring networks.
    """
    # overload the original function in dA class
    # using the ReLU nonlinearity
    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input=None,
        n_visible=784,
        n_hidden=500,
        W=None,
        bhid=None,
        bvis=None,
        gamma = None,
        beta = None
    ):
        """
        Initialize the dA2 class by specifying the number of visible units (the
        dimension d of the input ), the number of hidden units ( the dimension
        d' of the latent or hidden space ) and the corruption level. The
        constructor also receives symbolic variables for the input, weights and
        bias.
        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: number random generator used to generate weights
        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`
        :type input: theano.tensor.TensorType
        :param input: a symbolic description of the input or None for
                      standalone dA
        :type n_visible: int
        :param n_visible: number of visible units
        :type n_hidden: int
        :param n_hidden:  number of hidden units
        :type W: theano.tensor.TensorType
        :param W: Theano variable pointing to a set of weights that should be
                  shared belong the dA and another architecture; if dA should
                  be standalone set this to None
        :type bhid: theano.tensor.TensorType
        :param bhid: Theano variable pointing to a set of biases values (for
                     hidden units) that should be shared belong dA and another
                     architecture; if dA should be standalone set this to None
        :type bvis: theano.tensor.TensorType
        :param bvis: Theano variable pointing to a set of biases values (for
                     visible units) that should be shared belong dA and another
                     architecture; if dA should be standalone set this to None
        :type gamma: theano.tensor.TensorType
        :param gamma: Tensor variable for implementing batch normalization
        :type beta: theano.tensor.TensorType
        :param beta: Tensor variable for implementing batch normalization
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        # create a Theano random generator that gives symbolic random values
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # note : W' was written as `W_prime` and b' as `b_prime`
        if W is None:
            # small Gaussian init; NOTE(review): numpy.random is used here
            # (not numpy_rng), so W init ignores the passed-in seed.
            initial_W = numpy.asarray(
                0.01*numpy.float32(numpy.random.randn(n_visible, n_hidden))
            )
        else:
            initial_W = W
        W = theano.shared(value=initial_W, name='W', borrow=True)
        if bvis is None:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        else:
            bvis = theano.shared(
                value=bvis,
                borrow=True
            )
        if bhid is None:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )
        else:
            bhid = theano.shared(
                value=bhid,
                name='b',
                borrow=True
            )
        # gamma/beta are the batch-norm scale and shift parameters.
        if gamma is None:
            gamma = theano.shared(value = numpy.ones((n_hidden,), dtype=theano.config.floatX), name='gamma')
        else:
            gamma = theano.shared(value = gamma, name='gamma')
        if beta is None:
            beta = theano.shared(value = numpy.zeros((n_hidden,),dtype=theano.config.floatX), name='beta')
        else:
            beta = theano.shared(value = beta, name='beta')
        self.W = W
        # b corresponds to the bias of the hidden
        self.b = bhid
        # b_prime corresponds to the bias of the visible
        self.b_prime = bvis
        # tied weights, therefore W_prime is W transpose
        self.W_prime = self.W.T
        self.theano_rng = theano_rng
        self.gamma = gamma
        self.beta = beta
        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name='input')
        else:
            self.x = input
        self.params = [self.W, self.b, self.b_prime, self.gamma, self.beta]
        # delta is a temporary variable for implementing the momentum method;
        # one zero-initialized buffer per entry of self.params, same shapes.
        self.delta = [theano.shared(value = numpy.zeros((n_visible, n_hidden), dtype = theano.config.floatX), borrow=True),
                      theano.shared(value = numpy.zeros(n_hidden, dtype = theano.config.floatX), borrow = True ),
                      theano.shared(value = numpy.zeros(n_visible, dtype = theano.config.floatX), borrow = True ),
                      theano.shared(value = numpy.zeros(n_hidden, dtype = theano.config.floatX), borrow = True ),
                      theano.shared(value = numpy.zeros(n_hidden, dtype = theano.config.floatX), borrow = True )
                      ]
    def get_hidden_values(self, input):
        """ Computes the values of the hidden layer: relu(batchnorm(Wx+b)). """
        linear = T.dot(input, self.W) + self.b
        # Mean-only batch normalization: std is replaced by ones, so the
        # activations are centered per minibatch but not variance-scaled.
        bn_output = T.nnet.bn.batch_normalization(inputs = linear,
                        gamma = self.gamma, beta = self.beta, mean = linear.mean((0,), keepdims=True),
                        std = T.ones_like(linear.var((0,), keepdims = True)), mode='high_mem')
        return T.nnet.relu(bn_output)
    def get_reconstructed_input(self, hidden):
        """Computes the reconstructed input given the values of the
        hidden layer (ReLU decoder with tied weights).
        """
        return T.nnet.relu(T.dot(hidden, self.W_prime) + self.b_prime)
    def get_cost_updates(self, corruption_level, learning_rate, mu):
        """ This function computes the cost and the updates for one training
        step of the dA (squared reconstruction error, momentum SGD). """
        tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(y)
        # per-sample squared reconstruction error against the clean input
        L = T.sum(T.pow(self.x - z, 2), axis = 1)
        cost = T.mean(L)
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = []
        for param, delta, gparam in zip(self.params, self.delta, gparams):
            # Nesterov-style momentum: delta accumulates the velocity,
            # the parameter takes an extrapolated step.
            updates.append( (delta, mu*delta - learning_rate * gparam) )
            updates.append( (param, param + mu*mu*delta - (1+mu)*learning_rate*gparam ))
        return (cost, updates)
class SdC(object):
    """
    class SdC, main class for deep-clustering network, constructed by stacking multiple dA2 layers.
    It is possilbe to initialize the network with a saved network trained before, just pass the network parameters
    to Param_init. This facilites parameter tuning for the optimization part, by avoiding performing pre-training
    every time.

    ``Param_init`` is the flat parameter list produced by saving
    ``self.params`` (5 entries per layer: W, bhid, bvis, gamma, beta).
    ``lbd`` weights the reconstruction term and ``beta`` the clustering
    term of the finetuning objective (see finetune_cost_updates).
    """
    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input = None,
        n_ins=784,
        lbd = 1,
        beta = 1,
        hidden_layers_sizes=[1000, 200, 10],
        corruption_levels=[0, 0, 0],
        Param_init = None
    ):
        # NOTE(review): the mutable list defaults above are shared across
        # calls; harmless here since they are never mutated, but callers
        # should always pass hidden_layers_sizes explicitly.
        # self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        self.lbd = lbd
        self.beta = beta
        self.delta = []
        assert self.n_layers > 0
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        if input is None:
            self.x = T.matrix('x')  # the data is presented as rasterized images
        else:
            self.x = input
        self.y = T.ivector('y')  # the labels are presented as 1D vector of
        for i in xrange(self.n_layers):
            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]
            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.dA_layers[-1].get_hidden_values(self.dA_layers[-1].x)
            if Param_init is None:
                dA_layer = dA2(numpy_rng=numpy_rng,
                               theano_rng=theano_rng,
                               input=layer_input,
                               n_visible=input_size,
                               n_hidden=hidden_layers_sizes[i])
            else:
                # restore the 5 saved parameters belonging to layer i
                dA_layer = dA2(numpy_rng=numpy_rng,
                               theano_rng=theano_rng,
                               input=layer_input,
                               n_visible=input_size,
                               n_hidden=hidden_layers_sizes[i],
                               W = Param_init[5*i],
                               bhid = Param_init[5*i + 1],
                               bvis = Param_init[5*i+2],
                               gamma = Param_init[5*i+3],
                               beta = Param_init[5*i+4])
            self.dA_layers.append(dA_layer)
            self.params.extend(dA_layer.params)
            self.delta.extend(dA_layer.delta)
    def get_output(self):
        """Symbolic deepest-layer hidden representation of self.x."""
        # return self.sigmoid_layers[-1].output
        return self.dA_layers[-1].get_hidden_values(self.dA_layers[-1].x)
    def get_network_reconst(self):
        """Symbolic reconstruction of the input: decode the deepest code
        back through every layer's tied-weight ReLU decoder."""
        reconst = self.get_output()
        for da in reversed(self.dA_layers):
            reconst = T.nnet.relu(T.dot(reconst, da.W_prime) + da.b_prime)
        return reconst
    def finetune_cost_updates(self, center, mu, learning_rate):
        """ This function computes the cost and the updates .

        The objective is beta * ||center - f(x)||^2 + lbd * ||x - g(f(x))||^2
        averaged over the minibatch; returns ((total, reconstruction,
        clustering, grad norms, param norms), momentum updates).
        """
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        network_output = self.get_output()
        temp = T.pow(center - network_output, 2)
        L =  T.sum(temp, axis=1)
        # Add the network reconstruction error
        z = self.get_network_reconst()
        reconst_err = T.sum(T.pow(self.x - z, 2), axis = 1)
        L = self.beta*L + self.lbd*reconst_err
        cost1 = T.mean(L)
        cost2 = self.lbd*T.mean(reconst_err)
        cost3 = cost1 - cost2
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost1, self.params)
        # generate the list of updates
        updates = []
        grad_values = []
        param_norm = []
        for param, delta, gparam in zip(self.params, self.delta, gparams):
            # Nesterov-type momentum step, same form as dA2.get_cost_updates
            updates.append( (delta, mu*delta - learning_rate * gparam) )
            updates.append( (param, param + mu*mu*delta - (1+mu)*learning_rate*gparam ))
            # norms are exposed for monitoring/debugging only
            grad_values.append(gparam.norm(L=2))
            param_norm.append(param.norm(L=2))
        grad_ = T.stack(*grad_values)
        param_ = T.stack(*param_norm)
        return ((cost1, cost2, cost3, grad_, param_), updates)
    def pretraining_functions(self, train_set_x, batch_size, mu):
        ''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with same index.
        The function will require as input the minibatch index, and to train
        a dA you just need to iterate, calling the corresponding function on
        all minibatch indexes.
        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA
        :type batch_size: int
        :param batch_size: size of a [mini]batch
        :type mu: float
        :param mu: extrapolation parameter used for implementing Nesterov-type acceleration
        '''
        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # begining of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size
        pretrain_fns = []
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate, mu)
            # compile the theano function
            fn = theano.function(
                inputs=[
                    index,
                    theano.In(corruption_level),
                    theano.In(learning_rate)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                },
                on_unused_input='ignore'
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)
        return pretrain_fns
    def build_finetune_functions(self, datasets, centers, batch_size, mu, learning_rate):
        '''Generates a function `train` that implements one step of
        finetuning, a function `validate` that computes the error on
        a batch from the validation set, and a function `test` that
        computes the error on a batch from the testing set
        :type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contain all the datasets;
                         the has to contain three pairs, `train`,
                         `valid`, `test` in this order, where each pair
                         is formed of two Theano variables, one for the
                         datapoints, the other for the labels
        :type centers: numpy ndarray
        :param centers: the centroids corresponding to each data sample in the minibatch
        :type batch_size: int
        :param batch_size: size of a minibatch
        :type mu: float
        :param mu: extrapolation parameter used for implementing Nesterov-type acceleration
        :type learning_rate: float
        :param learning_rate: learning rate used during finetune stage

        NOTE(review): despite the docstring above, only the `train` function
        is built and returned here.
        '''
        (train_set_x, train_set_y) = datasets[0]
        index = T.lscalar('index')  # index to a [mini]batch
        minibatch = T.fmatrix('minibatch')
        # compute the gradients with respect to the model parameters
        cost, updates = self.finetune_cost_updates(
            centers,
            mu,
            learning_rate=learning_rate
        )
        # `minibatch` is rebound: the symbolic slice of the shared dataset
        minibatch = train_set_x[
            index * batch_size: (index + 1) * batch_size
        ]
        train_fn = theano.function(
            inputs=[index],
            outputs= cost,
            updates=updates,
            givens={
                self.x: minibatch
            },
            name='train'
        )
        return train_fn
def load_data(dataset):
    """Load a gzipped, pickled (features, labels) pair and return it shuffled.

    Sparse features are densified and cast to float32, labels are cast to
    int32 and flattened to 1-D, and both arrays are permuted with the same
    random order before being returned as ``(features, labels)``.
    """
    with gzip.open(dataset, 'rb') as fin:
        features, labels = cPickle.load(fin)
    # Downstream Theano/GPU code expects dense float32 inputs.
    if scipy.sparse.issparse(features):
        features = features.toarray()
    if features.dtype != 'float32':
        features = features.astype(numpy.float32)
    if labels.dtype != 'int32':
        labels = labels.astype(numpy.int32)
    if labels.ndim > 1:
        labels = numpy.squeeze(labels)
    # One shared permutation keeps samples and labels aligned.
    order = numpy.random.permutation(features.shape[0])
    return features[order], labels[order]
def load_data_shared(dataset, batch_size):
    """Load a pickled dataset and wrap it in Theano shared variables.

    The sample count is truncated to a multiple of ``batch_size`` so every
    minibatch is full, the data is shuffled, densified/cast as needed, and
    returned as ``[(shared_x, shared_y), 0, 0]`` — the two zeros stand in
    for the unused validation/test slots.
    """
    with gzip.open(dataset, 'rb') as fin:
        feats, labels = cPickle.load(fin)
    # Drop the tail so the sample count divides evenly into minibatches.
    usable = feats.shape[0] - feats.shape[0] % batch_size
    feats = feats[0: usable]
    labels = labels[0: usable]
    # One shared random permutation keeps samples and labels aligned.
    order = numpy.random.permutation(usable)
    feats = feats[order]
    labels = labels[order]
    # Dense float32 features / int32 1-D labels, as CUDA and Theano expect.
    if scipy.sparse.issparse(feats):
        feats = feats.toarray()
    if feats.dtype != 'float32':
        feats = feats.astype(numpy.float32)
    if labels.dtype != 'int32':
        labels = labels.astype(numpy.int32)
    if labels.ndim > 1:
        labels = numpy.squeeze(labels)
    shared_x, shared_y = shared_dataset((feats, labels))
    return [(shared_x, shared_y), 0, 0]
def shared_dataset(data_xy, borrow=True):
    """Wrap a (features, labels) pair in Theano shared variables.

    Keeping the whole dataset in shared variables lets Theano copy it to
    GPU memory once, instead of transferring every minibatch (which would
    be a large slowdown). Because GPU storage must be ``floatX``, labels
    are stored as floats as well; unlike the classic tutorial code, no
    int32 cast is applied here — callers read the raw values themselves.
    """
    features, labels = data_xy
    shared_x = theano.shared(
        numpy.asarray(features, dtype=theano.config.floatX),
        borrow=borrow)
    shared_y = theano.shared(
        numpy.asarray(labels, dtype=theano.config.floatX),
        borrow=borrow)
    #return shared_x, T.cast(shared_y, 'int32')
    return shared_x, shared_y
def batch_km(data, center, count):
    """Run one mini-batch k-means update on ``data``.

    Each sample is first assigned to its nearest centroid under the initial
    ``center`` values (ties broken toward the lower index), then every
    centroid is pulled toward its assigned samples with a diminishing
    per-centroid step size of 1/count.

    :param data: (N, d) array, one sample per row.
    :param center: (K, d) array of centroids; mutated IN PLACE and also
                   returned for convenience.
    :param count: length-K integer array of how many samples were ever
                  assigned to each centroid; mutated in place.
    :returns: (idx, center, count) where idx is the length-N assignment.
    """
    N = data.shape[0]
    # update assignment: nearest centroid by Euclidean distance.
    # Vectorized replacement of the original O(N*K) Python double loop;
    # numpy.argmin keeps the same tie-breaking (first minimum wins).
    dist = numpy.linalg.norm(data[:, None, :] - center[None, :, :], axis=2)
    idx = dist.argmin(axis=1)
    # update centriod with a running average, step eta = 1/count[c].
    # BUG FIX: the original computed ``eta = 1/count[c]``, which under
    # Python 2 is integer division and evaluates to 0 whenever
    # count[c] > 1 (the caller seeds count with 100s) — so the centroids
    # never moved. ``1.0/count[c]`` is the intended update.
    center_new = center  # alias: the input array is updated in place
    for i in range(N):
        c = idx[i]
        count[c] += 1
        eta = 1.0 / count[c]
        center_new[c] = (1 - eta) * center_new[c] + eta * data[i]
    return idx, center_new, count
def test_SdC(Init = '', lbd = .01, output_dir='MNIST_results', save_file = '', beta = 1, finetune_lr= .005, mu = 0.9, pretraining_epochs=50,
            pretrain_lr=.001, training_epochs=150,
            dataset='toy.pkl.gz', batch_size=20, nClass = 4, hidden_dim = [100, 50, 2], diminishing = True):
    """
    Full DCN training driver: layer-wise SAE pre-training, KMeans
    initialization of the cluster centroids, then joint finetuning that
    alternates SGD on the network with mini-batch KMeans updates.
    Returns a (training_epochs/5 + 1, 3) array of [NMI, ARI, ACC] rows,
    evaluated every 5 epochs (row 0 is the SAE+KM baseline).
    :type Init: string
    :param Init: a string contains the filename of a saved network, the saved network can be loaded to initialize
                 the network. Leave this parameter be an empty string if no saved network available. If failed to
                 find the specified file, the program will initialized the network randomly.
    :type lbd: float
    :param lbd: tuning parameter, multiplied on reconstruction error, i.e. the larger
                lbd the larger weight on minimizing reconstruction error.
    :type output_dir: string
    :param output_dir: the location to save trained network
    :type save_file: string
    :param save_file: the filename to save trained network
    :type beta: float
    :param beta: the parameter for the clustering term, set to 0 if a pure SAE (without clustering regularization)
                 is intended.
    :type finetune_lr: float
    :param finetune_lr: learning rate used in the finetune stage
    (factor for the stochastic gradient)
    :type mu: float
    :param mu: extrapolation parameter used for implementing Nesterov-type acceleration
    :type pretraining_epochs: int
    :param pretraining_epochs: number of epoch to do pretraining
    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training
    :type training_epochs: int
    :param training_epochs: number of epoch to do optimization
    :type dataset: string
    :param dataset: path of the pickled dataset
    :type batch_size: int
    :param batch_size: number of data samples in one minibatch
    :type nClass: int
    :param nClass: number of clusters
    :type hidden_dim: array
    :param hidden_dim: the number of neurons in each hidden layer in the forward network, the reconstruction part
                       has a mirror-image structure
    :type diminishing: boolean
    :param diminishing: whether or not to reduce learning rate during optimization, if True, the learning rate is
                        halved every 10 epochs.
    """
    datasets = load_data_shared(dataset, batch_size)
    working_dir = os.getcwd()
    train_set_x, train_set_y = datasets[0]
    inDim = train_set_x.get_value().shape[1]
    label_true = numpy.squeeze(numpy.int32(train_set_y.get_value(borrow=True)))
    index = T.lscalar()
    x = T.matrix('x')
    # compute number of minibatches for training, validation and testing
    n_train_samples = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches = n_train_samples
    # Python 2 integer division (load_data_shared already truncated N to a
    # multiple of batch_size, so this is exact).
    n_train_batches /= batch_size
    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    # numpy_rng = numpy.random.RandomState()
    print '... building the model'
    # work inside output_dir; restored to working_dir before returning
    try:
        os.chdir(output_dir)
    except OSError:
        os.mkdir(output_dir)
        os.chdir(output_dir)
    # construct the stacked denoising autoencoder class
    if Init == '':
        sdc = SdC(
            numpy_rng=numpy_rng,
            n_ins=inDim,
            lbd = lbd,
            beta = beta,
            input=x,
            hidden_layers_sizes= hidden_dim
        )
    else:
        # try to warm-start from a previously saved network
        try:
            with gzip.open(Init, 'rb') as f:
                saved_params = cPickle.load(f)['network']
            sdc = SdC(
                numpy_rng=numpy_rng,
                n_ins=inDim,
                lbd = lbd,
                beta = beta,
                input=x,
                hidden_layers_sizes= hidden_dim,
                Param_init = saved_params
            )
            print '... loading saved network succeeded'
        except IOError:
            print >> sys.stderr, ('Cannot find the specified saved network, using random initializations.')
            sdc = SdC(
                numpy_rng=numpy_rng,
                n_ins=inDim,
                lbd = lbd,
                beta = beta,
                input=x,
                hidden_layers_sizes= hidden_dim
            )
    #########################
    # PRETRAINING THE MODEL #
    #########################
    if pretraining_epochs == 0 or Init != '':
        print '... skipping pretraining'
    else:
        print '... getting the pretraining functions'
        pretraining_fns = sdc.pretraining_functions(train_set_x=train_set_x,
                                                    batch_size=batch_size, mu = mu)
        print '... pre-training the model'
        start_time = timeit.default_timer()
        ## Pre-train layer-wise
        # corruption is disabled (all zeros): plain, non-denoising SAE
        corruption_levels = 0*numpy.ones(len(hidden_dim), dtype = numpy.float32)
        pretrain_lr_shared = theano.shared(numpy.asarray(pretrain_lr,
                                                           dtype='float32'),
                                               borrow=True)
        for i in xrange(sdc.n_layers):
            # go through pretraining epochs
            iter = 0
            for epoch in xrange(pretraining_epochs):
                # go through the training set
                c = []
                for batch_index in xrange(n_train_batches):
                    iter = (epoch) * n_train_batches + batch_index
                    # constant learning rate (no decay during pretraining)
                    pretrain_lr_shared.set_value( numpy.float32(pretrain_lr) )
                    cost = pretraining_fns[i](index=batch_index,
                             corruption=corruption_levels[i],
                             lr=pretrain_lr_shared.get_value())
                    c.append(cost)
                print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
                print numpy.mean(c)
        end_time = timeit.default_timer()
        print >> sys.stderr, ('The pretraining code for file ' +
                              os.path.split(__file__)[1] +
                              ' ran for %.2fm' % ((end_time - start_time) / 60.))
        # checkpoint the pre-trained parameters before finetuning
        network = [param.get_value() for param in sdc.params]
        package = {'network': network}
        with gzip.open('deepclus_'+str(nClass)+ '_pretrain.pkl.gz', 'wb') as f:
            cPickle.dump(package, f, protocol=cPickle.HIGHEST_PROTOCOL)
    ########################
    # FINETUNING THE MODEL #
    ########################
    km = KMeans(n_clusters = nClass)
    out = sdc.get_output()
    out_sdc = theano.function(
        [index],
        outputs = out,
        givens = {x: train_set_x[index * batch_size: (index + 1) * batch_size]}
    )
    # compute the deep representation of the whole training set, batch by batch
    hidden_val = []
    for batch_index in xrange(n_train_batches):
        hidden_val.append(out_sdc(batch_index))
    hidden_array  = numpy.asarray(hidden_val)
    hidden_size = hidden_array.shape
    hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
    # zeros_count: diagnostic of dead (always-zero) ReLU units; unused below
    hidden_zero = numpy.zeros_like(hidden_array)
    zeros_count = numpy.sum(numpy.equal(hidden_array, hidden_zero), axis = 0)
    # # Do a k-means clusering to get center_array
    km_idx = km.fit_predict(hidden_array)
    centers = km.cluster_centers_.astype(numpy.float32)
    # per-minibatch buffer holding each sample's assigned centroid
    center_shared =  theano.shared(numpy.zeros((batch_size, hidden_dim[-1]) ,
                                                  dtype='float32'),
                                      borrow=True)
    # ---- SAE + KMeans baseline metrics (before joint finetuning) ----
    nmi = metrics.normalized_mutual_info_score(label_true, km_idx)
    print >> sys.stderr, ('Initial NMI for deep clustering: %.2f' % (nmi))
    ari = metrics.adjusted_rand_score(label_true, km_idx)
    print >> sys.stderr, ('ARI for deep clustering: %.2f' % (ari))
    try:
        ac = acc(km_idx, label_true)
    except AssertionError:
        ac = 0
        print('Number of predicted cluster mismatch with ground truth.')
    print >> sys.stderr, ('ACC for deep clustering: %.2f' % (ac))
    lr_shared = theano.shared(numpy.asarray(finetune_lr,
                                               dtype='float32'),
                                  borrow=True)
    print '... getting the finetuning functions'
    train_fn = sdc.build_finetune_functions(
        datasets=datasets,
        centers=center_shared ,
        batch_size=batch_size,
        mu = mu,
        learning_rate=lr_shared
    )
    print '... finetunning the model'
    start_time = timeit.default_timer()
    done_looping = False
    epoch = 0
    # row 0 is the SAE+KM baseline; one extra row per 5-epoch evaluation
    res_metrics = numpy.zeros((training_epochs/5 + 1, 3), dtype = numpy.float32)
    res_metrics[0] = numpy.array([nmi, ari, ac])
    # count seeds the mini-batch KMeans step sizes; starting at 100 damps
    # the first centroid updates (eta = 1/count)
    count = 100*numpy.ones(nClass, dtype = numpy.int)
    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        c = [] # total cost
        d = [] # cost of reconstruction
        e = [] # cost of clustering
        f = [] # learning_rate
        g = []
        # count the number of assigned data sample
        # perform random initialization of centroid if empty cluster happens
        count_samples = numpy.zeros((nClass))
        for minibatch_index in xrange(n_train_batches):
            # calculate the stepsize
            iter = (epoch - 1) * n_train_batches + minibatch_index
            lr_shared.set_value( numpy.float32(finetune_lr) )
            # load each sample's currently-assigned centroid as the target
            center_shared.set_value(centers[km_idx[minibatch_index * batch_size: (minibatch_index +1 ) * batch_size]])
            # lr_shared.set_value( numpy.float32(finetune_lr/numpy.sqrt(epoch)) )
            cost = train_fn(minibatch_index)
            hidden_val = out_sdc(minibatch_index) # get the hidden value, to update KM
            # Perform mini-batch KM
            temp_idx, centers, count = batch_km(hidden_val, centers, count)
            # for i in range(nClass):
            #     count_samples[i] += temp_idx.shape[0] - numpy.count_nonzero(temp_idx - i)
            # center_shared.set_value(numpy.float32(temp_center))
            km_idx[minibatch_index * batch_size: (minibatch_index +1 ) * batch_size] = temp_idx
            c.append(cost[0])
            d.append(cost[1])
            e.append(cost[2])
            f.append(cost[3])
            g.append(cost[4])
        # check if empty cluster happen, if it does random initialize it
        # for i in range(nClass):
        #     if count_samples[i] == 0:
        #         rand_idx = numpy.random.randint(low = 0, high = n_train_samples)
        #         # modify the centroid
        #         centers[i] = out_single(rand_idx)
        print 'Fine-tuning epoch %d ++++ \n' % (epoch),
        print ('Total cost: %.5f, '%(numpy.mean(c)) + 'Reconstruction: %.5f, ' %(numpy.mean(d))
            + "Clustering: %.5f, " %(numpy.mean(e)) )
        # print 'Learning rate: %.6f' %numpy.mean(f)
        # halve the learning rate every 10 epochs (despite the older
        # "every 5 epochs" comment)
        if epoch % 10 == 0 and diminishing == True:
            finetune_lr /= 2
        # evaluate the clustering performance every 5 epoches
        if epoch % 5 == 0:
            nmi = metrics.normalized_mutual_info_score(label_true, km_idx)
            ari = metrics.adjusted_rand_score(label_true, km_idx)
            try:
                ac = acc(km_idx, label_true)
            except AssertionError:
                ac = 0
                print('Number of predicted cluster mismatch with ground truth.')
            res_metrics[epoch/5] = numpy.array([nmi, ari, ac])
    # get the hidden values, to make a plot
    hidden_val = []
    for batch_index in xrange(n_train_batches):
        hidden_val.append(out_sdc(batch_index))
    hidden_array  = numpy.asarray(hidden_val)
    hidden_size = hidden_array.shape
    hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
    err = numpy.mean(d)
    print >> sys.stderr, ('Average squared 2-D reconstruction error: %.4f' %err)
    end_time = timeit.default_timer()
    # ---- final metrics on the last mini-batch KMeans assignment ----
    ypred = km_idx
    nmi = metrics.normalized_mutual_info_score(label_true, ypred)
    print >> sys.stderr, ('NMI for deep clustering: %.2f' % (nmi))
    ari = metrics.adjusted_rand_score(label_true, ypred)
    print >> sys.stderr, ('ARI for deep clustering: %.2f' % (ari))
    try:
        ac = acc(ypred, label_true)
    except AssertionError:
        ac = 0
        print('Number of predicted cluster mismatch with ground truth.')
    print >> sys.stderr, ('ACC for deep clustering: %.2f' % (ac))
    # save the run configuration, the metric history and the trained network
    config = {'lbd': lbd,
              'beta': beta,
              'pretraining_epochs': pretraining_epochs,
              'pretrain_lr': pretrain_lr,
              'mu': mu,
              'finetune_lr': finetune_lr,
              'training_epochs': training_epochs,
              'dataset': dataset,
              'batch_size': batch_size,
              'nClass': nClass,
              'hidden_dim': hidden_dim}
    results = {'result': res_metrics}
    network = [param.get_value() for param in sdc.params]
    package = {'config': config,
               'results': results,
               'network': network}
    with gzip.open(save_file, 'wb') as f:
        cPickle.dump(package, f, protocol=cPickle.HIGHEST_PROTOCOL)
    os.chdir(working_dir)
    print >> sys.stderr, ('The training code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    return res_metrics
if __name__ == '__main__':
    # run experiment with raw MNIST data
    K = 10
    filename = 'mnist_dcn.pkl.gz'
    path = '/home/bo/Data/MNIST/'
    trials = 1
    dataset = path+filename
    config = {'Init': '',
              'lbd': .05,
              'beta': 1,
              'output_dir': 'MNIST_results',
              'save_file': 'mnist_10.pkl.gz',
              'pretraining_epochs': 50,
              'pretrain_lr': .01,
              'mu': 0.9,
              'finetune_lr': 0.05,
              'training_epochs': 50,
              'dataset': dataset,
              'batch_size': 128,
              'nClass': K,
              'hidden_dim': [2000, 1000, 500, 500, 250, 50],
              'diminishing': False}
    results = []
    for i in range(trials):
        res_metrics = test_SdC(**config)
        results.append(res_metrics)
    # row 0 of each run is the SAE+KM baseline; row N (= epochs/5) is the
    # final DCN result after joint finetuning
    results_SAEKM = numpy.zeros((trials, 3))
    results_DCN = numpy.zeros((trials, 3))
    N = config['training_epochs']/5
    for i in range(trials):
        results_SAEKM[i] = results[i][0]
        results_DCN[i] = results[i][N]
    SAEKM_mean = numpy.mean(results_SAEKM, axis = 0)
    SAEKM_std  = numpy.std(results_SAEKM, axis = 0)
    DCN_mean   = numpy.mean(results_DCN, axis = 0)
    DCN_std    = numpy.std(results_DCN, axis = 0)
    print >> sys.stderr, ('SAE+KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SAEKM_mean[0],
                          SAEKM_mean[1], SAEKM_mean[2]) )
    print >> sys.stderr, ('DCN    avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(DCN_mean[0],
                          DCN_mean[1], DCN_mean[2]) )
    # plot the metric curves of the LAST trial over epochs
    color  = ['b', 'g', 'r']
    marker = ['o', '+', '*']
    x = numpy.linspace(0, config['training_epochs'], num = config['training_epochs']/5 +1)
    plt.figure(3)
    plt.xlabel('Epochs')
    for i in range(3):
        y = res_metrics[:][:,i]
        plt.plot(x, y, '-'+color[i]+marker[i], linewidth = 2)
    plt.show()
plt.legend(['NMI', 'ARI', 'ACC']) | 36,518 | 37.48156 | 140 | py |
DCN | DCN-master/run_rcv1.py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 11 07:45:42 2016
Experiments on RCV1-v2
@author: bo
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from multi_layer_km import test_SdC, load_data
from cluster_acc import acc
trials = 1
# i = [0, 1, 2, 3, 4], corresponds to [4, 8, 12, 16, 20] clusters
i = 0
filename = 'data-'+str(i)+'.pkl.gz'
K = (i+1)*4
path = '/home/bo/Data/RCV1/Processed/'
dataset = path+filename
#np.random.seed(seed = 1)
## perform KM
train_x, train_y = load_data(dataset)
km_model = KMeans(n_clusters = K, n_init = 1)
results_KM = np.zeros((trials, 3))
for i in range(trials):
ypred = km_model.fit_predict(train_x)
nmi = metrics.normalized_mutual_info_score(train_y, ypred)
ari = metrics.adjusted_rand_score(train_y, ypred)
ac = acc(ypred, train_y)
results_KM[i] = np.array([nmi, ari, ac])
KM_mean = np.mean(results_KM, axis = 0)
KM_std = np.std(results_KM, axis = 0)
# perform DCN
# for RCV1
config_1 = {'Init': '',
'lbd': 0.1,
'beta': 1,
'output_dir': 'RCV_results',
'save_file': 'rcv_10.pkl.gz',
'pretraining_epochs': 50,
'pretrain_lr': 0.01,
'mu': 0.9,
'finetune_lr': 0.05,
'training_epochs': 50,
'dataset': dataset,
'batch_size': 256,
'nClass': K,
'hidden_dim': [2000, 1000, 1000, 1000, 50],
'diminishing': False}
config_2 = {'Init': '',
'lbd': 0.1,
'beta': 1,
'output_dir': 'RCV_results',
'save_file': 'rcv_10.pkl.gz',
'pretraining_epochs': 50,
'pretrain_lr': 0.01,
'mu': 0.9,
'finetune_lr': 0.05,
'training_epochs': 50,
'dataset': dataset,
'batch_size': 256,
'nClass': K,
'hidden_dim': [2000, 1000, 1000, 1000, 500, 500, 50],
'diminishing': False}
results = []
for i in range(trials):
if K == 4 or K == 8:
# use configuration 1
config = config_1
else:
# use configuration 2
config = config_2
res_metrics = test_SdC(**config)
results.append(res_metrics)
results_SAEKM = np.zeros((trials, 3))
results_DCN = np.zeros((trials, 3))
N = config['training_epochs']/5
for i in range(trials):
results_SAEKM[i] = results[i][0]
results_DCN[i] = results[i][N]
SAEKM_mean = np.mean(results_SAEKM, axis = 0)
SAEKM_std = np.std(results_SAEKM, axis = 0)
DCN_mean = np.mean(results_DCN, axis = 0)
DCN_std = np.std(results_DCN, axis = 0)
color = ['b', 'g', 'r']
marker = ['o', '+', '*']
x = np.linspace(0, config['training_epochs'], num = config['training_epochs']/5 +1)
plt.figure(3)
plt.xlabel('Epochs')
for i in range(3):
y = res_metrics[:][:,i]
plt.plot(x, y, '-'+color[i]+marker[i], linewidth = 2)
plt.show()
plt.legend(['NMI', 'ARI', 'ACC'])
#print >> sys.stderr, ('KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(KM_mean[0],
# KM_mean[1], KM_mean[2]) )
print >> sys.stderr, ('SAE+KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SAEKM_mean[0],
SAEKM_mean[1], SAEKM_mean[2]) )
print >> sys.stderr, ('DCN avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(DCN_mean[0],
DCN_mean[1], DCN_mean[2]) )
| 3,493 | 28.116667 | 102 | py |
DCN | DCN-master/run_20News.py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 9 13:25:23 2016
Script to run experiments on 20Newsgroup
@author: bo
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.manifold import SpectralEmbedding
from multi_layer_km import test_SdC, load_data
from cluster_acc import acc
K = 20
trials = 10
filename = '20News_ncw.pkl.gz'
path = '/home/bo/Data/20News/'
dataset = path+filename
### Perform KM
train_x, train_y = load_data(dataset)
print('KM started...')
results_KM = np.zeros((trials, 3))
km_model = KMeans(n_clusters=K, n_init=1)
for i in range(trials):
ypred = km_model.fit_predict(train_x)
nmi = metrics.normalized_mutual_info_score(train_y, ypred)
ari = metrics.adjusted_rand_score(train_y, ypred)
ac = acc(ypred, train_y)
results_KM[i] = np.array([nmi, ari, ac])
KM_mean = np.mean(results_KM, axis = 0)
KM_std = np.std(results_KM, axis = 0)
## Perform SC: first find the embeddings, then perform 10 KM
print('SC started...')
results_SC = np.zeros((trials, 3))
se_model = SpectralEmbedding(n_components=K, affinity='rbf', gamma = 0.01)
se_vec = se_model.fit_transform(train_x)
for i in range(trials):
ypred = km_model.fit_predict(se_vec)
nmi = metrics.normalized_mutual_info_score(train_y, ypred)
ari = metrics.adjusted_rand_score(train_y, ypred)
ac = acc(ypred, train_y)
results_SC[i] = np.array([nmi, ari, ac])
SC_mean = np.mean(results_SC, axis = 0)
SC_std = np.std(results_SC, axis = 0)
# Perform DCN
print('DCN started...')
config = {'Init': '',
'lbd': 10,
'beta': 1,
'output_dir': '20News',
'save_file': '20News_10.pkl.gz',
'pretraining_epochs': 10,
'pretrain_lr': .01,
'mu': 0.9,
'finetune_lr': 0.001,
'training_epochs': 50,
'dataset': dataset,
'batch_size': 20,
'nClass': K,
'hidden_dim': [250, 100, 20],
'diminishing': False}
results = []
for i in range(trials):
res_metrics = test_SdC(**config)
results.append(res_metrics)
results_SAEKM = np.zeros((trials, 3))
results_DCN = np.zeros((trials, 3))
N = config['training_epochs']/5
for i in range(trials):
results_SAEKM[i] = results[i][0]
results_DCN[i] = results[i][N]
SAEKM_mean = np.mean(results_SAEKM, axis = 0)
SAEKM_std = np.std(results_SAEKM, axis = 0)
DCN_mean = np.mean(results_DCN, axis = 0)
DCN_std = np.std(results_DCN, axis = 0)
color = ['b', 'g', 'r']
marker = ['o', '+', '*']
x = np.linspace(0, config['training_epochs'], num = config['training_epochs']/5 +1)
plt.figure(3)
plt.xlabel('Epochs')
for i in range(3):
y = res_metrics[:][:,i]
plt.plot(x, y, '-'+color[i]+marker[i], linewidth = 2)
plt.show()
plt.legend(['NMI', 'ARI', 'ACC'])
print >> sys.stderr, ('KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(KM_mean[0],
KM_mean[1], KM_mean[2]) )
print >> sys.stderr, ('SC avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SC_mean[0],
SC_mean[1], SC_mean[2]) )
print >> sys.stderr, ('SAE+KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SAEKM_mean[0],
SAEKM_mean[1], SAEKM_mean[2]) )
print >> sys.stderr, ('DCN avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(DCN_mean[0],
DCN_mean[1], DCN_mean[2]) ) | 3,529 | 30.238938 | 102 | py |
DCN | DCN-master/simulation.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 19 12:09:48 2016
@author: bo
Create a toy dataset, including train_set
"""
import numpy as np
import gzip
import cPickle
import matplotlib.pyplot as plt
import sys
from sklearn.cluster import SpectralClustering
from sklearn.cluster import KMeans
from sklearn import metrics
from sklearn.decomposition import NMF
from multi_layer_km import test_SdC
from multi_layer_km_nj import test_SdC_NJ
# size of the 3 sets
def sigmoid(x):
    """Element-wise logistic function, 1 / (1 + exp(-x))."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
# --- Build a synthetic data set: `nClass` Gaussian blobs in 2-D, lifted to
# `dim` dimensions through a fixed random nonlinear map. ---
nClass = 4
num = 1000       # samples per class
sigma = 1.5      # blob standard deviation
dim = 100        # ambient (observed) dimension
c = 10*np.array([[1,1], [0,0], [1, 0], [0, 1]])   # blob centers

np.random.seed(1)
lowD_x = np.zeros((nClass*num, 2))
train_y = np.zeros((nClass*num, 1))
for i in xrange(nClass):
    lowD_x[i*num: (i+1) *num] = np.tile(c[i,:], (num,1)) + sigma*np.random.randn(num, 2)
    # Class labels: 0, 1, 2...
    train_y[i*num: (i+1) *num] = i*np.ones((num, 1))
train_y0 = train_y   # keep the pre-shuffle labels for the ground-truth plot

# Nonlinear lift: sigmoid of a rectified random linear map of the 2-D data.
W = np.random.randn(100, 2)
train_x = sigmoid(np.fmax(0, np.dot(lowD_x, W.T)))

# --- Alternative generative maps tried during experimentation (disabled) ---
#W1 = np.random.randn(10, 2)
#W2 = np.random.randn(100, 10)
#t1 = sigmoid(np.dot(lowD_x, W1.T))
#t2 = sigmoid(np.dot(t1, W2.T))
#train_x = t2

#W = np.random.randn(100, 2)
#train_x = np.tanh(sigmoid(np.dot(lowD_x, W.T)))

#W = np.random.randn(100, 2)
#train_x = np.power(sigmoid(np.dot(lowD_x, W.T)), 2)

#l1 = np.maximum(np.dot(lowD_x, W.T), 0)
#train_x = np.tanh(np.dot(l1, np.random.randn(dim, dim)))
#train_x = train_x/np.amax(train_x)

### Circle data (disabled alternative data set)
#theta = np.linspace(0, 2 * np.pi, num)
#lowD_x = np.zeros((nClass*num, 2))
## class 1 center: (2, 2), radius: 1
#lowD_x[0: num] = np.array([2 + np.sin(theta), 2 + np.cos(theta)]).T
#lowD_x[num: 2*num] = np.array([2 + 1.5*np.sin(theta), 2 + 1.5*np.cos(theta)]).T
#train_x = lowD_x

# shuffling: concatenate labels as the last column so rows stay aligned
data = np.concatenate((train_x, train_y), axis = 1)
np.random.shuffle(data)
train_x = data[:][:,0:dim] + 0.0 * np.random.randn(nClass*num, dim)   # noise amplitude set to 0
train_y = np.int32(data[:][:, -1])

## find the result of PCA
## centering
center_x = train_x - np.tile(np.mean(train_x, axis = 0), (nClass*num, 1))
# svd
U, S, V = np.linalg.svd(center_x)
#
## Calculate rank-2 reconstruction error (disabled)
#A = np.dot(U[:][:, 0:2], np.diag(S[0:2]))
#AA = np.dot(A, V[0:2])
#Err = np.mean(np.sum(np.power(center_x - AA, 2), axis = 1))
#
#print >> sys.stderr, ('Average squared rank-2 PCA reconstruction error: %.4f' %Err)
#
## Perform a NMF (rank-2 factorization for a 2-D visualization)
nmf_model = NMF(n_components=2, init='random')
WW = nmf_model.fit_transform(train_x)

color = ['b', 'g', 'r', 'm', 'k', 'b', 'g', 'r', 'm', 'k']
marker = ['o', '+','o', '+','o', '+','o', '+','o', '+']

# show ground-truth (2-D generating coordinates, pre-shuffle labels)
data_to_plot = lowD_x
for i in xrange(nClass):
    idx_x = data_to_plot[np.nonzero(train_y0 == i), 0]
    idx_y = data_to_plot[np.nonzero(train_y0 == i), 1]
    plt.figure(0)
    plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
plt.legend()
plt.show()

## show result by PCA (first two left singular vectors)
data_to_plot = U
for i in xrange(nClass):
    idx_x = data_to_plot[np.nonzero(train_y == i), 0]
    idx_y = data_to_plot[np.nonzero(train_y == i), 1]
    plt.figure(1)
    plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
plt.legend()
plt.show()
#
## show result by NMF
data_to_plot = WW
for i in xrange(nClass):
    idx_x = data_to_plot[np.nonzero(train_y == i), 0]
    idx_y = data_to_plot[np.nonzero(train_y == i), 1]
    plt.figure(2)
    plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
plt.legend()
plt.show()

## Perform spectral clustering
# NOTE(review): the printed "NMI" is actually the *adjusted* mutual
# information (AMI) -- confirm which metric is intended.
sc = SpectralClustering(n_clusters= nClass, n_init=10, gamma=0.1, affinity='rbf',
                        n_neighbors=3, assign_labels='kmeans', degree=3, coef0=1, kernel_params=None)
ypred = sc.fit_predict(train_x)
nmi_sc = metrics.adjusted_mutual_info_score(train_y, ypred)
ari_sc = metrics.adjusted_rand_score(train_y, ypred)

print >> sys.stderr, ('NMI for spectral clustering: %.2f' % (nmi_sc))
print >> sys.stderr, ('ARI for spectral clustering: %.2f' % (ari_sc))

## Perform KMeans
km = KMeans(n_clusters= nClass, init='k-means++', n_init=10)
ypred = km.fit_predict(train_x)
nmi_km = metrics.adjusted_mutual_info_score(train_y, ypred)
ari_km = metrics.adjusted_rand_score(train_y, ypred)

print >> sys.stderr, ('NMI for Kmeans: %.2f' % (nmi_km))
print >> sys.stderr, ('ARI for Kmeans: %.2f' % (ari_km))

# Persist the toy data in the (train, valid, test) triple layout expected
# by load_data; all three splits are the same set here.
train_set = train_x, train_y
dataset = [train_set, train_set, train_set]

f = gzip.open('toy.pkl.gz','wb')
cPickle.dump(dataset, f, protocol=2)
f.close()

# Train DCN on the saved toy data.
nmi_dc, ari_dc = test_SdC(lbd = .1, finetune_lr= .05, mu = 0.9, pretraining_epochs=50,
                          pretrain_lr=0.01, training_epochs=100,
                          dataset='toy.pkl.gz', batch_size=20, nClass = nClass,
                          hidden_dim = [100, 50, 10, 2])
#
print >> sys.stderr, ('NMI for spectral clustering: %.2f' % (nmi_sc))
print >> sys.stderr, ('ARI for spectral clustering: %.2f' % (ari_sc))
print >> sys.stderr, ('NMI for deep clustering: %.2f' % (nmi_dc))
print >> sys.stderr, ('ARI for deep clustering: %.2f' % (ari_dc))

# --- SAE+KM comparison run (disabled) ---
#nmi_nj, ari_nj = test_SdC_NJ(lbd = 0, finetune_lr= .01, mu = 0.9, pretraining_epochs=50,
#                             pretrain_lr=.01, training_epochs=100,
#                             dataset='toy.pkl.gz', batch_size=20, nClass = nClass, hidden_dim = [100, 50, 10, 2])
#
#print >> sys.stderr, ('NMI for SAE + Kmeans: %.2f' % (nmi_nj))
#print >> sys.stderr, ('ARI for SAE + Kmeans: %.2f' % (ari_nj))

## Working configuration
## W = np.random.randn(100, 2)
## train_x = np.power(sigmoid(np.dot(lowD_x, W.T)), 2)
#
#test_SdC(lbd = .1, finetune_lr= .01, mu = 0.9, pretraining_epochs=50,
#         pretrain_lr=0.01, training_epochs=100,
#         dataset='toy.pkl.gz', batch_size=20, nClass = nClass,
#         hidden_dim = [100, 50, 10, 2])
##
#print >> sys.stderr, ('NMI for spectral clustering: %.2f' % (nmi_sc))
#print >> sys.stderr, ('ARI for spectral clustering: %.2f' % (ari_sc))

## Working configuration
## W = np.random.randn(100, 2)
## train_x = np.tanh(sigmoid(np.dot(lowD_x, W.T)))
#test_SdC(lbd = 0.2, finetune_lr= .01, mu = 0.9, pretraining_epochs=50,
#         pretrain_lr=0.5, training_epochs=100,
#         dataset='toy.pkl.gz', batch_size=20, nClass = nClass,
#         hidden_dim = [100, 50, 10, 2])
#print >> sys.stderr, ('NMI for spectral clustering: %.2f' % (nmi_sc))
#print >> sys.stderr, ('ARI for spectral clustering: %.2f' % (ari_sc))

## Working configuration
#W1 = np.random.randn(10, 2)
#W2 = np.random.randn(100, 10)
#t1 = sigmoid(np.dot(lowD_x, W1.T))
#t2 = sigmoid(np.dot(t1, W2.T))
#train_x = t2
# nmi_dc, ari_dc = test_SdC(lbd = 1, finetune_lr= .05, mu = 0.9, pretraining_epochs=50,
#                           pretrain_lr=0.01, training_epochs=100,
#                           dataset='toy.pkl.gz', batch_size=20, nClass = nClass,
#                           hidden_dim = [100, 50, 10, 2])
| 6,915 | 30.870968 | 101 | py |
DCN | DCN-master/retrieve.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 22:15:30 2016
retrive the saved results
@author: bo
"""
import cPickle, gzip
saved_file = 'deepclus_2_clusters.pkl.gz'
with gzip.open(saved_file, 'rb') as f:
content = cPickle.load(f)
| 243 | 14.25 | 41 | py |
DCN | DCN-master/cluster_acc.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 27 14:31:40 2016
@author: bo
"""
from sklearn.utils.linear_assignment_ import linear_assignment
import numpy as np
def acc(ypred, y):
    """
    Clustering accuracy under the best cluster-to-class matching.

    ypred: 1-D numpy vector, predicted cluster labels.
    y:     1-D numpy vector, ground-truth labels.
    Returns the fraction of samples correctly labeled under the optimal
    one-to-one permutation between predicted clusters and true classes.
    The prediction must use the same number of distinct labels as the
    ground truth.

    Finding the best permutation is a linear assignment problem: build an
    N-by-N contingency matrix and solve it with
    scipy.optimize.linear_sum_assignment.  (This replaces the deprecated
    sklearn.utils.linear_assignment_, which was removed in
    scikit-learn 0.23; the module-level sklearn import can be dropped.)
    """
    # Local import keeps this function independent of the removed sklearn API.
    from scipy.optimize import linear_sum_assignment

    assert len(y) > 0
    assert len(np.unique(ypred)) == len(np.unique(y))
    s = np.unique(ypred)
    t = np.unique(y)
    N = len(s)
    # Contingency matrix: C[i, j] = #samples in predicted cluster s[i]
    # that belong to true class t[j].
    C = np.zeros((N, N), dtype=np.int64)
    for i in range(N):
        for j in range(N):
            C[i][j] = np.count_nonzero(np.logical_and(ypred == s[i], y == t[j]))
    # linear_sum_assignment minimizes total cost, so convert the counts
    # to a 'true' cost by subtracting from the maximum entry.
    row, col = linear_sum_assignment(np.amax(C) - C)
    # Accuracy under the optimal assignment.
    count = C[row, col].sum()
    return 1.0*count/len(y)
if __name__ == '__main__':
    # Demonstration: accuracy is a deceptive clustering metric.  The
    # prediction below is completely wrong with respect to the ground
    # truth, yet still scores 0.5 under the optimal label permutation.
    # Prefer standard metrics such as NMI or ARI.
    s = np.array([1, 2, 2, 3, 1, 3])
    t = np.array([1, 1, 2, 2, 3, 3])
    ac = acc(s, t)
| 1,845 | 29.262295 | 135 | py |
DCN | DCN-master/run_pendigits.py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 14:48:35 2016
Perform experiments with Pendigits
@author: bo
"""
import sys
import gzip
import cPickle
import numpy as np
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.manifold import SpectralEmbedding
from multi_layer_km import test_SdC
from cluster_acc import acc
K = 10
trials = 10
filename = 'pendigits.pkl.gz'
path = '/home/bo/Data/Pendigits/'
dataset = path+filename
# perform KM
with gzip.open(dataset, 'rb') as f:
train_x, train_y = cPickle.load(f)
km_model = KMeans(n_clusters = K, n_init = 1)
results_KM = np.zeros((trials, 3))
for i in range(trials):
ypred = km_model.fit_predict(train_x)
nmi = metrics.adjusted_mutual_info_score(train_y, ypred)
ari = metrics.adjusted_rand_score(train_y, ypred)
ac = acc(ypred, train_y)
results_KM[i] = np.array([nmi, ari, ac])
KM_mean = np.mean(results_KM, axis = 0)
KM_std = np.std(results_KM, axis = 0)
# Perform SC
print('SC started...')
results_SC = np.zeros((trials, 3))
se_model = SpectralEmbedding(n_components=K, affinity='rbf', gamma = 0.1)
se_vec = se_model.fit_transform(train_x)
for i in range(trials):
ypred = km_model.fit_predict(se_vec)
nmi = metrics.adjusted_mutual_info_score(train_y, ypred)
ari = metrics.adjusted_rand_score(train_y, ypred)
ac = acc(ypred, train_y)
results_SC[i] = np.array([nmi, ari, ac])
SC_mean = np.mean(results_SC, axis = 0)
SC_std = np.std(results_SC, axis = 0)
# for PenDigits, perform DCN and SAE+KM
config = {'Init': '',
'lbd': .5,
'beta': 1,
'output_dir': 'Pendigits',
'save_file': 'pen_10.pkl.gz',
'pretraining_epochs': 50,
'pretrain_lr': 0.01,
'mu': 0.9,
'finetune_lr': 0.01,
'training_epochs': 50,
'dataset': dataset,
'batch_size': 20,
'nClass': K,
'hidden_dim': [50, 16, 10],
'diminishing': False}
results = []
for i in range(trials):
res_metrics = test_SdC(**config)
results.append(res_metrics)
results_SAEKM = np.zeros((trials, 3))
results_DCN = np.zeros((trials, 3))
N = config['training_epochs']/5
for i in range(trials):
results_SAEKM[i] = results[i][0]
results_DCN[i] = results[i][N]
SAEKM_mean = np.mean(results_SAEKM, axis = 0)
SAEKM_std = np.std(results_SAEKM, axis = 0)
DCN_mean = np.mean(results_DCN, axis = 0)
DCN_std = np.std(results_DCN, axis = 0)
print >> sys.stderr, ('KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(KM_mean[0],
KM_mean[1], KM_mean[2]) )
print >> sys.stderr, ('SC avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SC_mean[0],
SC_mean[1], SC_mean[2]) )
print >> sys.stderr, ('SAE+KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SAEKM_mean[0],
SAEKM_mean[1], SAEKM_mean[2]) )
print >> sys.stderr, ('DCN avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(DCN_mean[0],
DCN_mean[1], DCN_mean[2]) ) | 3,133 | 30.656566 | 102 | py |
DCN | DCN-master/multi_layer.py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 24 14:27:50 2016
@author: bo
Multiple-layers Deep Clustering
06/19/2016 Multi-layer autoencoder, without reconstruction, performance is not good, as expected.
06/20/2016 Multi-layer autoencoder, with reconstruction and clustering as loss, seems to give meaningful result on MNIST
06/21/2016 Modified cost output, so that the functions print out cost for both reconstruction and clustering,
added an input lbd, to enable tuning parameter that balancing the two costs--not an easy job.
06/29/2016 Changed how learning-rate (stepsize, both pretraining and finetuning) and center_array are passed and manipulated
by using shared-variable mechanism in Theano. Now the stepsize is diminishing c/sqrt(t), where c is some fixed constant
"""
import os
import sys
import timeit
import numpy
import cPickle
import gzip
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from sklearn import metrics
from sklearn.cluster import MiniBatchKMeans
#from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from utils import tile_raster_images
#from logistic_sgd import LogisticRegression
#from mlp import HiddenLayer
from dA import dA
from deepclustering import load_data
from mlp import HiddenLayer
try:
import PIL.Image as Image
except ImportError:
import Image
#theano.config.compute_test_value = 'warn'
# class dA2 inherited from dA, with loss function modified to norm-square loss
class dA2(dA):
    """Denoising-autoencoder layer used by the SdC stack.

    Inherits everything from ``dA`` and only overrides the pre-training
    objective.  NOTE(review): despite the file-level note about a
    norm-square loss, the active loss here is the cross-entropy; the
    squared-error alternative is left commented out below -- confirm
    which one is intended for non-binary inputs.
    """

    # overload the original function in dA class
    def get_cost_updates(self, corruption_level, learning_rate):
        """Return (cost, updates) for one pre-training step of this layer.

        corruption_level: fraction of the input to corrupt before encoding.
        learning_rate: plain-SGD step size applied to this layer's params.
        """
        tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(y)
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        # Squared-error alternative (disabled):
        # L = T.sum(T.pow(self.x - z, 2), axis = 1)
        cost = T.mean(L)
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates (vanilla SGD)
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]
        return (cost, updates)
# class SdC, main class for deep-clustering
class SdC(object):
    """Stacked denoising autoencoder for deep clustering.

    Builds ``n_layers`` chained ``dA2`` layers; the deepest hidden
    representation is the clustering space.  Fine-tuning minimizes the
    distance of each sample's embedding to its assigned cluster center
    plus ``lbd`` times the whole-stack reconstruction error (centers are
    supplied externally, see ``build_finetune_functions``).
    """

    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input = None,
        n_ins=784,
        lbd = 1,
        hidden_layers_sizes=[1000, 200, 10],
        corruption_levels=[0, 0, 0]
    ):
        # NOTE(review): the mutable default arguments above are shared
        # across calls; safe only while callers never mutate them.
#        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []           # flattened [W, b, b'] of every layer
        self.n_layers = len(hidden_layers_sizes)
        self.lbd = lbd             # weight on the reconstruction term
        self.delta = []            # momentum accumulators, one per param
        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        if input is None:
            self.x = T.matrix('x')  # the data is presented as rasterized images
        else:
            self.x = input
        self.y = T.ivector('y')  # the labels are presented as 1D vector of

        for i in xrange(self.n_layers):
            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]
            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.dA_layers[-1].get_hidden_values(self.dA_layers[-1].x)

            dA_layer = dA2(numpy_rng=numpy_rng,
                           theano_rng=theano_rng,
                           input=layer_input,
                           n_visible=input_size,
                           n_hidden=hidden_layers_sizes[i])
            self.dA_layers.append(dA_layer)
            self.params.extend(dA_layer.params)
            # Zero-initialized momentum buffers shaped like (W, b, b').
            delta_i = (theano.shared(value = numpy.zeros((input_size, hidden_layers_sizes[i]), dtype = numpy.float32), borrow=True),
                       theano.shared(value = numpy.zeros(hidden_layers_sizes[i], dtype = numpy.float32), borrow = True ),
                       theano.shared(value = numpy.zeros(input_size, dtype = numpy.float32), borrow = True ) )
            self.delta.extend(delta_i)

    # construct a function that implements one step of finetunining
    def get_output(self):
        # Deepest hidden representation (the clustering embedding).
#        return self.sigmoid_layers[-1].output
        return self.dA_layers[-1].get_hidden_values(self.dA_layers[-1].x)

    def get_network_reconst(self):
        # Decode the deepest code back through every layer's (W', b').
        reconst = self.get_output()
        for da in reversed(self.dA_layers):
            reconst = T.nnet.sigmoid(T.dot(reconst, da.W_prime) + da.b_prime)
        return reconst

    def finetune_cost_updates(self, center, mu, learning_rate):
        """Return ((total, reconstruction, clustering, lr), updates).

        center: symbolic matrix of per-sample cluster centers.
        mu: momentum coefficient.
        learning_rate: shared scalar step size.
        """
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        # Least-squares clustering loss: distance to the assigned center.
        network_output = self.get_output()
        temp = T.pow(center - network_output, 2)
        L = T.sum(temp, axis=1)
        # Add the network reconstruction error (cross-entropy form;
        # the squared-error variant is disabled below).
        z = self.get_network_reconst()
#        reconst_err = T.sum(T.pow(self.x - z, 2), axis = 1)
        reconst_err = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        L = L + self.lbd*reconst_err

        cost1 = T.mean(L)                      # total objective
        cost2 = self.lbd*T.mean(reconst_err)   # weighted reconstruction part
        cost3 = cost1 - cost2                  # clustering part

        # compute the gradients of the total cost w.r.t. the parameters
        gparams = T.grad(cost1, self.params)
        # generate the list of updates: Nesterov-style momentum,
        # tracking a velocity `delta` per parameter.
        updates = []
        for param, delta, gparam in zip(self.params, self.delta, gparams):
            updates.append( (delta, mu*delta - learning_rate * gparam) )
            updates.append( (param, param + mu*mu*delta - (1+mu)*learning_rate*gparam ))

        return ((cost1, cost2, cost3, learning_rate), updates)

    def pretraining_functions(self, train_set_x, batch_size):
        ''' Generates a list of functions, each of them implementing one
        step in trainnig the dA corresponding to the layer with same index.
        The function will require as input the minibatch index, and to train
        a dA you just need to iterate, calling the corresponding function on
        all minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA

        :type batch_size: int
        :param batch_size: size of a [mini]batch
        '''
        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # begining of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate)
            # compile the theano function
            fn = theano.function(
                inputs=[
                    index,
                    theano.Param(corruption_level, default = 0.2),
                    theano.Param(learning_rate, default = 0.1)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                }
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns

    def build_finetune_functions(self, datasets, center_shared, batch_size, mu, learning_rate):
        '''Generates a function `train` that implements one step of
        finetuning.

        :type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: list of (data, labels) pairs; only the first
                         (training) pair is used here.
        :param center_shared: shared matrix holding, per training sample,
                              the cluster center it is currently assigned to.
        :type batch_size: int
        :param batch_size: size of a minibatch
        :param mu: momentum coefficient.
        :param learning_rate: shared scalar step size.

        ONLY TRAINING IS IMPLEMENTED; VALIDATION AND TESTING TO BE ADDED...
        '''
        (train_set_x, train_set_y) = datasets[0]
#        (valid_set_x, valid_set_y) = datasets[1]
#        (test_set_x, test_set_y) = datasets[2]
        center= T.matrix('center')
        # compute number of minibatches for training, validation and testing
#        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
#        n_valid_batches /= batch_size
#        n_test_batches = test_set_x.get_value(borrow=True).shape[0]
#        n_test_batches /= batch_size

        index = T.lscalar('index')  # index to a [mini]batch

        # compute the gradients with respect to the model parameters
        cost, updates = self.finetune_cost_updates(
            center,
            mu,
            learning_rate=learning_rate
        )

        train_fn = theano.function(
            inputs=[index],
            outputs= cost,
            updates=updates,
            givens={
                self.x: train_set_x[
                    index * batch_size: (index + 1) * batch_size
                ],
                center: center_shared[index * batch_size: (index + 1) * batch_size]
            },
            name='train'
        )

        return train_fn
def test_SdC(lbd = .01, finetune_lr= .005, mu = 0.9, pretraining_epochs=50,
pretrain_lr=.001, training_epochs=150,
dataset='toy.pkl.gz', batch_size=20, nClass = 4, hidden_dim = [100, 50, 2]):
"""
Demonstrates how to train and test a stochastic denoising autoencoder.
This is demonstrated on MNIST.
:type lbd: float
:param lbd: tuning parameter, multiplied on reconstruction error, i.e. the larger
lbd the larger weight on minimizing reconstruction error.
:type learning_rate: float
:param learning_rate: learning rate used in the finetune stage
(factor for the stochastic gradient)
:type pretraining_epochs: int
:param pretraining_epochs: number of epoch to do pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate to be used during pre-training
:type n_iter: int
:param n_iter: maximal number of iterations ot run the optimizer
:type dataset: string
:param dataset: path the the pickled dataset
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
# valid_set_x, valid_set_y = datasets[1]
# test_set_x, test_set_y = datasets[2]
inDim = train_set_x.get_value().shape[1]
label_true = numpy.int32(train_set_y.get_value(borrow=True))
index = T.lscalar()
x = T.matrix('x')
# x.tag.test_value = numpy.random.rand(50000, 784).astype('float32')
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
# numpy random generator
# start-snippet-3
numpy_rng = numpy.random.RandomState(89677)
print '... building the model'
# construct the stacked denoising autoencoder class
sdc = SdC(
numpy_rng=numpy_rng,
n_ins=inDim,
lbd = lbd,
input=x,
hidden_layers_sizes= hidden_dim,
)
# end-snippet-3 start-snippet-4
#########################
# PRETRAINING THE MODEL #
#########################
print '... getting the pretraining functions'
pretraining_fns = sdc.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size)
print '... pre-training the model'
start_time = timeit.default_timer()
## Pre-train layer-wise
corruption_levels = [.0, .0, .0, 0, 0]
pretrain_lr_shared = theano.shared(numpy.asarray(pretrain_lr,
dtype='float32'),
borrow=True)
for i in xrange(sdc.n_layers):
# go through pretraining epochs
iter = 0
for epoch in xrange(pretraining_epochs):
# go through the training set
c = []
for batch_index in xrange(n_train_batches):
iter = (epoch) * n_train_batches + batch_index
pretrain_lr_shared.set_value( numpy.float32(pretrain_lr) )
# pretrain_lr_shared.set_value( numpy.float32(pretrain_lr/numpy.sqrt(iter + 1)) )
cost = pretraining_fns[i](index=batch_index,
corruption=corruption_levels[i],
lr=pretrain_lr_shared.get_value())
c.append(cost)
print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
print numpy.mean(c)
end_time = timeit.default_timer()
print >> sys.stderr, ('The pretraining code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
# end-snippet-4
########################
# FINETUNING THE MODEL #
########################
km = MiniBatchKMeans(n_clusters = nClass, batch_size=100)
out = sdc.get_output()
out_sdc = theano.function(
[index],
outputs = out,
givens = {x: train_set_x[index * batch_size: (index + 1) * batch_size]}
)
hidden_val = []
for batch_index in xrange(n_train_batches):
hidden_val.append(out_sdc(batch_index))
hidden_array = numpy.asarray(hidden_val)
hidden_size = hidden_array.shape
hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
# use the true labels to get initial cluster centers
centers = numpy.zeros((nClass, hidden_size[2]))
for i in xrange(nClass):
temp = hidden_array[label_true == i]
centers[i] = numpy.mean(temp, axis = 0)
center_array = centers[label_true]
# # Do a k-means clusering to get center_array
# ypred = km.fit_predict(hidden_array)
# center_array = km.cluster_centers_[[km.labels_]]
center_shared = theano.shared(numpy.asarray(center_array ,
dtype='float32'),
borrow=True)
lr_shared = theano.shared(numpy.asarray(finetune_lr,
dtype='float32'),
borrow=True)
print '... getting the finetuning functions'
train_fn = sdc.build_finetune_functions(
datasets=datasets,
center_shared=center_shared,
batch_size=batch_size,
mu = mu,
learning_rate=lr_shared
)
print '... finetunning the model'
# early-stopping parameters
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
c = [] # total cost
d = [] # cost of reconstruction
e = [] # cost of clustering
f = [] # learning_rate
g = []
for minibatch_index in xrange(n_train_batches):
# calculate the stepsize
iter = (epoch - 1) * n_train_batches + minibatch_index
lr_shared.set_value( numpy.float32(finetune_lr) )
# lr_shared.set_value( numpy.float32(finetune_lr/numpy.sqrt(epoch)) )
cost = train_fn(minibatch_index)
aa = sdc.dA_layers[0].W.get_value()
c.append(cost[0])
d.append(cost[1])
e.append(cost[2])
f.append(cost[3])
# gg = cost[4]
# g.append(gg)
# Do a k-means clusering to get center_array
hidden_val = []
for batch_index in xrange(n_train_batches):
hidden_val.append(out_sdc(batch_index))
hidden_array = numpy.asarray(hidden_val)
hidden_size = hidden_array.shape
hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
km.fit(hidden_array)
center_array = km.cluster_centers_[[km.labels_]]
center_shared.set_value(numpy.asarray(center_array, dtype='float32'))
# center_shared = theano.shared(numpy.asarray(center_array ,
# dtype='float32'),
# borrow=True)
print 'Fine-tuning epoch %d ++++ \n' % (epoch),
print ('Total cost: %.5f, '%(numpy.mean(c)) + 'Reconstruction: %.5f, ' %(numpy.mean(d))
+ "Clustering: %.5f, " %(numpy.mean(e)) )
# print 'Learning rate: %.6f' %numpy.mean(f)
err = numpy.mean(d)
print >> sys.stderr, ('Average squared 2-D reconstruction error: %.4f' %err)
end_time = timeit.default_timer()
ypred = km.predict(hidden_array)
nmi_dc = metrics.adjusted_mutual_info_score(label_true, ypred)
print >> sys.stderr, ('NMI for deep clustering: %.2f' % (nmi_dc))
ari_dc = metrics.adjusted_rand_score(label_true, ypred)
print >> sys.stderr, ('ARI for deep clustering: %.2f' % (nmi_dc))
# print(
# (
# 'Optimization complete with best validation score of %f %%, '
# 'on iteration %i, '
# 'with test performance %f %%'
# )
# % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
# )
f = open('deepclus.save', 'wb')
cPickle.dump([param.get_value() for param in sdc.params], f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
print >> sys.stderr, ('The training code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
color = ['b', 'g', 'r', 'm', 'k', 'b', 'g', 'r', 'm', 'k']
marker = ['o', '+','o', '+','o', '+','o', '+','o', '+']
# Take 500 samples to plot
data_to_plot = hidden_array[0:1999]
label_plot = label_true[0:1999]
x = data_to_plot[:, 0]
y = data_to_plot[:, 1]
for i in xrange(nClass):
idx_x = x[numpy.nonzero(label_plot == i)]
idx_y = y[numpy.nonzero(label_plot == i)]
plt.figure(3)
plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
plt.legend()
plt.show()
if dataset == 'toy.pkl.gz':
x = train_set_x.get_value()[:, 0]
y = train_set_x.get_value()[:, 1]
# using resulted label, and the original data
pred_label = ypred[0:1999]
for i in xrange(nClass):
idx_x = x[numpy.nonzero( pred_label == i)]
idx_y = y[numpy.nonzero( pred_label == i)]
plt.figure(4)
plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
plt.legend()
plt.show()
if __name__ == '__main__':
    # Smoke-run the deep-clustering model on the synthetic toy data set.
    demo_config = {
        'lbd': 1,
        'finetune_lr': .1,
        'pretraining_epochs': 50,
        'pretrain_lr': 1,
        'training_epochs': 100,
        'dataset': 'toy.pkl.gz',
        'batch_size': 20,
        'nClass': 4,
        'hidden_dim': [100, 50, 20],
    }
    test_SdC(**demo_config)
| 21,375 | 37.035587 | 142 | py |
DCN | DCN-master/get_a_init.py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 01:04:40 2016
@author: bo
run and save a dA model, to initialize my deep_clus model
"""
import dA
# Train the denoising autoencoder and save it to disk (per the module
# docstring, the saved dA model is used to initialize the deep_clus model).
dA.test_dA()
DCN | DCN-master/run_raw_mnist.py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 9 21:56:33 2016
Perform experiment on Raw-MNIST data
@author: bo
"""
import gzip
import cPickle
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans, metrics
from multi_layer_km import test_SdC
from cluster_acc import acc
K = 10
trials = 10
filename = 'mnist_dcn.pkl.gz'
path = '/home/bo/Data/MNIST/'
dataset = path+filename
## perform KM
with gzip.open(dataset, 'rb') as f:
train_x, train_y = cPickle.load(f)
km_model = KMeans(n_clusters = K, n_init = 1)
results_KM = np.zeros((trials, 3))
for i in range(trials):
ypred = km_model.fit_predict(train_x)
nmi = metrics.adjusted_mutual_info_score(train_y, ypred)
ari = metrics.adjusted_rand_score(train_y, ypred)
ac = acc(ypred, train_y)
results_KM[i] = np.array([nmi, ari, ac])
KM_mean = np.mean(results_KM, axis = 0)
KM_std = np.std(results_KM, axis = 0)
# perform DCN
config = {'Init': '',
'lbd': .05,
'beta': 1,
'output_dir': 'MNIST_results',
'save_file': 'mnist_10.pkl.gz',
'pretraining_epochs': 50,
'pretrain_lr': .01,
'mu': 0.9,
'finetune_lr': 0.05,
'training_epochs': 50,
'dataset': dataset,
'batch_size': 128,
'nClass': K,
'hidden_dim': [2000, 1000, 500, 500, 250, 50],
'diminishing': False}
results = []
for i in range(trials):
res_metrics = test_SdC(**config)
results.append(res_metrics)
results_SAEKM = np.zeros((trials, 3))
results_DCN = np.zeros((trials, 3))
N = config['training_epochs']/5
for i in range(trials):
results_SAEKM[i] = results[i][0]
results_DCN[i] = results[i][N]
SAEKM_mean = np.mean(results_SAEKM, axis = 0)
SAEKM_std = np.std(results_SAEKM, axis = 0)
DCN_mean = np.mean(results_DCN, axis = 0)
DCN_std = np.std(results_DCN, axis = 0)
color = ['b', 'g', 'r']
marker = ['o', '+', '*']
x = np.linspace(0, config['training_epochs'], num = config['training_epochs']/5 +1)
plt.figure(3)
plt.xlabel('Epochs')
for i in range(3):
y = res_metrics[:][:,i]
plt.plot(x, y, '-'+color[i]+marker[i], linewidth = 2)
plt.show()
plt.legend(['NMI', 'ARI', 'ACC'])
print >> sys.stderr, ('KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(KM_mean[0],
KM_mean[1], KM_mean[2]) )
print >> sys.stderr, ('SAE+KM avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(SAEKM_mean[0],
SAEKM_mean[1], SAEKM_mean[2]) )
print >> sys.stderr, ('DCN avg. NMI = {0:.2f}, ARI = {1:.2f}, ACC = {2:.2f}'.format(DCN_mean[0],
DCN_mean[1], DCN_mean[2]) )
| 2,758 | 28.042105 | 102 | py |
DCN | DCN-master/multi_layer_rbm_mmc.py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 24 14:27:50 2016
@author: bo
Multiple-layers Deep Clustering
06/19/2016 Multi-layer autoencoder, without reconstruction, performance is not good, as expected.
06/20/2016 Multi-layer autoencoder, with reconstruction and clustering as loss, seems to give meaningful result on MNIST
06/21/2016 Modified cost output, so that the functions print out cost for both reconstruction and clustering,
added an input lbd, to enable tuning parameter that balancing the two costs--not an easy job.
06/29/2016 Changed how learning-rate (stepsize, both pretraining and finetuning) and center_array are passed and manipulated
by using shared-variable mechanism in Theano. Now the stepsize is diminishing c/sqrt(t), where c is some fixed constant
07/11/2016 Changed to use RBM as pretraining network. Changed dA2 class initialization procedure, to allow for external initialization;
Changed initialization procedure of SdC class. Create a new class HiddenLayer2, to allow for initialization
"""
import os
import sys
import timeit
import scipy.io as sio
import copy
import numpy
import cPickle
import gzip
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from sklearn import metrics
from sklearn.cluster import MiniBatchKMeans
#from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from utils import tile_raster_images
#from logistic_sgd import LogisticRegression
#from mlp import HiddenLayer
from dA import dA
from RBMs_init import RBMs_init
#from deepclustering import load_data
from mlp import HiddenLayer
from multi_layer_km import SdC
from multi_layer_km import batch_km
try:
import PIL.Image as Image
except ImportError:
import Image
#theano.config.compute_test_value = 'warn'
# class dA2 inherited from dA, with loss function modified to norm-square loss
class dA2(dA):
    """Denoising autoencoder with a squared-error reconstruction loss.

    Differs from the parent `dA` in two ways: (1) the constructor accepts
    externally supplied numpy values for W/bhid/bvis so the layer can be
    initialized from pretrained parameters, and (2) `get_cost_updates`
    uses sum-of-squares reconstruction error instead of cross-entropy.
    """
    # overload the original function in dA class
    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input=None,
        n_visible=784,
        n_hidden=500,
        W=None,
        bhid=None,
        bvis=None
    ):
        """
        Initialize the dA class by specifying the number of visible units (the
        dimension d of the input ), the number of hidden units ( the dimension
        d' of the latent or hidden space ) and the corruption level. The
        constructor also receives symbolic variables for the input, weights and
        bias. Such a symbolic variables are useful when, for example the input
        is the result of some computations, or when weights are shared between
        the dA and an MLP layer. When dealing with SdAs this always happens,
        the dA on layer 2 gets as input the output of the dA on layer 1,
        and the weights of the dA are used in the second stage of training
        to construct an MLP.
        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: number random generator used to generate weights
        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`
        :type input: theano.tensor.TensorType
        :param input: a symbolic description of the input or None for
                      standalone dA
        :type n_visible: int
        :param n_visible: number of visible units
        :type n_hidden: int
        :param n_hidden:  number of hidden units
        :type W: numpy array or None
        :param W: optional initial weight values; when None, W is sampled
                  uniformly from +/- 4*sqrt(6/(n_hidden+n_visible))
        :type bhid: numpy array or None
        :param bhid: optional initial hidden-bias values; zeros when None
        :type bvis: numpy array or None
        :param bvis: optional initial visible-bias values; zeros when None
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        # create a Theano random generator that gives symbolic random values
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # note : W' was written as `W_prime` and b' as `b_prime`
        if W is None:
            # W is initialized with `initial_W` which is uniformely sampled
            # from -4*sqrt(6./(n_visible+n_hidden)) and
            # 4*sqrt(6./(n_hidden+n_visible))the output of uniform if
            # converted using asarray to dtype
            # theano.config.floatX so that the code is runable on GPU
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
        else:
            # use the externally supplied initialization
            initial_W = W
        W = theano.shared(value=initial_W, name='W', borrow=True)
        if bvis is None:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        else:
            bvis = theano.shared(
                value=bvis,
                borrow=True
            )
        if bhid is None:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )
        else:
            bhid = theano.shared(
                value=bhid,
                name='b',
                borrow=True
            )
        self.W = W
        # b corresponds to the bias of the hidden
        self.b = bhid
        # b_prime corresponds to the bias of the visible
        self.b_prime = bvis
        # tied weights, therefore W_prime is W transpose
        self.W_prime = self.W.T
        self.theano_rng = theano_rng
        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name='input')
        else:
            self.x = input
        self.params = [self.W, self.b, self.b_prime]
    def get_cost_updates(self, corruption_level, learning_rate):
        """ This function computes the cost and the updates for one training
        step of the dA, using squared reconstruction error. """
        tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(y)
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        # Squared-error loss (replaces the parent's cross-entropy loss).
        L = T.sum(T.pow(self.x - z, 2), axis = 1)
        cost = T.mean(L)
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates (plain SGD)
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]
        return (cost, updates)
#
## class HiddenLayer2
class HiddenLayer2(HiddenLayer):
    """Hidden layer that can be initialized from externally supplied W/b
    values (e.g. pretrained parameters) instead of random initialization."""
    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        """
        Typical hidden layer of a MLP: units are fully-connected and have
        sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
        and the bias vector b is of shape (n_out,).
        NOTE : The nonlinearity used here is tanh
        Hidden unit activation is given by: tanh(dot(input,W) + b)
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)
        :type n_in: int
        :param n_in: dimensionality of input
        :type n_out: int
        :param n_out: number of hidden units
        :type W: numpy array or None
        :param W: optional initial weight values; randomly sampled when None
        :type b: numpy array or None
        :param b: optional initial bias values; zeros when None
        :type activation: theano.Op or function
        :param activation: Non linearity to be applied in the hidden
                           layer
        """
        self.input = input
        # `W` is initialized with `W_values` which is uniformely sampled
        # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
        # for tanh activation function
        # the output of uniform if converted using asarray to dtype
        # theano.config.floatX so that the code is runable on GPU
        # Note : optimal initialization of weights is dependent on the
        #        activation function used (among other things).
        #        For example, results presented in [Xavier10] suggest that you
        #        should use 4 times larger initial weights for sigmoid
        #        compared to tanh
        #        We have no info for other function, so we use the same as
        #        tanh.
        if W is None:
            W_values = numpy.asarray(
                rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4
        else:
            # use the externally supplied initialization as-is
            W_values = W
        W = theano.shared(value=W_values, name='W', borrow=True)
        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
        else:
            b_values = b;
        b = theano.shared(value=b_values, name='b', borrow=True)
        self.W = W
        self.b = b
        lin_output = T.dot(input, self.W) + self.b
        self.output = (
            lin_output if activation is None
            else activation(lin_output)
        )
        # parameters of the model
        self.params = [self.W, self.b]
# class SdC, main class for deep-clustering
class SdC2(SdC):
    """Stacked deep-clustering network finetuned with max-margin clustering.

    Extends SdC: the stacked autoencoder layers are built from externally
    supplied parameters (`Param_init`, flat list of W/bhid/bvis triples per
    layer), and the finetuning loss combines a hinge-style clustering term
    over prototype inner products with the lbd-weighted reconstruction error.
    """
    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input = None,
        n_ins=784,
        lbd = 1,
        hidden_layers_sizes=[1000, 200, 10],
        corruption_levels=[0, 0, 0],
        Param_init = None
    ):
        # NOTE(review): the list defaults above are shared across calls
        # (mutable-default caveat); they are not mutated in this constructor.
        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        self.lbd = lbd
        # momentum buffers: one (W, bhid, bvis) triple of shared zeros per layer
        self.delta = []
        assert self.n_layers > 0
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        if input is None:
            self.x = T.matrix('x')  # the data is presented as rasterized images
        else:
            self.x = input
        self.y = T.ivector('y')  # the labels are presented as 1D vector of
        for i in xrange(self.n_layers):
            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]
            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
#                layer_input = self.sigmoid_layers[-1].output
                layer_input = self.dA_layers[-1].get_hidden_values(self.dA_layers[-1].x)
            # Construct a deep_clus layer, collect them together in the dc_layers list
            sigmoid_layer = HiddenLayer2(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        W = Param_init[3*i],
                                        b = Param_init[3*i + 1],
                                        activation=T.nnet.sigmoid)
            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)
            # its arguably a philosophical question...
            # but we are going to only declare that the parameters of the
            # sigmoid_layers are parameters of the StackedDAA
            # the visible biases in the dA are parameters of those
            # dA, but not the SdA
            # using the dA2 objects, instead of dA.
            # dA2 uses norm-square loss function
            dA_layer = dA2(numpy_rng=numpy_rng,
                          theano_rng=theano_rng,
                          input=layer_input,
                          n_visible=input_size,
                          n_hidden=hidden_layers_sizes[i],
                          W = Param_init[3*i],
                          bhid = Param_init[3*i + 1],
                          bvis = Param_init[3*i+2])
            self.dA_layers.append(dA_layer)
            self.params.extend(dA_layer.params)
            delta_i = (theano.shared(value = numpy.zeros((input_size, hidden_layers_sizes[i]),  dtype = numpy.float32), borrow=True),
                       theano.shared(value = numpy.zeros(hidden_layers_sizes[i],  dtype = numpy.float32), borrow = True ),
                        theano.shared(value = numpy.zeros(input_size,  dtype = numpy.float32), borrow = True ) )
            self.delta.extend(delta_i)
    def finetune_cost_updates(self, prototypes_y, prototypes_r, mu, learning_rate):
        """Compute the finetuning cost and momentum parameter updates.

        The clustering term is a unit-margin hinge loss: the network output
        should have a larger inner product with its assigned prototype
        (prototypes_y) than with the runner-up prototype (prototypes_r).
        Returns ((total, reconstruction, clustering, learning_rate), updates).
        """
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        # Using least-squares loss for both clustering
        # No reconstruction cost in this version
        network_output = self.get_output()
        L = T.sum(T.maximum(0, 1 + T.sum(prototypes_r * network_output, axis = 1)
                - T.sum(prototypes_y * network_output, axis = 1) ), axis = 0)
#        temp = T.pow(center - network_output, 2)
#
#        L =  T.sum(temp, axis=1)
        # Add the network reconstruction error
        z = self.get_network_reconst()
        reconst_err = T.sum(T.pow(self.x - z, 2), axis = 1)
#        reconst_err = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        L = L + self.lbd*reconst_err
        cost1 = T.mean(L)
        cost2 = self.lbd*T.mean(reconst_err)
        cost3 = cost1 - cost2
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost1, self.params)
        # generate the list of updates -- momentum SGD (the form resembles
        # Nesterov's accelerated update; TODO confirm the intended variant)
        updates = []
        for param, delta, gparam in zip(self.params, self.delta, gparams):
            updates.append( (delta, mu*delta - learning_rate * gparam) )
            updates.append( (param, param + mu*mu*delta - (1+mu)*learning_rate*gparam ))
        return ((cost1, cost2, cost3, learning_rate), updates)
    def build_finetune_functions(self, datasets, prototypes_y, prototypes_r, batch_size, mu, learning_rate):
        '''Generates a function `train` that implements one step of
        finetuning, a function `validate` that computes the error on
        a batch from the validation set, and a function `test` that
        computes the error on a batch from the testing set
        :type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contain all the datasets;
                         the has to contain three pairs, `train`,
                         `valid`, `test` in this order, where each pair
                         is formed of two Theano variables, one for the
                         datapoints, the other for the labels
        :type batch_size: int
        :param batch_size: size of a minibatch
        :type learning_rate: float
        :param learning_rate: learning rate used during finetune stage
        ONLY TRAINGING IS IMPLEMENTED, VALIDATION AND TESTING TO BE ADDED...
        '''
        (train_set_x, train_set_y) = datasets[0]
#        (valid_set_x, valid_set_y) = datasets[1]
#        (test_set_x, test_set_y) = datasets[2]
#        center=  T.matrix('center')
        # compute number of minibatches for training, validation and testing
#        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
#        n_valid_batches /= batch_size
#        n_test_batches = test_set_x.get_value(borrow=True).shape[0]
#        n_test_batches /= batch_size
        index = T.lscalar('index')  # index to a [mini]batch
        # compute the gradients with respect to the model parameters
        cost, updates = self.finetune_cost_updates(
            prototypes_y,
            prototypes_r,
            mu,
            learning_rate=learning_rate
        )
        # the minibatch data is sliced out of the shared training matrix via
        # the `givens` substitution, so only `index` crosses the GPU boundary
        train_fn = theano.function(
            inputs=[index],
            outputs= cost,
            updates=updates,
            givens={
                self.x: train_set_x[
                    index * batch_size: (index + 1) * batch_size
                ]
            },
            name='train'
        )
        return train_fn
def batch_mmc(prototypes, data, eta, lr):
    """
    One mini-batch update of max-margin clustering (MMC).

    Reference: Zhangyang Wang et.al,
    A Joint Optimization Framework of Sparse Coding and Discriminative
    Clustering, IJCAI 2015.

    Rows of `prototypes` (K x d) and `data` (N x d) are vectors. Each data
    point is assigned to the prototype with the largest inner product (y_i)
    and to the runner-up (r_i); each prototype then takes one gradient step
    of the margin loss (pull winners in, push runners-up out).

    :param prototypes: (K, d) array of current cluster prototypes.
    :param data: (N, d) array of data points in this mini-batch.
    :param eta: weight-decay coefficient on the prototypes.
    :param lr: gradient step size.
    :return: (new_assignment, new_prototypes) -- an (N, 2) int array of
        [y_i, r_i] rows and the (K, d) float32 updated prototypes.
    """
    K = prototypes.shape[0]  # number of clusters
    N = data.shape[0]        # number of data points
    # first column of assignment are the y_i's, second column are the r_i's.
    # BUG FIX: `numpy.int` was removed in NumPy 1.24 -- use the builtin int
    # dtype (behaviorally identical, numpy.int was an alias of int).
    new_assignment = numpy.zeros((N, 2), dtype=int)
    new_prototypes = numpy.zeros_like(prototypes, dtype=numpy.float32)
    # inner products between every prototype and every data point (K x N)
    prod = numpy.dot(prototypes, data.T)
    # sort each column in ascending order: the last row holds the best
    # matching prototype per point, the second-to-last the runner-up
    ind = numpy.argsort(prod, axis=0)
    new_assignment[:, 0] = ind[-1]
    new_assignment[:, 1] = ind[-2]
    for k in range(K):
        # weight-decay term, then hinge-loss gradient contributions
        grad = eta * prototypes[k]
        for n in range(N):
            if new_assignment[n, 0] == k:
                grad = grad - data[n]   # pull the winning prototype closer
            elif new_assignment[n, 1] == k:
                grad = grad + data[n]   # push the runner-up away
            else:
                continue
        new_prototypes[k] = prototypes[k] - lr * grad
    return new_assignment, new_prototypes
def load_all_data(dataset):
    ''' Loads the dataset
    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)

    Merges the train/valid/test splits into one pool for clustering, keeps
    only pixel columns 49:650, and returns [(shared_x, shared_y), 0, 0]
    where the two trailing zeros are placeholders so callers can keep
    indexing datasets[0].
    '''
    #############
    # LOAD DATA #
    #############
    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(
            os.path.split(__file__)[0],
            dataset
        )
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path
    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        import urllib
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
        )
        print 'Downloading data from %s' % origin
        urllib.urlretrieve(origin, dataset)
    print '... loading data'
    # Load the dataset
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    # Pool all three splits: clustering does not need a held-out split.
    train_x = numpy.concatenate((train_set[0], test_set[0], valid_set[0]), axis = 0)
    train_y = numpy.concatenate((train_set[1], test_set[1], valid_set[1]), axis = 0)
    # Keep only columns 49:650 -- presumably dropping near-constant border
    # pixels (the commented SVD analysis below suggests a ~95%-energy cut);
    # TODO confirm the intended rank/column choice.
    train_x_reduced = train_x[:][:, 49:650]
#    f = gzip.open('MNIST_array', 'wb')
#    cPickle.dump([train_x, train_y, train_x_reduced], f, protocol=2)
#    f.close()
#    S = numpy.linalg.svd(train_x, compute_uv = 1)
##    detecting the required rank to preserve 95% energy, the result is 427
#    aa = numpy.cumsum(S)
#    bb = numpy.sum(S)
#    for j in range(len(aa)):
#        if aa[j]/bb > 0.95:
#            break
    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables
        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # Labels are stored as floatX here (GPU storage constraint) and cast
        # to int by the consumer when needed.
        return shared_x, shared_y
    data_x, data_y  = shared_dataset((train_x_reduced, train_y))
#    test_set_x, test_set_y = shared_dataset(test_set)
#    valid_set_x, valid_set_y = shared_dataset(valid_set)
#    train_set_x, train_set_y = shared_dataset(train_set)
#
    # The value 0 won't be used, just use as a placeholder
    rval = [(data_x, data_y), 0, 0]
    return rval
def test_SdC(lbd = .01, finetune_lr= .005, mu = 0.9, pretraining_epochs=50,
pretrain_lr=.001, mmc_eta = 0.001, mmc_lr = 0.01, training_epochs=150, init = 'rbm_init_30',
dataset='mnist.pkl.gz', batch_size=20, nClass = 10, hidden_dim = [10, 10]):
"""
Demonstrates how to train and test a stochastic denoising autoencoder.
This is demonstrated on MNIST.
:type lbd: float
:param lbd: tuning parameter, multiplied on reconstruction error, i.e. the larger
lbd the larger weight on minimizing reconstruction error.
:type learning_rate: float
:param learning_rate: learning rate used in the finetune stage
(factor for the stochastic gradient)
:type pretraining_epochs: int
:param pretraining_epochs: number of epoch to do pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate to be used during pre-training
:type n_iter: int
:param n_iter: maximal number of iterations ot run the optimizer
:type dataset: string
:param dataset: path the the pickled dataset
"""
datasets = load_all_data(dataset)
train_set_x, train_set_y = datasets[0]
# valid_set_x, valid_set_y = datasets[1]
# test_set_x, test_set_y = datasets[2]
inDim = train_set_x.get_value().shape[1]
label_true = numpy.int32(train_set_y.get_value(borrow=True))
index = T.lscalar()
x = T.matrix('x')
# load the save model
# f = open('RBMs_init_1000.save', 'rb')
# param_init = cPickle.load(f)
#
# for i in range(len(param_init)):
# param_init[i] = param_init[i]/4
# load the network trained with Hinton's code
var_names = ['vishid', 'hidrecbiases', 'visbiases',
'hidpen', 'penrecbiases', 'hidgenbiases',
'hidpen2', 'penrecbiases2', 'hidgenbiases2',
'hidtop', 'toprecbiases', 'topgenbiases']
mat_init = sio.loadmat(init)
param_init = []
for i in range(12):
param_init.append( numpy.squeeze( numpy.float32(mat_init[var_names[i]]) ) )
numpy_rng = numpy.random.RandomState(125)
# RBMs = RBMs_init(
# numpy_rng=numpy_rng,
# theano_rng=None,
# n_ins=inDim,
# hidden_layers_sizes = hidden_dim, n_outs=10 ,
# )
# RBMs.params = param_init
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
# numpy random generator
# start-snippet-3
print '... building the model'
# construct the stacked denoising autoencoder class
sdc = SdC2(
numpy_rng=numpy_rng,
n_ins=inDim,
lbd = lbd,
input=x,
hidden_layers_sizes= hidden_dim,
Param_init = param_init
)
# end-snippet-3 start-snippet-4
#########################
# Load the initialization #
#########################
# print '... loading the saved initialization network'
# start_time = timeit.default_timer()
#
# for i in xrange(sdc.n_layers):
# sdc.sigmoid_layers[i].W = RBMs.params[3*i]
# sdc.sigmoid_layers[i].b = RBMs.params[3*i + 1]
#
# sdc.dA_layers[i].W = RBMs.params[3*i]
# sdc.dA_layers[i].b = RBMs.params[3*i + 1]
# sdc.dA_layers[i].b_prime = RBMs.params[3*i + 2]
# end-snippet-4
########################
# FINETUNING THE MODEL #
########################
km = MiniBatchKMeans(n_clusters = nClass, batch_size=10000)
out = sdc.get_output()
out_sdc = theano.function(
[index],
outputs = out,
givens = {x: train_set_x[index * batch_size: (index + 1) * batch_size]}
)
hidden_val = []
for batch_index in xrange(n_train_batches):
hidden_val.append(out_sdc(batch_index))
hidden_array = numpy.asarray(hidden_val)
hidden_size = hidden_array.shape
hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
# # use the true labels to get initial cluster centers
# centers = numpy.zeros((nClass, hidden_size[2]))
#
# for i in xrange(nClass):
# temp = hidden_array[label_true == i]
# centers[i] = numpy.mean(temp, axis = 0)
#
# center_array = centers[label_true]
# # Do a k-means clusering to get center_array
km_idx = km.fit_predict(hidden_array)
# initializing the prototypes
prototypes = numpy.zeros((nClass, hidden_dim[-1]), dtype = numpy.float32)
for k in range(nClass):
data_k = hidden_array[km_idx == k]
U, S, V = numpy.linalg.svd(data_k, full_matrices = 0)
prototypes[k] = V[:][:,0].T
# km_idx = copy.deepcopy(label_true)
# prototypes = numpy.random.randn(nClass, hidden_dim[-1])
nmi_dc = metrics.adjusted_mutual_info_score(label_true, km_idx)
print >> sys.stderr, ('Initial NMI for deep clustering: %.2f' % (nmi_dc))
# centers = numpy.zeros((nClass, hidden_size[2]), dtype = numpy.float32)
#
# for i in xrange(nClass):
# temp = hidden_array[km_idx == i]
# centers[i] = numpy.mean(temp, axis = 0)
## center_array = km.cluster_centers_[[km.labels_]]
# center_shared = theano.shared(numpy.zeros((batch_size, hidden_dim[-1]) ,
# dtype='float32'),
# borrow=True)
prototypes_y_shared = theano.shared(numpy.zeros((batch_size, hidden_dim[-1]) ,
dtype='float32'),
borrow=True)
prototypes_r_shared = theano.shared(numpy.zeros((batch_size, hidden_dim[-1]) ,
dtype='float32'),
borrow=True)
lr_shared = theano.shared(numpy.asarray(finetune_lr,
dtype='float32'),
borrow=True)
print '... getting the finetuning functions'
train_fn = sdc.build_finetune_functions(
datasets=datasets,
prototypes_y = prototypes_y_shared,
prototypes_r = prototypes_r_shared,
batch_size=batch_size,
mu = mu,
learning_rate=lr_shared,
)
print '... finetunning the model'
# early-stopping parameters
start_time = timeit.default_timer()
done_looping = False
epoch = 0
# count = 100*numpy.ones(nClass)
assignment = numpy.zeros((train_set_y.get_value().shape[0], 2), dtype = numpy.int)
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
c = [] # total cost
d = [] # cost of reconstruction
e = [] # cost of clustering
f = [] # learning_rate
g = []
for minibatch_index in range(n_train_batches):
# calculate the stepsize
iter = (epoch - 1) * n_train_batches + minibatch_index
lr_shared.set_value( numpy.float32(finetune_lr/numpy.sqrt(epoch)) )
# center_shared.set_value(centers[km_idx[minibatch_index * batch_size: (minibatch_index +1 ) * batch_size]])
hidden_val = out_sdc(minibatch_index) # get the hidden value, to update KM
# Perform mini-batch MMC
temp_assignment, prototypes = batch_mmc(prototypes, hidden_val, mmc_eta, mmc_lr)
assignment[minibatch_index * batch_size: (minibatch_index +1 ) * batch_size] = temp_assignment
prototypes_y_shared.set_value(prototypes[temp_assignment[:][:,0]])
prototypes_r_shared.set_value(prototypes[temp_assignment[:][:,1]])
cost = train_fn(minibatch_index)
# hidden_val = out_sdc(minibatch_index) # get the hidden value, to update KM
# # Perform mini-batch MMC
# temp_assignment, prototypes = batch_mmc(prototypes, hidden_val, mmc_eta, mmc_lr)
# assignment[minibatch_index * batch_size: (minibatch_index +1 ) * batch_size] = temp_assignment
# temp_idx, centers, count = batch_km(hidden_val, centers, count)
# center_shared.set_value(numpy.float32(temp_center))
# km_idx[minibatch_index * batch_size: (minibatch_index +1 ) * batch_size] = temp_idx
bb = sdc.dA_layers[0].W.get_value()
c.append(cost[0])
d.append(cost[1])
e.append(cost[2])
f.append(cost[3])
# gg = cost[4]
# g.append(cost[4])
# Do a k-means clusering to get center_array
# hidden_val = []
# for batch_index in xrange(n_train_batches):
# hidden_val.append(out_sdc(batch_index))
#
# hidden_array = numpy.asarray(hidden_val)
# hidden_size = hidden_array.shape
# hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
# km.fit(hidden_array)
# center_array = km.cluster_centers_[[km.labels_]]
# center_shared.set_value(numpy.asarray(center_array, dtype='float32'))
# center_shared = theano.shared(numpy.asarray(center_array ,
# dtype='float32'),
# borrow=True)
print 'Fine-tuning epoch %d ++++ \n' % (epoch),
print ('Total cost: %.5f, '%(numpy.mean(c)) + 'Reconstruction: %.5f, ' %(numpy.mean(d))
+ "Clustering: %.5f, " %(numpy.mean(e)) )
# print 'Learning rate: %.6f' %numpy.mean(f)
# get the hidden values, to make a plot
ypred = assignment[:][:, 0]
hidden_val = []
for batch_index in xrange(n_train_batches):
hidden_val.append(out_sdc(batch_index))
hidden_array = numpy.asarray(hidden_val)
hidden_size = hidden_array.shape
hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
err = numpy.mean(d)
print >> sys.stderr, ('Average squared 2-D reconstruction error: %.4f' %err)
end_time = timeit.default_timer()
# ypred = km.predict(hidden_array)
nmi_dc = metrics.adjusted_mutual_info_score(label_true, ypred)
print >> sys.stderr, ('NMI for deep clustering: %.2f' % (nmi_dc))
ari_dc = metrics.adjusted_rand_score(label_true, ypred)
print >> sys.stderr, ('ARI for deep clustering: %.2f' % (nmi_dc))
# print(
# (
# 'Optimization complete with best validation score of %f %%, '
# 'on iteration %i, '
# 'with test performance %f %%'
# )
# % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
# )
f = open('deepclus.save', 'wb')
cPickle.dump([param.get_value() for param in sdc.params], f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
print >> sys.stderr, ('The training code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
color = ['b', 'g', 'r', 'm', 'k', 'b', 'g', 'r', 'm', 'k']
marker = ['o', '+','o', '+','o', '+','o', '+','o', '+']
# Take 500 samples to plot
data_to_plot = hidden_array[0:1999]
label_plot = label_true[0:1999]
x = data_to_plot[:, 0]
y = data_to_plot[:, 1]
for i in xrange(nClass):
idx_x = x[numpy.nonzero(label_plot == i)]
idx_y = y[numpy.nonzero(label_plot == i)]
plt.figure(3)
plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
plt.legend()
plt.show()
a = 0
# if dataset == 'toy.pkl.gz':
# x = train_set_x.get_value()[:, 0]
# y = train_set_x.get_value()[:, 1]
#
# # using resulted label, and the original data
# pred_label = ypred[0:1999]
# for i in xrange(nClass):
# idx_x = x[numpy.nonzero( pred_label == i)]
# idx_y = y[numpy.nonzero( pred_label == i)]
# plt.figure(4)
# plt.scatter(idx_x, idx_y, s = 70, c = color[i], marker = marker[i], label = '%s'%i)
#
# plt.legend()
# plt.show()
# hidden_dim = [1000, 1000, 1000]
# hidden_dim = [1000, 500, 250, 30]
if __name__ == '__main__':
    # Hyper-parameters for a single MNIST run; keys map one-to-one onto the
    # keyword arguments of test_SdC above.
    params = {'lbd': 0.05,
              'finetune_lr': 0.005,
              'mu': 0.9,
              'pretraining_epochs': 50,
              'pretrain_lr': .1,
              'mmc_eta': .001,
              'mmc_lr': 1e-6,
              'training_epochs': 10,
              'init': 'rbm_reduced_30.mat',
              'dataset': 'mnist.pkl.gz',
              'batch_size': 50,
              'nClass': 10,
              'hidden_dim': [1000, 500, 250, 30]}
    test_SdC(**params)
"""
## best NMI on MNIST 0.55
params = {'lbd': 0.001,
'finetune_lr': 0.001,
'mu': 0.9,
'pretraining_epochs': 50,
'pretrain_lr': .1,
'training_epochs': 100,
'dataset': 'mnist.pkl.gz',
'batch_size': 20,
'nClass': 10,
'hidden_dim': [1000, 500, 250, 12]}
initialize with finetuned-rbms
Minibatch Kmeans, with count initialized as
count = 100*numpy.ones(nClass)
"""
| 36,816 | 37.27131 | 142 | py |
DCN | DCN-master/mnist_loader.py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 3 18:03:13 2016
Modified from: https://github.com/sorki/python-mnist/blob/master/mnist/loader.py
@author: bo
"""
import os
import struct
from array import array
import numpy as np
class MNIST(object):
def __init__(self, path='.'):
self.path = path
self.test_img_fname = 't10k-images-idx3-ubyte'
self.test_lbl_fname = 't10k-labels-idx1-ubyte'
self.train_img_fname = 'train-images-idx3-ubyte'
self.train_lbl_fname = 'train-labels-idx1-ubyte'
self.test_images = []
self.test_labels = []
self.train_images = []
self.train_labels = []
def load_testing(self):
ims, labels = self.load(os.path.join(self.path, self.test_img_fname),
os.path.join(self.path, self.test_lbl_fname))
self.test_images = ims
self.test_labels = labels
return ims, labels
def load_training(self):
ims, labels = self.load(os.path.join(self.path, self.train_img_fname),
os.path.join(self.path, self.train_lbl_fname))
self.train_images = ims
self.train_labels = labels
return ims, labels
@classmethod
def load(cls, path_img, path_lbl):
with open(path_lbl, 'rb') as file:
magic, size = struct.unpack(">II", file.read(8))
if magic != 2049:
raise ValueError('Magic number mismatch, expected 2049,'
'got {}'.format(magic))
labels = array("B", file.read())
with open(path_img, 'rb') as file:
magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
if magic != 2051:
raise ValueError('Magic number mismatch, expected 2051,'
'got {}'.format(magic))
image_data = array("B", file.read())
images = []
for i in range(size):
images.append([0] * rows * cols)
for i in range(size):
images[i][:] = image_data[i * rows * cols:(i + 1) * rows * cols]
images = np.array(images, dtype = np.float32)/255
labels = np.array(labels)
return images, labels
    @classmethod
    def display(cls, img, width=28, threshold=200):
        """Return an ASCII-art rendering of a flat image vector.

        Every `width` pixels a new line is started; pixels brighter than
        `threshold` print as '@', the rest as '.'.
        NOTE(review): threshold=200 assumes raw 0-255 pixel values, not the
        0-1 floats produced by load() -- confirm before using on load() output.
        """
        render = ''
        for i in range(len(img)):
            if i % width == 0:
                render += '\n'
            if img[i] > threshold:
                render += '@'
            else:
                render += '.'
        return render | 2,571 | 27.577778 | 80 | py |
DCN | DCN-master/pre_rcv1.py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 25 22:39:02 2016
This script is to pre-process RCV1-V2 dataset
@author: bo
"""
from sklearn.datasets import fetch_rcv1
import scipy.io as sio
import numpy
import gzip, cPickle
import os
# Hard-coded data locations; the commented-out pair below targets a cluster
# filesystem -- switch by hand as needed.
target_dir = '/home/bo/Data/RCV1/Processed'
data_home = '/home/bo/Data'
#target_dir = '/project/sidir001/yang4173/Data/RCV1/Processed'
#data_home = '/project/sidir001/yang4173/Data'
cwd = os.getcwd()
# Download (if necessary) and load the RCV1-v2 corpus via scikit-learn.
data = fetch_rcv1(data_home = data_home, download_if_missing = True)
names = data.target_names
ind = numpy.full(len(names), False, dtype = bool)
# Mark the top-level categories: topic-hierarchy lines whose child code ends
# in 'CAT' (CCAT, ECAT, GCAT, MCAT).
# NOTE(review): on the first iteration i == 0 writes ind[-1]; presumably
# harmless because the first line is the hierarchy root and never matches
# -- confirm against rcv1.topics.hier.orig.txt.
f = open(data_home + '/RCV1/rcv1.topics.hier.orig.txt', 'r')
count = 0
for i in range(len(names) + 1):
    s = f.readline()
    if s[9:12] == 'CAT':
        ind[i - 1] = True
        count = count + 1
f.close()
# Restrict the sparse label matrix to the selected top-level categories.
labels = data.target[:][:, ind].copy()
labels = labels.toarray()
t = labels.sum(axis = 1, keepdims = False)
single_docs = numpy.where(t == 1)[0]
# keep only the documents with single label
labels = labels[single_docs]
docs = data.data[single_docs]
# Order the categories by decreasing document frequency.
count = labels.sum(axis = 0, keepdims = False)
ind = numpy.argsort(count)
ind = ind[::-1]
sort_count = count[ind]
# Create subset of data top-4, top-8, ... top-20
# The first cluster is removed, due to its huge size
os.chdir(target_dir)
# save the whole training set
train_x = docs
train_y = labels.argmax(axis = 1)
sio.savemat('rcv_whole',{'train_x': train_x, 'train_y':train_y})
#
#for i in range(5):
i = 0
t = (i+1)*4+1
# Documents belonging to any of the chosen clusters (cluster 0 is skipped).
ind_sub = labels[:][:, ind[1:t]]
doc_ind = numpy.logical_or.reduce(ind_sub, axis = 1)
train_x = docs[doc_ind]
# pick the most-frequent 2000 features
# NOTE(review): the slice below actually keeps 3000 features, not 2000 --
# the comment and the code disagree; confirm which is intended.
frequency = numpy.squeeze(numpy.asarray(train_x.sum(axis = 0)))
fre_ind = numpy.argsort(frequency)
fre_ind = fre_ind[::-1]
train_x = train_x[:][:, fre_ind[0:3000]]
train_y_mat = labels[doc_ind]
train_y = train_y_mat.argmax(axis = 1)
# NOTE(review): the output name uses the literal 44 rather than i;
# presumably a leftover from a manual run -- confirm before regenerating.
with gzip.open('data-'+str(44)+'.pkl.gz', 'wb') as f:
    cPickle.dump((train_x, train_y), f, protocol = 2)
sio.savemat('data-'+str(i), {'train_x': train_x, 'train_y': train_y})
z = 1
os.chdir(cwd)
| 2,072 | 23.678571 | 69 | py |
DCN | DCN-master/preprocess.py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 12 09:33:52 2016
Perform pre-processing on MNIST dataset
@author: bo
"""
import os
import sys
import timeit
import scipy.io as sio
import copy
import scipy
import numpy
import cPickle
import gzip
from sklearn.neighbors import kneighbors_graph
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.decomposition import NMF
#f = gzip.open('/home/bo/Data/MNIST_array', 'rb')
# Load the pickled MNIST arrays.
# train_x_reduced means the zeros are taken out directely, resulting a 601
# dimension vector for each image.  There are in total 70000 images.
# BUG FIX: the original wrote `f.close` (attribute access, never called), so
# the gzip handle was never closed; a with-block now guarantees release.
with gzip.open('/home/yang4173/Data/MNIST_array', 'rb') as f:
    train_x, train_y, train_x_reduced = cPickle.load(f)
# try on a small portion
#train_x = train_x[0:100]
k = 10      # neighbours per point in the k-NN graph
D = 1000    # target dimensionality of the embedding
# EVD (eigendecomposition) alternative, kept for reference below.
A = kneighbors_graph(train_x, k)
#eig_val, eig_vec = scipy.sparse.linalg.eigs(A, D)
# NMF: factorise the k-NN affinity graph; the left factor W is kept as the
# D-dimensional representation of each image.
model = NMF(n_components = D)
start_time = timeit.default_timer()
W = model.fit_transform(A)
end_time = timeit.default_timer()
training_time = end_time - start_time
print('The NMF algorithm runs for: %.4f min.' %(training_time/60.))
sio.savemat('reduced', {'W': W, 'train_y': train_y})
#f = gzip.open('preprocessed_mnist_nmf.pkl.gz', 'wb')
#cPickle.dump(W, f, protocol = 2)
#f.close
#
#f = gzip.open('preprocessed_mnist', 'wb')
#cPickle.dump([eig_val, eig_vec], f, protocol = 2)
#f.close | 1,348 | 21.483333 | 106 | py |
DCN | DCN-master/deepclustering.py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 10:38:06 2016
@author: bo
"""
import os
import sys
import timeit
import numpy
import cPickle
import gzip
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from sklearn import metrics
from sklearn.cluster import MiniBatchKMeans
#from sklearn.cluster import KMeans
#import matplotlib.pyplot as plt
from utils import tile_raster_images
#from logistic_sgd import LogisticRegression
#from mlp import HiddenLayer
from dA import dA
try:
import PIL.Image as Image
except ImportError:
import Image
class deep_clus (dA):
    """
    Denoising autoencoder whose training cost couples reconstruction with
    clustering: each sample is additionally pulled towards a per-sample
    cluster centre supplied to get_cost_updates.
    Inherit from dA class in denoising autoencoder example.
    """
    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input=None,
        n_visible=784,
        n_hidden=500,
        W=None,
        bhid=None,
        bvis=None
    ):
        """
        Initialize the dA class by specifying the number of visible units (the
        dimension d of the input ), the number of hidden units ( the dimension
        d' of the latent or hidden space ) and the corruption level. The
        constructor also receives symbolic variables for the input, weights and
        bias. Such a symbolic variables are useful when, for example the input
        is the result of some computations, or when weights are shared between
        the dA and an MLP layer. When dealing with SdAs this always happens,
        the dA on layer 2 gets as input the output of the dA on layer 1,
        and the weights of the dA are used in the second stage of training
        to construct an MLP.
        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: number random generator used to generate weights
        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`
        :type input: theano.tensor.TensorType
        :param input: a symbolic description of the input or None for
                      standalone dA
        :type n_visible: int
        :param n_visible: number of visible units
        :type n_hidden: int
        :param n_hidden: number of hidden units
        :type W: theano.tensor.TensorType
        :param W: Theano variable pointing to a set of weights that should be
                  shared belong the dA and another architecture; if dA should
                  be standalone set this to None
        :type bhid: theano.tensor.TensorType
        :param bhid: Theano variable pointing to a set of biases values (for
                     hidden units) that should be shared belong dA and another
                     architecture; if dA should be standalone set this to None
        :type bvis: theano.tensor.TensorType
        :param bvis: Theano variable pointing to a set of biases values (for
                     visible units) that should be shared belong dA and another
                     architecture; if dA should be standalone set this to None
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        # create a Theano random generator that gives symbolic random values
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # note : W' was written as `W_prime` and b' as `b_prime`
        if not W:
            # W is initialized with `initial_W` which is uniformely sampled
            # from -4*sqrt(6./(n_visible+n_hidden)) and
            # 4*sqrt(6./(n_hidden+n_visible))the output of uniform if
            # converted using asarray to dtype
            # theano.config.floatX so that the code is runable on GPU
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)
        if not bvis:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        if not bhid:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )
        self.W = W
        # b corresponds to the bias of the hidden
        self.b = bhid
        # b_prime corresponds to the bias of the visible
        self.b_prime = bvis
        # tied weights, therefore W_prime is W transpose
        self.W_prime = self.W.T
        self.theano_rng = theano_rng
        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name='input')
        else:
            self.x = input
        self.params = [self.W, self.b, self.b_prime]
    def get_cost_updates(self, center, corruption_level, learning_rate):
        """ This function computes the cost and the updates .

        :param center: symbolic matrix of cluster centres, one row per sample
            of the minibatch, aligned with self.x
        :param corruption_level: fraction of input entries to corrupt before
            encoding
        :param learning_rate: SGD step size
        :returns: (cost, updates) -- mean per-sample loss and the SGD update
            list for self.params
        """
        tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(y)
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        # Using least-squares loss for both clustering and reconstruction:
        # temp1 pulls the hidden code y towards its cluster centre,
        # temp2 is the usual reconstruction error.
        temp1 = T.pow(center - y, 2)
        temp2 = T.pow(self.x - z, 2)
        L = T.sum(temp1 , axis=1) + T.sum(temp2 , axis=1)
        # note : L is now a vector, where each element is the
        #        cross-entropy cost of the reconstruction of the
        #        corresponding example of the minibatch. We need to
        #        compute the average of all these to get the cost of
        #        the minibatch
        cost = T.mean(L)
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]
        return (cost, updates)
def load_data(dataset):
    ''' Loads the dataset
    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)

    :returns: [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
        as pairs of Theano shared variables (labels stored as floatX).
    '''
    #############
    # LOAD DATA #
    #############
    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(
            os.path.split(__file__)[0],
            dataset
        )
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path
    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        import urllib
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
        )
        print 'Downloading data from %s' % origin
        urllib.urlretrieve(origin, dataset)
    print '... loading data'
    # Load the dataset
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    # Wrap each split in Theano shared variables so minibatches can be
    # sliced on the GPU.
    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables
        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets ous get around this issue
        #return shared_x, T.cast(shared_y, 'int32')
        return shared_x, shared_y
    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)
    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
# Prepare data
def deepclustering(learning_rate=0.1, training_epochs=15,
                   dataset='mnist.pkl.gz',
                   batch_size=20, output_folder='dA_plots'):
    """
    This demo is tested on MNIST.  It initialises a joint
    reconstruction+clustering autoencoder (deep_clus) from a previously
    trained denoising autoencoder ('no_corruption.save'), alternates SGD on
    the coupled loss with MiniBatchKMeans re-fitting of the cluster centres,
    and reports NMI against the true labels.
    :type learning_rate: float
    :param learning_rate: learning rate used for training the DeNosing
                          AutoEncoder
    :type training_epochs: int
    :param training_epochs: number of epochs used for training
    :type dataset: string
    :param dataset: path to the pickled dataset
    """
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]
    nHid = 2000
    # Load the saved dA object, to initialize our model
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    label_true = train_set_y.get_value(borrow=True)
    # start-snippet-2
    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    center= T.matrix('center')
    # end-snippet-2
    #if not os.path.isdir(output_folder):
    #    os.makedirs(output_folder)
    #    os.chdir(output_folder)
    ####################################
    # BUILDING THE MODEL NO CORRUPTION #
    ####################################
    #Train a denosing autoencoder to initialize my own network, and provide latent representation for initializing clusteing
    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))
    # Instancialize a dA class
    # To get the initial clustering information
    # NOTE(review): this file handle is never closed -- consider a with-block.
    f = open('no_corruption.save', 'rb')
    no_corruption = cPickle.load(f)
    init_da = dA(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=x,
        n_visible=28 * 28,
        n_hidden=nHid ,
    )
    init_da.params = no_corruption
    hid = init_da.get_hidden_values(x)
    hidden_da = theano.function(
        [index],
        outputs = hid,
        givens = {x: train_set_x[index * batch_size: (index + 1) * batch_size]}
    )
    # go through training epochs
    km = MiniBatchKMeans(n_clusters = 10, batch_size=100)
    # Baseline 1: k-means directly on raw pixels.
    train_array = train_set_x.get_value()
    ypred = km.fit_predict(train_array)
    nmi_data = metrics.normalized_mutual_info_score(label_true, ypred)
    hidden_val = []
    for batch_index in xrange(n_train_batches):
        hidden_val.append(hidden_da(batch_index))
    hidden_array = numpy.asarray(hidden_val)
    hidden_size = hidden_array.shape
    hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
    # Baseline 2 ("disjoint"): k-means on the pretrained hidden codes.
    # Do a k-means clusering to get center_array
    ypred = km.fit_predict(hidden_array)
    nmi_disjoint = metrics.normalized_mutual_info_score(label_true, ypred)
    # One centre row per sample: centre of the cluster each sample belongs to.
    center_array = km.cluster_centers_[[km.labels_]]
    center_shared = theano.shared(numpy.asarray(center_array ,
                                                dtype='float32'),
                                  borrow=True)
    dc = deep_clus(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=x,
        n_visible=28 * 28,
        n_hidden=nHid
    )
    cost, updates = dc.get_cost_updates(
        center,
        corruption_level=0.,
        learning_rate=learning_rate
    )
    #reconst = da.get_reconstructed_input(hidden)
    # training a pure denoising autoencoder, without clustering, to get initial values to cluster
    train_dc = theano.function(
        inputs = [index],
        outputs = cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            center: center_shared[index * batch_size: (index + 1) * batch_size]
        }
    )
    start_time = timeit.default_timer()
    ############
    # TRAINING #
    ############
    for epoch in xrange(training_epochs):
        # go through trainng set
        c = []
        for batch_index in xrange(n_train_batches):
            cost_batch = train_dc(batch_index)
            c.append(cost_batch)
        print 'Training epoch %d, cost ' % epoch, numpy.mean(c)
        hidden_val = []
        for batch_index in xrange(n_train_batches):
            hidden_val.append( hidden_da(batch_index))
        hidden_array = numpy.asarray(hidden_val)
        hidden_size = hidden_array.shape
        hidden_array = numpy.reshape(hidden_array, (hidden_size[0] * hidden_size[1], hidden_size[2] ))
        # Warm-start k-means from the previous centres and re-fit.
        km.init = km.cluster_centers_
        km.fit(hidden_array)
        center_array = km.cluster_centers_[[km.labels_]]
        # NOTE(review): rebinding the Python name `center_shared` does NOT
        # update the shared variable captured in train_dc's givens at compile
        # time -- the compiled function keeps reading the ORIGINAL centres.
        # Presumably center_shared.set_value(...) was intended; confirm.
        center_shared = theano.shared(numpy.asarray(center_array ,
                                                    dtype='float32'),
                                      borrow=True)
#        print 'Training epoch %d, cost ' % epoch, numpy.mean(c)
    end_time = timeit.default_timer()
    ypred = km.predict(hidden_array)
    # NOTE(review): this computes ADJUSTED mutual information but is printed
    # below as "Normalized" -- the two baselines use normalized_mutual_info.
    nmi_dc = metrics.adjusted_mutual_info_score(label_true, ypred)
    print 'Normalized mutual info for data KMeans: ' , nmi_data
    print 'Normalized mutual info for disjoint clustering: ' , nmi_disjoint
    print 'Normalized mutual info for deep clustering: ' , nmi_dc
    training_time = (end_time - start_time)
    print >> sys.stderr, ('The no corruption code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((training_time) / 60.))
    # Visualise the learned first-layer filters as a tiled image.
    image = Image.fromarray(
        tile_raster_images(X=dc.W.get_value(borrow=True).T,
                           img_shape=(28, 28), tile_shape=(10, 10),
                           tile_spacing=(1, 1)))
    image.save('filters_corruption_0.png')
# Script entry point: run deep clustering with the default hyper-parameters.
if __name__ == '__main__':
    deepclustering()
| 15,544 | 34.490868 | 124 | py |
DCN | DCN-master/RBMs_init.py | """
7/11/2016 Modified from DBN.py in DeepLearningTutorials. The purpose of this script
is to perform layerwise pretraining using RBM, and save the trained network for later
use. The fine-tuning part is thus removed.
"""
import os
import sys
import timeit
from six.moves import cPickle
import numpy
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from rbm import RBM
#from DBN import DBN
# start-snippet-1
class RBMs_init(object):
    """Deep Belief Network
    A deep belief network is obtained by stacking several RBMs on top of each
    other. The hidden layer of the RBM at layer `i` becomes the input of the
    RBM at layer `i+1`. The first layer RBM gets as input the input of the
    network, and the hidden layer of the last RBM represents the output. When
    used for classification, the DBN is treated as a MLP, by adding a logistic
    regression layer on top.

    This variant exists only to pre-train the stack layer-wise and save the
    RBM parameters; the fine-tuning path is built but unused here.
    """
    # NOTE(review): mutable default argument [500, 500] below is shared
    # across calls; it is only read (len/indexing), so benign as written.
    def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
                 hidden_layers_sizes=[500, 500], n_outs=10):
        """This class is made to support a variable number of layers.
        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights
        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`
        :type n_ins: int
        :param n_ins: dimension of the input to the DBN
        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layers size, must contain
                               at least one value
        :type n_outs: int
        :param n_outs: dimension of the output of the network
        """
        self.sigmoid_layers = []
        self.rbm_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        assert self.n_layers > 0
        if not theano_rng:
            theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')  # the data is presented as rasterized images
        self.y = T.ivector('y')  # the labels are presented as 1D vector
                                 # of [int] labels
        # end-snippet-1
        # The DBN is an MLP, for which all weights of intermediate
        # layers are shared with a different RBM. We will first
        # construct the DBN as a deep multilayer perceptron, and when
        # constructing each sigmoidal layer we also construct an RBM
        # that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to chainging the
        # weights of the MLP as well) During finetuning we will finish
        # training the DBN by doing stochastic gradient descent on the
        # MLP.
        for i in range(self.n_layers):
            # construct the sigmoidal layer
            # the size of the input is either the number of hidden
            # units of the layer below or the input size if we are on
            # the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]
            # the input to this layer is either the activation of the
            # hidden layer below or the input of the DBN if you are on
            # the first layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output
            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=T.nnet.sigmoid)
            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)
            # its arguably a philosophical question...  but we are
            # going to only declare that the parameters of the
            # sigmoid_layers are parameters of the DBN. The visible
            # biases in the RBM are parameters of those RBMs, but not
            # of the DBN.
            # self.params.extend(sigmoid_layer.params)
            # Construct an RBM that shared weights with this layer
            rbm_layer = RBM(numpy_rng=numpy_rng,
                            theano_rng=theano_rng,
                            input=layer_input,
                            n_visible=input_size,
                            n_hidden=hidden_layers_sizes[i],
                            W=sigmoid_layer.W,
                            hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)
            # Unlike the tutorial DBN, the full RBM parameter set
            # (including visible biases) is collected here so it can be
            # pickled after pretraining.
            self.params.extend(rbm_layer.params)
        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
            input=self.sigmoid_layers[-1].output,
            n_in=hidden_layers_sizes[-1],
            n_out=n_outs)
        # self.params.extend(self.logLayer.params)
        # compute the cost for second phase of training, defined as the
        # negative log likelihood of the logistic regression (output) layer
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)
    def pretraining_functions(self, train_set_x, batch_size, k):
        '''Generates a list of functions, for performing one step of
        gradient descent at a given layer. The function will require
        as input the minibatch index, and to train an RBM you just
        need to iterate, calling the corresponding function on all
        minibatch indexes.
        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared var. that contains all datapoints used
                            for training the RBM
        :type batch_size: int
        :param batch_size: size of a [mini]batch
        :param k: number of Gibbs steps to do in CD-k / PCD-k

        :returns: one compiled theano function per RBM layer, each taking
            (index, lr) and returning the layer's CD-k cost.
        '''
        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        learning_rate = T.scalar('lr')  # learning rate to use
        # number of batches
        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        # begining of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size
        pretrain_fns = []
        for rbm in self.rbm_layers:
            # get the cost and the updates list
            # using CD-k here (persisent=None) for training each RBM.
            # TODO: change cost function to reconstruction error
            cost, updates = rbm.get_cost_updates(learning_rate,
                                                 persistent=None, k=k)
            # compile the theano function
            fn = theano.function(
                inputs=[index, theano.In(learning_rate, value=0.1)],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin:batch_end]
                }
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)
        return pretrain_fns
    def build_finetune_functions(self, datasets, batch_size, learning_rate):
        '''Generates a function `train` that implements one step of
        finetuning, a function `validate` that computes the error on a
        batch from the validation set, and a function `test` that
        computes the error on a batch from the testing set
        :type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contain all the datasets;
                        the has to contain three pairs, `train`,
                        `valid`, `test` in this order, where each pair
                        is formed of two Theano variables, one for the
                        datapoints, the other for the labels
        :type batch_size: int
        :param batch_size: size of a minibatch
        :type learning_rate: float
        :param learning_rate: learning rate used during finetune stage
        '''
        (train_set_x, train_set_y) = datasets[0]
        (valid_set_x, valid_set_y) = datasets[1]
        (test_set_x, test_set_y) = datasets[2]
        # compute number of minibatches for training, validation and testing
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
        n_valid_batches /= batch_size
        n_test_batches = test_set_x.get_value(borrow=True).shape[0]
        n_test_batches /= batch_size
        index = T.lscalar('index')  # index to a [mini]batch
        # compute the gradients with respect to the model parameters
        gparams = T.grad(self.finetune_cost, self.params)
        # compute list of fine-tuning updates
        updates = []
        for param, gparam in zip(self.params, gparams):
            updates.append((param, param - gparam * learning_rate))
        train_fn = theano.function(
            inputs=[index],
            outputs=self.finetune_cost,
            updates=updates,
            givens={
                self.x: train_set_x[
                    index * batch_size: (index + 1) * batch_size
                ],
                self.y: train_set_y[
                    index * batch_size: (index + 1) * batch_size
                ]
            }
        )
        test_score_i = theano.function(
            [index],
            self.errors,
            givens={
                self.x: test_set_x[
                    index * batch_size: (index + 1) * batch_size
                ],
                self.y: test_set_y[
                    index * batch_size: (index + 1) * batch_size
                ]
            }
        )
        valid_score_i = theano.function(
            [index],
            self.errors,
            givens={
                self.x: valid_set_x[
                    index * batch_size: (index + 1) * batch_size
                ],
                self.y: valid_set_y[
                    index * batch_size: (index + 1) * batch_size
                ]
            }
        )
        # Create a function that scans the entire validation set
        def valid_score():
            return [valid_score_i(i) for i in range(n_valid_batches)]
        # Create a function that scans the entire test set
        def test_score():
            return [test_score_i(i) for i in range(n_test_batches)]
        return train_fn, valid_score, test_score
def test_DBN(finetune_lr=0.1, pretraining_epochs= 20,
             pretrain_lr=0.01, k=1, training_epochs=100,
             dataset='mnist.pkl.gz', batch_size=10, hidden_dim = [1000, 1000, 1000]):
    """
    Demonstrates how to train and test a Deep Belief Network.
    This is demonstrated on MNIST.  In this file only the layer-wise RBM
    pretraining is executed; the trained parameters are pickled and the
    fine-tuning stage is left commented out below.
    :type finetune_lr: float
    :param finetune_lr: learning rate used in the finetune stage
    :type pretraining_epochs: int
    :param pretraining_epochs: number of epoch to do pretraining
    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training
    :type k: int
    :param k: number of Gibbs steps in CD/PCD
    :type training_epochs: int
    :param training_epochs: maximal number of iterations ot run the optimizer
    :type dataset: string
    :param dataset: path the the pickled dataset
    :type batch_size: int
    :param batch_size: the size of a minibatch
    """
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    # numpy random generator
    numpy_rng = numpy.random.RandomState(123)
    print '... building the model'
    # construct the Deep Belief Network
    rbms = RBMs_init(numpy_rng=numpy_rng, theano_rng=None, n_ins=28 * 28,
              hidden_layers_sizes=hidden_dim,
              n_outs=10)
    # start-snippet-2
    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = rbms.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size,
                                                k=k)
    print '... pre-training the model'
    start_time = timeit.default_timer()
    ## Pre-train layer-wise
    for i in range(rbms.n_layers):
        # go through pretraining epochs
        for epoch in range(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in range(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                                            lr=pretrain_lr))
            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print numpy.mean(c)
    end_time = timeit.default_timer()
    # end-snippet-2
    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    # Persist the raw parameter arrays for later network initialisation.
    # NOTE(review): the file name is hard-coded and does not reflect
    # hidden_dim -- running with a different architecture silently
    # overwrites 'RBMs_init_1000.save'; confirm this is intended.
    f = open('RBMs_init_1000.save', 'wb')
    cPickle.dump([param.get_value() for param in rbms.params], f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()
    # ########################
    # # FINETUNING THE MODEL #
    # ########################
    #
    # # get the training, validation and testing function for the model
    # print '... getting the finetuning functions'
    # train_fn, validate_model, test_model = dbn.build_finetune_functions(
    #     datasets=datasets,
    #     batch_size=batch_size,
    #     learning_rate=finetune_lr
    # )
    #
    # print '... finetuning the model'
    # # early-stopping parameters
    # patience = 4 * n_train_batches  # look as this many examples regardless
    # patience_increase = 2.    # wait this much longer when a new best is
    #                           # found
    # improvement_threshold = 0.995  # a relative improvement of this much is
    #                                # considered significant
    # validation_frequency = min(n_train_batches, patience / 2)
    #                               # go through this many
    #                               # minibatches before checking the network
    #                               # on the validation set; in this case we
    #                               # check every epoch
    #
    # best_validation_loss = numpy.inf
    # test_score = 0.
    # start_time = timeit.default_timer()
    #
    # done_looping = False
    # epoch = 0
    #
    # while (epoch < training_epochs) and (not done_looping):
    #     epoch = epoch + 1
    #     for minibatch_index in range(n_train_batches):
    #
    #         minibatch_avg_cost = train_fn(minibatch_index)
    #         iter = (epoch - 1) * n_train_batches + minibatch_index
    #
    #         if (iter + 1) % validation_frequency == 0:
    #
    #             validation_losses = validate_model()
    #             this_validation_loss = numpy.mean(validation_losses)
    #             print(
    #                 'epoch %i, minibatch %i/%i, validation error %f %%'
    #                 % (
    #                     epoch,
    #                     minibatch_index + 1,
    #                     n_train_batches,
    #                     this_validation_loss * 100.
    #                 )
    #             )
    #
    #             # if we got the best validation score until now
    #             if this_validation_loss < best_validation_loss:
    #
    #                 #improve patience if loss improvement is good enough
    #                 if (
    #                     this_validation_loss < best_validation_loss *
    #                     improvement_threshold
    #                 ):
    #                     patience = max(patience, iter * patience_increase)
    #
    #                 # save best validation score and iteration number
    #                 best_validation_loss = this_validation_loss
    #                 best_iter = iter
    #
    #                 # test it on the test set
    #                 test_losses = test_model()
    #                 test_score = numpy.mean(test_losses)
    #                 print(('     epoch %i, minibatch %i/%i, test error of '
    #                        'best model %f %%') %
    #                       (epoch, minibatch_index + 1, n_train_batches,
    #                        test_score * 100.))
    #
    #         if patience <= iter:
    #             done_looping = True
    #             break
    #
    # end_time = timeit.default_timer()
    # print(
    #     (
    #         'Optimization complete with best validation score of %f %%, '
    #         'obtained at iteration %i, '
    #         'with test performance %f %%'
    #     ) % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
    # )
    # print >> sys.stderr, ('The fine tuning code for file ' +
    #                       os.path.split(__file__)[1] +
    #                       ' ran for %.2fm' % ((end_time - start_time)
    #                                           / 60.))
# hidden_dim = [1000, 1000, 1000]
# hidden_dim = [1000, 500, 250, 2]
if __name__ == '__main__':
    # Pre-train a 784-1000-500-250-2 RBM stack on MNIST with the settings
    # used for the saved initialisation.
    run_config = {
        'finetune_lr': 0.1,
        'pretraining_epochs': 100,
        'pretrain_lr': 0.01,
        'k': 1,
        'training_epochs': 100,
        'dataset': 'mnist.pkl.gz',
        'batch_size': 10,
        'hidden_dim': [1000, 500, 250, 2],
    }
    test_DBN(**run_config)
| 18,012 | 38.158696 | 100 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.