repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
Joshinn-io/augur | workers/message_insights_worker/message_novelty.py | import logging
import multiprocessing
import os
from datetime import date
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from keras.layers import Dense, Input
from keras.models import Model, load_model
from scipy.spatial.distance import cosine
from skimage.filters import threshold_otsu
from sklearn import utils as skl_utils
from augur import ROOT_AUGUR_DIRECTORY
from workers.message_insights_worker.preprocess_text import \
normalize_corpus as normalize_corpus
# Directory holding the pretrained Doc2Vec model used by novelty_analysis().
train_path = os.path.join(ROOT_AUGUR_DIRECTORY, "workers", "message_insights_worker", "train_data")
''' Doc2Vec model training
def build_model(max_epochs, vec_size, alpha, tag_data):
model = Doc2Vec(vector_size=vec_size, alpha=alpha,min_alpha=0.00025, min_count=2, dm=1)
model.build_vocab(tag_data)
for epoch in range(max_epochs):
model.train(skl_utils.shuffle(tag_data),
total_examples=model.corpus_count,
epochs=model.epochs)
model.alpha -= 0.0002
model.min_alpha = model.alpha
model.save("doc2vec.model")
print("Model Saved")
return model
'''
def autoencoder(vec_input, train):
    """Build and fit a small symmetric autoencoder on `train`.

    :param vec_input: int, width of the input vectors
    :param train: array-like of shape (n_samples, vec_input), training data
    :return: compiled, fitted keras Model mapping inputs to reconstructions
    """
    inputs = Input(shape=(vec_input,))
    # Encoder: halve the width, then squeeze to a single sigmoid unit.
    hidden = Dense(vec_input // 2, activation='sigmoid')(inputs)
    bottleneck = Dense(1, activation='sigmoid')(hidden)
    # Decoder: mirror the encoder back up to the original width.
    expanded = Dense(vec_input // 2, activation='tanh')(bottleneck)
    outputs = Dense(vec_input, activation='tanh')(expanded)
    # Combine encoder and decoder into one trainable model.
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error'])
    # Autoencoder training: the target is the input itself.
    model.fit(train, train, epochs=20)
    return model
def reconstruction(pred, val):
    """Per-row L2 reconstruction error between predictions and originals.

    :param pred: array-like of shape (n, d), reconstructed vectors
    :param val: array-like of shape (n, d), original vectors
    :return: numpy array of shape (n,), Euclidean distance for each row
    """
    # Vectorized row-wise Euclidean norm; equivalent to the previous
    # per-row Python loop but computed in a single NumPy call.
    return np.linalg.norm(np.asarray(pred) - np.asarray(val), axis=1)
def get_normal_data(rec_error, val):
    """Split off the "normal" rows of `val` using an Otsu cut on the errors.

    :param rec_error: numpy array of per-row reconstruction errors
    :param val: array of the corresponding data rows
    :return: tuple (threshold, numpy array of rows whose error < threshold)
    """
    # Otsu picks the cut that maximizes between-class variance of the errors.
    threshold = threshold_otsu(rec_error)
    # Keep only rows whose reconstruction error falls below the cut.
    normals = np.array([vec for err, vec in zip(rec_error, val) if err < threshold])
    return threshold, normals
''' Cosine similarity based novel detection
def key_cosine_similarity(tupple):
return tupple[1]
def get_computed_similarities(df_present, vectors, predicted_vectors, reverse=False):
data_size = len(df_present)
cosine_similarities = []
cosine_sim_values = []
for i in range(data_size):
cosine_sim_val = (1 - cosine(vectors[i], predicted_vectors[i]))
cosine_similarities.append((df_present['msg_id'].iloc[i], cosine_sim_val))
cosine_sim_values.append(cosine_sim_val)
df_present['uniqueness_score'] = cosine_sim_values
return df_present, sorted(cosine_similarities, key=key_cosine_similarity, reverse=reverse)
def display_unique(sorted_cosine_similarities):
i=0
unique_message_list=[]
cos_val = []
index, cosine_sim_val = sorted_cosine_similarities[0]
while cosine_sim_val<=-0.1:
if cosine_sim_val not in cos_val:
unique_message_list.append(index)
cos_val.append(cosine_sim_val)
print('Message id: ', index)
print('Cosine Sim Val :', cosine_sim_val)
i+=1
index, cosine_sim_val = sorted_cosine_similarities[i]
return unique_message_list
'''
def novelty_analysis(df_message, r_id, models_dir, full_train, logger=logging):
    """Compute per-message novelty (reconstruction error) for a repo's messages.

    :param df_message: DataFrame with a 'msg_text' column (mutated in place:
        a 'cleaned_msg_text' column is added)
    :param r_id: repo id, used to name the saved/loaded autoencoder file
    :param models_dir: directory where per-repo autoencoder models live
    :param full_train: bool, True = train autoencoders from scratch and save,
        False = load the previously saved model
    :return: tuple (threshold, numpy array of reconstruction errors);
        threshold is 0 when a pretrained model is loaded (caller is expected
        to reuse its previously stored threshold — TODO confirm)
    """
    # Normalize text corpus
    df_message['cleaned_msg_text'] = df_message['msg_text'].map(lambda x: normalize_corpus(x))
    logger.info('Normalized text corpus')
    # Load pretrained Doc2Vec model
    d2v_model = Doc2Vec.load(os.path.join(train_path,"doc2vec.model"))
    doc2vec_vectors = np.array([d2v_model.infer_vector(str(row['cleaned_msg_text']).split())for index, row in df_message.iterrows()])
    logger.info('Doc2Vec vectorization done')
    # Trains the AE model when worker runs first time
    if full_train:
        # First autoencoder to identify normal data records
        # NOTE(review): 250 is assumed to equal the Doc2Vec vector size — confirm
        # it matches the pretrained model's dimensionality.
        ae1 = autoencoder(250, doc2vec_vectors)
        logger.info('AE 1 training done')
        pred_train = ae1.predict(doc2vec_vectors)
        _rec_error1 = reconstruction(pred_train, doc2vec_vectors)
        _, normal_data = get_normal_data(_rec_error1, doc2vec_vectors)
        # Second autoencoder to decide threshold using otsu
        ae = autoencoder(250, normal_data)
        logger.info('AE 2 training done')
        predicted_vectors = ae.predict(doc2vec_vectors)
        rec_error = reconstruction(predicted_vectors, doc2vec_vectors)
        threshold, _ = get_normal_data(rec_error, doc2vec_vectors)
        # Save final model for future
        ae.save(f'{models_dir}/{r_id}_uniq.h5')
    # Pretrained AE model already exists, directly predict
    else:
        threshold = 0
        ae = load_model(f'{models_dir}/{r_id}_uniq.h5')
        logger.info('Loaded pretrained AE model for repo')
    # Fitting on present data
    predicted_vectors_test = ae.predict(doc2vec_vectors)
    rec_error = reconstruction(predicted_vectors_test, doc2vec_vectors)
    return (threshold, np.array(rec_error))
|
Joshinn-io/augur | augur/cli/_multicommand.py | <reponame>Joshinn-io/augur<filename>augur/cli/_multicommand.py
#SPDX-License-Identifier: MIT
"""
Runs Augur with Gunicorn when called
"""
import os
import sys
import click
import importlib
import augur.application
# Environment variables prefixed AUGUR_ are picked up as option values by click.
CONTEXT_SETTINGS = dict(auto_envvar_prefix='AUGUR')
class AugurMultiCommand(click.MultiCommand):
    """Click multi-command that discovers subcommands from sibling .py files."""

    def __commands_folder(self):
        # Directory containing this module; its non-private .py files are commands.
        return os.path.abspath(os.path.dirname(__file__))

    def list_commands(self, ctx):
        """Return the sorted names of every non-private .py module in the folder."""
        return sorted(
            fname[:-3]
            for fname in os.listdir(self.__commands_folder())
            if fname.endswith('.py') and not fname.startswith('_')
        )

    def get_command(self, ctx, name):
        """Import augur.cli.<name> and return its `cli` entry point."""
        try:
            return importlib.import_module('.' + name, 'augur.cli').cli
        except ModuleNotFoundError:
            # Unknown command: returning None lets click report the error.
            return None
@click.command(cls=AugurMultiCommand, context_settings=CONTEXT_SETTINGS)
@click.pass_context
def run(ctx):
    """
    Augur is an application for open source community health analytics
    """
    # Top-level entry point; subcommand dispatch is handled by AugurMultiCommand.
    return ctx
|
Joshinn-io/augur | tests/test_metrics/test_commit_metrics.py | #SPDX-License-Identifier: MIT
import pytest
def test_annual_commit_count_ranked_by_repo_in_repo_group(metrics):
    # Smoke tests: the top-ranked row should report positive net commits,
    # both for the whole repo group (10) and for a single repo (25430).
    assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10).iloc[0].net > 0
    assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0
def test_annual_commit_count_ranked_by_new_repo_in_repo_group(metrics):
    # Same smoke check as above, restricted to "new" repos.
    assert metrics.annual_commit_count_ranked_by_new_repo_in_repo_group(10).iloc[0].net > 0
    assert metrics.annual_commit_count_ranked_by_new_repo_in_repo_group(10, 25430).iloc[0].net > 0
def test_top_committers(metrics):
    # Exercise every parameter combination: year filter, threshold filter,
    # repo-group-only vs repo-specific, and the bare call.
    assert metrics.top_committers(10, year=2017).iloc[0]['commits'] > 0
    assert metrics.top_committers(10, year=2017, threshold=0.7).iloc[0]['commits'] > 0
    assert metrics.top_committers(10, 25430, year=2017).iloc[0]['commits'] > 0
    assert metrics.top_committers(10, 25430, year=2017, threshold=0.7).iloc[0]['commits'] > 0
    assert metrics.top_committers(10).iloc[0]['commits'] > 0
    assert metrics.top_committers(10, 25430).iloc[0]['commits'] > 0
def test_committer(metrics):
    # Committer counts per period should be positive for group and single repo.
    assert metrics.committers(10, period='year').iloc[0]['count'] > 0
    assert metrics.committers(10, 25430,period='year').iloc[0]['count'] > 0
|
Joshinn-io/augur | workers/worker_base.py | <filename>workers/worker_base.py<gh_stars>100-1000
from workers.worker_persistance import *
#I figure I can seperate this class into at least three parts.
#I should also look into the subclass and see what uses what.
#
# Parts (Hierarchal relation)
#1. Persistance
#2. Base
#3. Github/lab
# Might be good to seperate the machine learning functionality into its own class too.
class Worker(Persistant):
## Set Thread Safety for OSX
# os.system("./osx-thread.sh")
def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"):
#Construct the persistant functionality for the worker
super().__init__(worker_type,data_tables,operations_tables)
self.collection_start_time = None
self._task = None # task currently being worked on (dict)
self._child = None # process of currently running task (multiprocessing process)
self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes)
# if we are finishing a previous task, certain operations work differently
self.finishing_task = False
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
#TODO: consider taking parts of this out for the base class and then overriding it in WorkerGitInterfaceable
self.config.update({'offline_mode': False})
self.config.update(config)
self.task_info = None
self.repo_id = None
self.owner = None
self.repo = None
self.given = given
self.models = models
self.debug_data = [] if 'debug_data' not in self.config else self.config['debug_data']
self.specs = {
'id': self.config['id'], # what the broker knows this worker as
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here)
'qualifications': [
{
'given': self.given, # type of repo this worker can be given as a task
'models': self.models # models this worker can fill for a repo as a task
}
],
'config': self.config
}
# Send broker hello message
if self.config['offline_mode'] is False:
self.connect_to_broker()
try:
self.tool_source
self.tool_version
self.data_source
except:
self.tool_source = 'Augur Worker Testing'
self.tool_version = '0.0.0'
self.data_source = 'Augur Worker Testing'
def write_debug_data(self, data, name):
if name in self.debug_data:
with open(f'{name}.json', 'w') as f:
json.dump(data, f)
    @property
    def results_counter(self):
        """ Property that is returned when the worker's current results_counter is referenced
        """
        if self.worker_type == 'facade_worker':
            # Facade worker tracks progress on its cfg object instead of _results_counter.
            return self.cfg.repos_processed #TODO: figure out why this doesn't work...
        else:
            return self._results_counter
    @results_counter.setter
    def results_counter(self, value):
        """ Set the worker's running count of results processed for the current task.
        (Previous docstring here was copy-pasted from the task setter.)
        """
        self._results_counter = value
    @property
    def task(self):
        """ The task dict currently being worked on (None when idle).
        """
        return self._task
    @task.setter
    def task(self, value):
        """ entry point for the broker to add a task to the queue
        Adds this task to the queue, and calls method to process queue
        """
        # If the task has one of our "valid" job types
        # NOTE(review): tasks with any other job_type are silently ignored here
        # (not queued), though _task is still set and run() still fires.
        if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN":
            self._queue.put(value)
        # Setting that causes paginating through ALL pages, not just unknown ones
        # This setting is set by the housekeeper and is attached to the task before it gets sent here
        if 'focused_task' in value:
            if value['focused_task'] == 1:
                self.logger.debug("Focused task is ON\n")
                self.finishing_task = True
        self._task = value
        self.run()
    def cancel(self):
        """ Delete/cancel current task
        """
        # Note: only clears the reference; it does not terminate a child
        # process that is already running the task.
        self._task = None
    def run(self):
        """ Kicks off the processing of the queue if it is not already being processed
        Gets run whenever a new task is added
        """
        # Spawn a subprocess to handle message reading and performing the tasks
        # Note: each call replaces self._child with a fresh Process handle.
        self._child = Process(target=self.collect, args=())
        self._child.start()
    def collect(self):
        """ Function to process each entry in the worker's task queue
        Determines what action to take based off the message type

        Runs in the child process: drains the MP queue, resolves each task's
        repo_id, and dispatches to the matching <model>_model method.
        """
        self.initialize_logging() # need to initialize logging again in child process cause multiprocessing
        self.logger.info("Starting data collection process\n")
        self.initialize_database_connections()
        #self.logger.info("Got to this point.")
        #self.logger.info(f"This is the oauths 0 index {self.oauths}")
        while True:
            if not self._queue.empty():
                message = self._queue.get() # Get the task off our MP queue
            else:
                self.logger.info("No job found.")
                break
            self.logger.info("Popped off message: {}\n".format(str(message)))
            if message['job_type'] == 'STOP':
                break
            # If task is not a valid job type
            if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
                raise ValueError('{} is not a recognized task type'.format(message['job_type']))
                pass
            # Query repo_id corresponding to repo url of given task
            # NOTE(review): URL is string-formatted into SQL — safe only if the
            # broker is trusted; bound parameters would be safer.
            repoUrlSQL = s.sql.text("""
                SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
                """.format(message['given'][self.given[0][0]]))
            repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
            self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id)))
            # Call method corresponding to model sent in task
            try:
                model_method = getattr(self, '{}_model'.format(message['models'][0]))
                #TODO: set this to record exceptions seperatly. This errored and it took a while to figure that ^ wasn't the line that was erroring.
                # NOTE(review): 'repo_info' looks hard-coded — presumably this
                # should be message['models'][0]; confirm before changing.
                self.record_model_process(repo_id, 'repo_info')
            except Exception as e:
                self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) +
                    'must have name of {}_model'.format(message['models'][0]))
                self.register_task_failure(message, repo_id, e)
                break
            # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught
            # and worker can move onto the next task without stopping
            try:
                self.logger.info("Calling model method {}_model".format(message['models'][0]))
                self.task_info = message
                self.repo_id = repo_id
                self.owner, self.repo = self.get_owner_repo(list(message['given'].values())[0])
                model_method(message, repo_id)
            except Exception as e: # this could be a custom exception, might make things easier
                self.register_task_failure(message, repo_id, e)
                break
        self.logger.debug('Closing database connections\n')
        self.db.dispose()
        self.helper_db.dispose()
        self.logger.info("Collection process finished")
def connect_to_broker(self):
connected = False
for i in range(5):
try:
self.logger.debug("Connecting to broker, attempt {}\n".format(i))
if i > 0:
time.sleep(10)
requests.post('http://{}:{}/api/unstable/workers'.format(
self.config['host_broker'],self.config['port_broker']), json=self.specs)
self.logger.info("Connection to the broker was successful\n")
connected = True
break
except requests.exceptions.ConnectionError:
self.logger.error('Cannot connect to the broker. Trying again...\n')
if not connected:
sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n')
@staticmethod
def dump_queue(queue):
""" Empties all pending items in a queue and returns them in a list.
"""
result = []
queue.put("STOP")
for i in iter(queue.get, 'STOP'):
result.append(i)
# time.sleep(.1)
return result
    def find_id_from_login(self, login, platform='github'):
        """ Retrieves our contributor table primary key value for the contributor with
        the given GitHub login credentials, if this contributor is not there, then
        they get inserted.
        :param login: String, the GitHub login username to find the primary key id for
        :param platform: String, 'github' or 'gitlab'
        :return: Integer, the id of the row in our database with the matching GitHub login
        """
        # NOTE(review): login is string-formatted into SQL; bound parameters
        # would be safer if login can contain quotes.
        idSQL = s.sql.text("""
            SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \
            AND LOWER(data_source) = '{} api'
            """.format(login, platform))
        rs = pd.read_sql(idSQL, self.db, params={})
        data_list = [list(row) for row in rs.itertuples(index=False)]
        try:
            return data_list[0][0]
        except:
            # IndexError: contributor not found — fall through and insert them.
            self.logger.info('contributor needs to be added...')
        if platform == 'github':
            cntrb_url = ("https://api.github.com/users/" + login)
        elif platform == 'gitlab':
            cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login )
        self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url))
        while True:
            try:
                r = requests.get(url=cntrb_url, headers=self.headers)
                break
            except TimeoutError as e:
                # NOTE(review): message says 10 seconds but the sleep is 30 —
                # one of the two is wrong.
                self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
                time.sleep(30)
        self.update_rate_limit(r)
        contributor = r.json()
        # GitHub returns a dict; GitLab's user search returns a LIST of dicts,
        # so these membership tests only populate the fields for GitHub
        # (on a list they test list elements, not keys) — TODO confirm intent.
        company = None
        location = None
        email = None
        if 'company' in contributor:
            company = contributor['company']
        if 'location' in contributor:
            location = contributor['location']
        if 'email' in contributor:
            email = contributor['email']
        if platform == 'github':
            cntrb = {
                'cntrb_login': contributor['login'] if 'login' in contributor else None,
                'cntrb_email': contributor['email'] if 'email' in contributor else None,
                'cntrb_company': contributor['company'] if 'company' in contributor else None,
                'cntrb_location': contributor['location'] if 'location' in contributor else None,
                'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None,
                'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
                'gh_user_id': contributor['id'] if 'id' in contributor else None,
                'gh_login': contributor['login'] if 'login' in contributor else None,
                'gh_url': contributor['url'] if 'url' in contributor else None,
                'gh_html_url': contributor['html_url'] if 'html_url' in contributor else None,
                'gh_node_id': contributor['node_id'] if 'node_id' in contributor else None,
                'gh_avatar_url': contributor['avatar_url'] if 'avatar_url' in contributor else None,
                'gh_gravatar_id': contributor['gravatar_id'] if 'gravatar_id' in contributor else None,
                'gh_followers_url': contributor['followers_url'] if 'followers_url' in contributor else None,
                'gh_following_url': contributor['following_url'] if 'following_url' in contributor else None,
                'gh_gists_url': contributor['gists_url'] if 'gists_url' in contributor else None,
                'gh_starred_url': contributor['starred_url'] if 'starred_url' in contributor else None,
                'gh_subscriptions_url': contributor['subscriptions_url'] if 'subscriptions_url' in contributor else None,
                'gh_organizations_url': contributor['organizations_url'] if 'organizations_url' in contributor else None,
                'gh_repos_url': contributor['repos_url'] if 'repos_url' in contributor else None,
                'gh_events_url': contributor['events_url'] if 'events_url' in contributor else None,
                'gh_received_events_url': contributor['received_events_url'] if 'received_events_url' in contributor else None,
                'gh_type': contributor['type'] if 'type' in contributor else None,
                'gh_site_admin': contributor['site_admin'] if 'site_admin' in contributor else None,
                'tool_source': self.tool_source,
                'tool_version': self.tool_version,
                'data_source': self.data_source
            }
        elif platform == 'gitlab':
            # GitLab search result: first match in the returned list.
            cntrb = {
                'cntrb_login': contributor[0]['username'] if 'username' in contributor[0] else None,
                'cntrb_email': email,
                'cntrb_company': company,
                'cntrb_location': location,
                'cntrb_created_at': contributor[0]['created_at'] if 'created_at' in contributor[0] else None,
                'cntrb_canonical': email,
                'gh_user_id': contributor[0]['id'],
                'gh_login': contributor[0]['username'],
                'gh_url': contributor[0]['web_url'],
                'gh_html_url': None,
                'gh_node_id': None,
                'gh_avatar_url': contributor[0]['avatar_url'],
                'gh_gravatar_id': None,
                'gh_followers_url': None,
                'gh_following_url': None,
                'gh_gists_url': None,
                'gh_starred_url': None,
                'gh_subscriptions_url': None,
                'gh_organizations_url': None,
                'gh_repos_url': None,
                'gh_events_url': None,
                'gh_received_events_url': None,
                'gh_type': None,
                'gh_site_admin': None,
                'tool_source': self.tool_source,
                'tool_version': self.tool_version,
                'data_source': self.data_source
            }
        result = self.db.execute(self.contributors_table.insert().values(cntrb))
        self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
        self.results_counter += 1
        self.cntrb_id_inc = int(result.inserted_primary_key[0])
        self.logger.info(f"Inserted contributor: {cntrb['cntrb_login']}\n")
        # Recurse once to read back the freshly inserted row's id.
        # NOTE(review): if the insert never becomes visible this recurses forever.
        return self.find_id_from_login(login, platform)
def get_owner_repo(self, git_url):
""" Gets the owner and repository names of a repository from a git url
:param git_url: String, the git url of a repository
:return: Tuple, includes the owner and repository names in that order
"""
split = git_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' == repo[-4:]:
repo = repo[:-4]
return owner, repo
    def record_model_process(self, repo_id, model):
        """ Record in worker_history that work on `model` for `repo_id` has started.

        Updates the existing history row when resuming a previous task,
        otherwise inserts a new one, and stamps collection_start_time so
        completion/failure handlers can report elapsed duration.
        """
        self.logger.info(f"This is the oauths 0 index {self.oauths[0]}")
        task_history = {
            "repo_id": repo_id,
            "worker": self.config['id'],
            "job_model": model,
            "oauth_id": self.oauths[0]['oauth_id'],
            "timestamp": datetime.datetime.now(),
            "status": "Stopped",
            "total_results": self.results_counter
        }
        if self.finishing_task:
            # Resuming: update the row recorded by the interrupted run.
            result = self.helper_db.execute(self.worker_history_table.update().where(
                self.worker_history_table.c.history_id==self.history_id).values(task_history))
            self.history_id += 1
        else:
            result = self.helper_db.execute(self.worker_history_table.insert().values(task_history))
            self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key))
            self.history_id = int(result.inserted_primary_key[0])
        self.collection_start_time = time.time()
def register_task_completion(self, task, repo_id, model):
self.logger.info(f"Worker completed this task in {self.collection_start_time - time.time()} seconds.\n")
# Task to send back to broker
task_completed = {
'worker_id': self.config['id'],
'job_type': "MAINTAIN",
'repo_id': repo_id,
'job_model': model
}
key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \
'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \
if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
if key == 'INVALID_GIVEN':
self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.")
return
# Add to history table
task_history = {
'repo_id': repo_id,
'worker': self.config['id'],
'job_model': model,
'oauth_id': self.oauths[0]['oauth_id'],
'timestamp': datetime.datetime.now(),
'status': "Success",
'total_results': self.results_counter
}
self.helper_db.execute(self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id).values(task_history))
self.logger.info(f"Recorded job completion for: {task_completed}\n")
# Update job process table
updated_job = {
'since_id_str': repo_id,
'last_count': self.results_counter,
'last_run': datetime.datetime.now(),
'analysis_state': 0
}
self.helper_db.execute(self.worker_job_table.update().where(
self.worker_job_table.c.job_model==model).values(updated_job))
self.logger.info(f"Updated job process for model: {model}\n")
if self.config['offline_mode'] is False:
# Notify broker of completion
self.logger.info(f"Telling broker we completed task: {task_completed}\n")
self.logger.info(f"This task inserted: {self.results_counter + self.insert_counter} tuples " +
f"and updated {self.update_counter} tuples.\n")
requests.post('http://{}:{}/api/unstable/completed_task'.format(
self.config['host_broker'],self.config['port_broker']), json=task_completed)
# Reset results counter for next task
self.results_counter = 0
self.insert_counter = 0
self.update_counter = 0
def register_task_failure(self, task, repo_id, e):
self.logger.error(f"Worker ran into an error for task: {task}")
self.logger.error(
f"Worker was processing this task for {self.collection_start_time - time.time()} "
"seconds."
)
self.logger.error("Printing traceback...")
self.logger.error(e)
tb = traceback.format_exc()
self.logger.error(tb)
self.logger.info(f"This task inserted {self.results_counter} tuples before failure.")
self.logger.info("Notifying broker and logging task failure in database...")
key = (
'github_url' if 'github_url' in task['given'] else 'git_url'
if 'git_url' in task['given'] else 'gitlab_url'
if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
)
url = task['given'][key]
""" Query all repos with repo url of given task """
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(url))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
task['worker_id'] = self.config['id']
try:
requests.post("http://{}:{}/api/unstable/task_error".format(
self.config['host_broker'],self.config['port_broker']), json=task)
except requests.exceptions.ConnectionError:
self.logger.error("Could not send task failure message to the broker:")
self.logger.error(e)
except Exception:
self.logger.error("An error occured while informing broker about task failure:")
self.logger.error(e)
# Add to history table
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": task['models'][0],
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Error",
"total_results": self.results_counter
}
self.helper_db.execute(
self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id
).values(task_history)
)
self.logger.error(f"Recorded job error in the history table for: {task}")
# Update job process table
updated_job = {
"since_id_str": repo_id,
"last_count": self.results_counter,
"last_run": datetime.datetime.now(),
"analysis_state": 0
}
self.helper_db.execute(
self.worker_job_table.update().where(
self.worker_job_table.c.job_model==task['models'][0]
).values(updated_job)
)
self.logger.info(f"Updated job process for model: {task['models'][0]}\n")
# Reset results counter for next task
self.results_counter = 0
|
Joshinn-io/augur | augur/routes/batch.py | <filename>augur/routes/batch.py
#SPDX-License-Identifier: MIT
"""
Creates routes for the manager
"""
import json
import logging
import time

import requests
import sqlalchemy as s
from flask import Response, abort, request
from sqlalchemy import exc

from augur.util import metric_metadata
# Module-level logger for the batch route handlers.
logger = logging.getLogger(__name__)
def create_routes(server):
    """Register the /batch and /batch/metadata endpoints on `server`."""
    @server.app.route('/{}/batch'.format(server.api_version), methods=['GET', 'POST'])
    def batch():
        """
        Execute multiple requests, submitted as a batch.
        :statuscode 207: Multi status
        """
        server.show_metadata = False
        if request.method == 'GET':
            """this will return sensible defaults in the future"""
            return server.app.make_response('{"status": "501", "response": "Defaults for batch requests not implemented. Please POST a JSON array of requests to this endpoint for now."}')
        try:
            # NOTE: this local deliberately shadows the imported `requests` module.
            requests = json.loads(request.data.decode('utf-8'))
        except ValueError as e:
            # BUG FIX: `request.abort` does not exist on flask's request object;
            # use flask.abort (added to the file's imports) to return a 400.
            abort(400)
        responses = []
        for index, req in enumerate(requests):
            method = req['method']
            path = req['path']
            body = req.get('body', None)
            try:
                logger.debug('batch-internal-loop: %s %s' % (method, path))
                # BUG FIX: `server.app.server.app.context()` was a mangled
                # `server.app.app_context()` (Flask application context).
                with server.app.app_context():
                    with server.app.test_request_context(path,
                                                         method=method,
                                                         data=body):
                        try:
                            # Can modify flask.g here without affecting
                            # flask.g of the root request for the batch
                            # Pre process Request
                            rv = server.app.preprocess_request()
                            if rv is None:
                                # Main Dispatch
                                rv = server.app.dispatch_request()
                        except Exception as e:
                            rv = server.app.handle_user_exception(e)
                        response = server.app.make_response(rv)
                        # Post process Request
                        response = server.app.process_response(response)
                # Response is a Flask response object.
                # _read_response(response) reads response.response
                # and returns a string. If your endpoints return JSON object,
                # this string would be the response as a JSON string.
                responses.append({
                    "path": path,
                    "status": response.status_code,
                    "response": str(response.get_data(), 'utf8'),
                })
            except Exception as e:
                # Any per-request failure is reported inline as a 500 entry.
                responses.append({
                    "path": path,
                    "status": 500,
                    "response": str(e)
                })
        # BUG FIX: mimetype was mangled to "server.app.ication/json".
        return Response(response=json.dumps(responses),
                        status=207,
                        mimetype="application/json")
    """
    @api {post} /batch Batch Request Metadata
    @apiName BatchMetadata
    @apiGroup Batch
    @apiDescription Returns metadata of batch requests
    POST JSON of API requests metadata
    """
    @server.app.route('/{}/batch/metadata'.format(server.api_version), methods=['GET', 'POST'])
    def batch_metadata():
        """
        Returns endpoint metadata in batch format
        """
        server.show_metadata = True
        if request.method == 'GET':
            """this will return sensible defaults in the future"""
            return server.app.make_response(json.dumps(metric_metadata))
        try:
            requests = json.loads(request.data.decode('utf-8'))
        except ValueError as e:
            # BUG FIX: was `request.abort(400)` (nonexistent attribute).
            abort(400)
        responses = []
        for index, req in enumerate(requests):
            method = req['method']
            path = req['path']
            body = req.get('body', None)
            try:
                logger.info('batch endpoint: ' + path)
                # BUG FIX: mangled `server.app.server.app.context()`.
                with server.app.app_context():
                    with server.app.test_request_context(path,
                                                         method=method,
                                                         data=body):
                        try:
                            rv = server.app.preprocess_request()
                            if rv is None:
                                rv = server.app.dispatch_request()
                        except Exception as e:
                            rv = server.app.handle_user_exception(e)
                        response = server.app.make_response(rv)
                        response = server.app.process_response(response)
                responses.append({
                    "path": path,
                    "status": response.status_code,
                    "response": str(response.get_data(), 'utf8'),
                })
            except Exception as e:
                responses.append({
                    "path": path,
                    "status": 500,
                    "response": str(e)
                })
        server.show_metadata = False
        # BUG FIX: mimetype was mangled to "server.app.ication/json".
        return Response(response=json.dumps(responses),
                        status=207,
                        mimetype="application/json")
|
Joshinn-io/augur | workers/deps_worker/vb_deps.py | import sys
import re
from pathlib import Path
def get_files(path):
    """Recursively collect all Visual Basic (*.vb) files under `path`.

    :param path: str or Path, root directory to search
    :return: list of pathlib.Path objects, one per .vb file found

    Based on the recursive-glob example in the pathlib docs
    (https://docs.python.org/3/library/pathlib.html).
    """
    # '**/*.vb' matches .vb files in `path` and every subdirectory.
    # (Also fixes the original's shadowing of the `dir` builtin.)
    return list(Path(path).glob('**/*.vb'))
def get_deps_for_file(path):
    """Extract imported namespaces from a Visual Basic source file.

    :param path: path to a .vb source file
    :return: list of strings, the text after each `Imports` keyword
    """
    # `with` guarantees the handle is closed even if reading raises
    # (the original open/close pair leaked on error).
    with open(path, 'r') as source:
        # Raw string avoids the invalid-escape-sequence warning for \s.
        return re.findall(r"Imports\s*(.*)", source.read())
|
Joshinn-io/augur | tests/test_application.py | #SPDX-License-Identifier: MIT
import pytest
import augur.application
import sqlalchemy as s
import json
from augur.application import Application
def test_init_augur_regular():
    # The Application should construct cleanly with logging disabled.
    augur_app = Application(disable_logs=True)
    assert augur_app is not None
def test_connect_to_database(monkeypatch):
    # Force every engine connect to fail so Application startup must surface
    # the database error instead of swallowing it.
    def mock_fail_connection(self):
        raise(s.exc.OperationalError("fake", "error", "message"))
    monkeypatch.setattr(s.engine.Engine, "connect", mock_fail_connection)
    monkeypatch.setenv("AUGUR_LOG_QUIET", "1")
    with pytest.raises(s.exc.OperationalError):
        augur_app = Application(disable_logs=True)
|
Joshinn-io/augur | augur/gunicorn.py | <filename>augur/gunicorn.py
import logging
import gunicorn.app.base
from gunicorn.arbiter import Arbiter
from augur.server import Server
# Module-level logger for the gunicorn application wrapper.
logger = logging.getLogger(__name__)
class AugurGunicornApp(gunicorn.app.base.BaseApplication):
    """
    Loads configurations, initializes Gunicorn, loads server
    """
    def __init__(self, options=None, augur_app=None):
        """Wrap a running Augur application for gunicorn.

        :param options: dict of gunicorn settings (must include 'workers')
        :param augur_app: the Augur Application instance; required in practice,
            since its manager/broker/housekeeper are read immediately below.
        """
        # BUG FIX: mutable default argument ({}) was shared across instances;
        # use a None sentinel instead.
        self.options = options if options is not None else {}
        self.augur_app = augur_app
        self.manager = self.augur_app.manager
        self.broker = self.augur_app.broker
        self.housekeeper = self.augur_app.housekeeper
        self.server = None
        logger.debug(f"Gunicorn will start {self.options['workers']} worker processes")
        super(AugurGunicornApp, self).__init__()
    def load_config(self):
        """
        Sets the values for configurations
        """
        # Only forward options gunicorn actually recognizes and that are set.
        config = {key: value for key, value in self.options.items()
                  if key in self.cfg.settings and value is not None}
        for key, value in config.items():
            self.cfg.set(key.lower(), value)
    def get_augur_app(self):
        """
        Returns the loaded server
        """
        self.load()
        return self.server.augur_app
    def load(self):
        """
        Returns the loaded server
        """
        if self.server is None:
            try:
                self.server = Server(augur_app=self.augur_app)
            except Exception as e:
                logger.error(f"An error occured when Gunicorn tried to load the server: {e}")
                # NOTE(review): self.server stays None here, so the return below
                # raises AttributeError — confirm whether load() should re-raise.
        return self.server.app
|
Joshinn-io/augur | augur/metrics/deps.py | <filename>augur/metrics/deps.py<gh_stars>1-10
#SPDX-License-Identifier: MIT
"""
Metrics that provide data about with insight detection and reporting
"""
import sqlalchemy as s
import pandas as pd
from augur.util import register_metric
@register_metric()
def deps(self, repo_group_id, repo_id=None):
    """Return every row of augur_data.dependencies.

    NOTE(review): repo_group_id and repo_id are currently ignored — the query
    returns dependencies for ALL repos. Confirm whether filtering was intended.

    :param repo_group_id: repo group to filter on (unused)
    :param repo_id: optional repo to filter on (unused)
    :return: pandas DataFrame of all dependency records
    """
    depsSQL = s.sql.text("""
        SELECT * FROM augur_data.dependencies
    """)
    results = pd.read_sql(depsSQL, self.database)
    return results
|
Joshinn-io/augur | workers/deps_worker/deps_worker.py | #SPDX-License-Identifier: MIT
import os, subprocess
from datetime import datetime
import logging
from workers.worker_git_integration import WorkerGitInterfaceable
import requests
import json
from urllib.parse import quote
from multiprocessing import Process, Queue
import traceback
import pandas as pd
import sqlalchemy as s
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import MetaData
from workers.worker_base import Worker
from workers.deps_worker import dependancy_calculator as dep_calc
class DepsWorker(WorkerGitInterfaceable):
    """
    Worker that collects dependency information (via the dep_calc scanner)
    and OSSF Scorecard results for repositories, storing them in the
    repo_dependencies and repo_deps_scorecard tables.
    """

    def __init__(self, config=None):
        # None sentinel instead of a mutable {} default so the config dict
        # is never shared between worker instances.
        if config is None:
            config = {}

        worker_type = "deps_worker"

        # Define what this worker can be given and know how to interpret
        given = [['git_url']]
        models = ['deps', 'ossf_scorecard']

        # Define the tables needed to insert, update, or delete on
        data_tables = ['repo_dependencies', 'repo_deps_scorecard']
        operations_tables = ['worker_history', 'worker_job']

        # Run the general worker initialization
        super().__init__(worker_type, config, given, models, data_tables, operations_tables)

        # Dependencies are scanned on the local clone maintained by the
        # facade worker, so reuse its repo_directory setting.
        self.config.update({
            'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory']
        })

        self.tool_source = 'Deps Worker'
        self.tool_version = '2.0.0'
        self.data_source = 'Augur Repository Data'

    def deps_model(self, entry_info, repo_id):
        """ Data collection and storage method for the 'deps' model.

        :param entry_info: task metadata passed in by the broker/housekeeper
        :param repo_id: Repository ID to scan for dependencies
        """
        self.logger.info(f"This is the deps model entry info: {entry_info}.")
        self.logger.info(f"This is the deps model repo: {repo_id}.")

        # Build the repo's on-disk path relative to the facade clone root
        # (<repo_group_id>/<repo_path><repo_name>).
        repo_path_sql = s.sql.text("""
            SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path
            FROM repo
            WHERE repo_id = :repo_id
        """)

        relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1]
        absolute_repo_path = self.config['repo_directory'] + relative_repo_path

        try:
            self.generate_deps_data(repo_id, absolute_repo_path)
        except Exception as e:
            self.logger.debug(f"This is the error generated: {e}.")
            stacker = traceback.format_exc()
            self.logger.debug(f"{stacker}")

        # Completion is registered even on failure so the job queue advances.
        self.register_task_completion(entry_info, repo_id, "deps")

    def ossf_scorecard_model(self, entry_info, repo_id):
        """ Data collection and storage method for the 'ossf_scorecard' model.

        :param entry_info: task metadata passed in by the broker/housekeeper
        :param repo_id: Repository ID to run scorecard against
        """
        self.logger.info('Scorecard Model called...')
        self.logger.info(f"The entry info: {entry_info}.")
        self.logger.info(f"The repo id: {repo_id}.")

        # Scorecard works against the remote URL (repo_git), not a local clone.
        repo_path_sql = s.sql.text("""
            SELECT repo_id, repo_git AS path
            FROM repo
            WHERE repo_id = :repo_id
        """)

        scorecard_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1]

        try:
            self.generate_scorecard(repo_id, scorecard_repo_path)
        except Exception as e:
            self.logger.debug(f"This is the error for scorecard generation: {e}.")
            stacker = traceback.format_exc()
            self.logger.debug(f"{stacker}")

        # Bug fix: completion was previously registered for the "deps" model,
        # but this method services the "ossf_scorecard" model.
        self.register_task_completion(entry_info, repo_id, "ossf_scorecard")

    def generate_scorecard(self, repo_id, path):
        """Runs OSSF scorecard on a repo and stores the results in the database.

        :param repo_id: Repository ID
        :param path: clone URL of the repository (e.g. https://github.com/owner/name)
        """
        self.logger.info('Generating scorecard data for repo...')
        self.logger.info(f'Repo ID: {repo_id}, Path: {path}')

        # Convert the URL into the owner/name form scorecard expects,
        # e.g. https://github.com/chaoss/augur -> github.com/chaoss/augur
        path = path[8:]
        if path.endswith('.git'):
            path = path.replace(".git", "")
        command = '--repo=' + path

        # The scorecard binary is expected to live in ~/scorecard.
        path_to_scorecard = os.environ['HOME'] + '/scorecard'

        # Scorecard reads its GitHub token from this environment variable.
        os.environ['GITHUB_AUTH_TOKEN'] = self.config['gh_api_key']

        p = subprocess.run(['./scorecard', command], cwd=path_to_scorecard,
                           capture_output=True, text=True, timeout=None)
        self.logger.info('subprocess completed successfully... ')

        # Scorecard prints a fixed-layout report; lines 4..19 hold the
        # individual checks as "<name> <status> <score>" rows.
        # NOTE: this parsing is tied to the scorecard CLI's output format.
        output = p.stdout.split('\n')
        required_output = output[4:20]

        self.logger.info('adding to database...')
        try:
            for check in required_output:
                fields = check.split()
                repo_deps_scorecard = {
                    'repo_id': repo_id,
                    'name': fields[0],
                    'status': fields[1],
                    'score': fields[2],
                    'tool_source': self.tool_source,
                    'tool_version': self.tool_version,
                    'data_source': self.data_source,
                    # Use UTC so the trailing 'Z' (Zulu) in the format is accurate.
                    'data_collection_date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
                }
                result = self.db.execute(
                    self.repo_deps_scorecard_table.insert().values(repo_deps_scorecard))
                self.logger.info(f"Added OSSF scorecard data : {result.inserted_primary_key}")
        except Exception as e:
            self.logger.debug(f"Encountered trouble and exception registered inserting scorecard info: {e}.")
            stacker = traceback.format_exc()
            self.logger.debug(f"{stacker}")

    def generate_deps_data(self, repo_id, path):
        """Runs the dependency scanner on a local clone and stores results.

        :param repo_id: Repository ID
        :param path: absolute path of the repository clone
        """
        self.logger.info('Searching for deps in repo')
        self.logger.info(f'Repo ID: {repo_id}, Path: {path}')

        deps = dep_calc.get_deps(path)
        try:
            for dep in deps:
                repo_deps = {
                    'repo_id': repo_id,
                    'dep_name': dep.name,
                    'dep_count': dep.count,
                    'dep_language': dep.language,
                    'tool_source': self.tool_source,
                    'tool_version': self.tool_version,
                    'data_source': self.data_source,
                    # Use UTC so the trailing 'Z' (Zulu) in the format is accurate.
                    'data_collection_date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
                }
                result = self.db.execute(
                    self.repo_dependencies_table.insert().values(repo_deps))
                self.logger.info(f"Added dep: {result.inserted_primary_key}")
        except Exception as e:
            self.logger.debug(f"generate deps data failed on {e}.")
            stacker = traceback.format_exc()
            self.logger.debug(f"{stacker}")
|
Joshinn-io/augur | workers/release_worker/__init__.py | #SPDX-License-Identifier: MIT
"""gh_release_worker - Augur Worker that collects GitHub Repo Info data"""
__version__ = '0.0.0'
__author__ = '<NAME> <<EMAIL>>'
__all__ = []
|
Joshinn-io/augur | workers/facade_worker/contributor_interfaceable/contributor_interface.py | from requests.api import head
from workers.worker_base import *
import logging
from logging import FileHandler, Formatter, StreamHandler, log
from workers.worker_git_integration import WorkerGitInterfaceable
from workers.util import read_config
from psycopg2.errors import UniqueViolation
from random import randint
import json
# Debugger
import traceback
"""
This class serves as an extension for the facade worker to allow it to make api calls and interface with GitHub.
The motivation for doing it this way is because the functionality needed to interface with Github and/or GitLab
is contained within WorkerGitInterfaceable. This is a problem because facade was migrated into augur from its own
project and just making it inherit from a new class could have unintended consequences and moreover, the facade worker
doesn't even really need the WorkerGitInterfaceable for too much. This is mainly just to have better parity with the contributor
worker and table.
"""
"""
A few interesting ideas: Maybe get the top committers from each repo first? curl https://api.github.com/repos/chaoss/augur/contributors
"""
# TODO : Make this borrow everything that it can from the facade worker.
# i.e. port, logging, etc
class ContributorInterfaceable(WorkerGitInterfaceable):
def __init__(self, config=None, logger=None, special_rate_limit=10):
    """
    GitHub interface used by the facade worker to resolve commit authors
    to GitHub accounts.

    :param config: worker configuration dict. A ``None`` sentinel replaces
        the old mutable ``{}`` default, which was shared across instances
        and mutated in place by ``self.config.update(...)`` below.
    :param logger: logger supplied by the owning facade worker.
    :param special_rate_limit: number of API requests allowed before this
        interface voluntarily sleeps (see request_dict_from_endpoint).
    """
    if config is None:
        config = {}

    worker_type = "contributor_interface"

    # Tables this interface inserts into / updates.
    self.data_tables = ['contributors', 'pull_requests', 'commits',
                        'pull_request_assignees', 'pull_request_events', 'pull_request_labels',
                        'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo',
                        'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits',
                        'pull_request_files', 'pull_request_reviews', 'pull_request_review_message_ref',
                        'contributors_aliases', 'unresolved_commit_emails']
    self.operations_tables = ['worker_history', 'worker_job']
    self.platform = "github"

    # Set up configuration/logging first; the facade worker's logger is reused.
    self._root_augur_dir = Persistant.ROOT_AUGUR_DIR
    self.augur_config = AugurConfig(self._root_augur_dir)
    self.config = config
    self.logger = logger

    # Test logging after init.
    self.logger.info(
        "Facade worker git interface logging set up correctly")

    self.config.update({
        'gh_api_key': self.augur_config.get_value('Database', 'key'),
        'gitlab_api_key': self.augur_config.get_value('Database', 'gitlab_api_key')
    })

    # Get config passed from the facade worker.
    self.initialize_database_connections()
    self.logger.info("Facade worker git interface database set up")
    self.logger.info(f"configuration passed is: {str(self.config)}.")

    # Max requests this interface may make before sleeping
    # (see request_dict_from_endpoint).
    self.special_rate_limit = special_rate_limit
    self.recent_requests_made = 0

    # Needs to be an attribute of the class for incremental database insert
    # using paginate_endpoint.
    self.pk_source_prs = []

    self.logger.info("Facade now has contributor interface.")

    # Values are pre-quoted because they are interpolated into SQL elsewhere.
    self.tool_source = '\'Facade Worker\''
    self.tool_version = '\'1.0.1\''
    self.data_source = '\'Git Log\''
def initialize_logging(self):
    """
    Build file/console loggers for this interface, mirroring the facade
    worker's logging layout under <log dir>/workers/contributor_interface/.

    Reads log_level, debug, verbose, quiet, port_database and id from
    self.config and stores the derived logfile paths back into it.
    """
    # Get the log level in upper case from the augur config's logging section.
    self.config['log_level'] = self.config['log_level'].upper()
    if self.config['debug']:
        self.config['log_level'] = 'DEBUG'

    if self.config['verbose']:
        format_string = AugurLogging.verbose_format_string
    else:
        format_string = AugurLogging.simple_format_string

    # NOTE(review): this unconditional assignment makes the if/else above
    # dead code — every run uses the verbose format. Looks like a debugging
    # leftover; confirm intent before removing.
    format_string = AugurLogging.verbose_format_string

    # Use stock python formatter for stdout
    formatter = Formatter(fmt=format_string)
    # User custom for stderr, Gives more info than verbose_format_string
    error_formatter = Formatter(fmt=AugurLogging.error_format_string)

    worker_type = "contributor_interface"
    worker_dir = AugurLogging.get_log_directories(
        self.augur_config, reset_logfiles=False) + "/workers/"
    Path(worker_dir).mkdir(exist_ok=True)
    logfile_dir = worker_dir + f"/{worker_type}/"
    Path(logfile_dir).mkdir(exist_ok=True)

    # Create more complex sublogs in the logfile directory determined by the AugurLogging class
    server_logfile = logfile_dir + \
        '{}_{}_server.log'.format(
            worker_type, self.config['port_database'])
    collection_logfile = logfile_dir + \
        '{}_{}_collection.log'.format(
            worker_type, self.config['port_database'])
    collection_errorfile = logfile_dir + \
        '{}_{}_collection.err'.format(
            worker_type, self.config['port_database'])
    self.config.update({
        'logfile_dir': logfile_dir,
        'server_logfile': server_logfile,
        'collection_logfile': collection_logfile,
        'collection_errorfile': collection_errorfile
    })

    # All records at log_level or above go to the collection log file.
    collection_file_handler = FileHandler(
        filename=self.config['collection_logfile'], mode="a")
    collection_file_handler.setFormatter(formatter)
    collection_file_handler.setLevel(self.config['log_level'])

    # Warnings and errors additionally go to a separate .err file.
    collection_errorfile_handler = FileHandler(
        filename=self.config['collection_errorfile'], mode="a")
    collection_errorfile_handler.setFormatter(error_formatter)
    collection_errorfile_handler.setLevel(logging.WARNING)

    logger = logging.getLogger(self.config['id'])
    logger.handlers = []
    logger.addHandler(collection_file_handler)
    logger.addHandler(collection_errorfile_handler)
    logger.setLevel(self.config['log_level'])
    # Don't bubble records up to the root logger.
    logger.propagate = False

    if self.config['debug']:
        self.config['log_level'] = 'DEBUG'
        console_handler = StreamHandler()
        console_handler.setFormatter(formatter)
        console_handler.setLevel(self.config['log_level'])
        logger.addHandler(console_handler)

    if self.config['quiet']:
        logger.disabled = True

    self.logger = logger

    # NOTE(review): tool_source/version/data_source are re-set here with
    # values different from those in __init__ ('Facade Worker' / '1.0.1');
    # confirm which pair is intended to win.
    self.tool_source = '\'Facade Worker\'s Contributor Interface\''
    self.tool_version = '\'0.2.0\''
    self.data_source = '\'Git Log\''
# Try to construct the best url to ping GitHub's API for a username given an email.
"""
I changed this because of the following note on the API site: With the in qualifier you can restrict your search to the username (login), full name, public email, or any combination of these. When you omit this qualifier, only the username and email address are searched. For privacy reasons, you cannot search by email domain name.
https://docs.github.com/en/github/searching-for-information-on-github/searching-on-github/searching-users#search-only-users-or-organizations
"""
def create_endpoint_from_email(self, email):
    """
    Build a GitHub user-search URL that matches *email* against account
    emails. ``type:user`` keeps user-owned organizations out of the results.
    """
    self.logger.info(f"Trying to resolve contributor from email: {email}")
    return 'https://api.github.com/search/users?q={}+in:email+type:user'.format(email)
# Try to construct the best url to ping GitHub's API for a username given a full name.
def create_endpoint_from_name(self, contributor):
    """
    Build a GitHub fullname-search URL from a contributor record.

    Uses 'cmt_author_name' when the record carries commit data (signalled
    by the presence of a 'commit_name' key), otherwise falls back to 'name'.

    :raises ValueError: when the name has fewer than two words, since a
        first/last name pair cannot be formed.
    """
    self.logger.info(
        f"Trying to resolve contributor from name: {contributor}")

    key = 'cmt_author_name' if 'commit_name' in contributor else 'name'
    words = contributor[key].split()

    # A meaningful fullname search needs at least a first and a last name.
    if len(words) < 2:
        raise ValueError

    first, last = words[0], words[-1]
    return 'https://api.github.com/search/users?q=fullname:{}+{}'.format(first, last)
# Hit the endpoint specified by the url and return the json that it returns if it returns a dict.
# Returns None on failure.
def request_dict_from_endpoint(self, url, timeout_wait=10):
    """
    GET *url* and return its JSON payload as a dict; None on failure.

    Retries up to 10 times, handling GitHub rate limiting, HTML error
    pages, and string payloads that turn out to be JSON. Also enforces
    this interface's own "special" rate limit by sleeping after every
    self.special_rate_limit requests.

    :param url: GitHub API endpoint to hit
    :param timeout_wait: seconds to sleep after a TimeoutError before retrying
    :return: dict response data on success, None on failure
    """
    self.logger.info(f"Hitting endpoint: {url}")
    attempts = 0
    response_data = None
    success = False
    # This borrow's the logic to safely hit an endpoint from paginate_endpoint.
    while attempts < 10:
        try:
            response = requests.get(url=url, headers=self.headers)
        except TimeoutError:
            self.logger.info(
                f"User data request for enriching contributor data failed with {attempts} attempts! Trying again...")
            time.sleep(timeout_wait)
            continue
        # Make sure we know how many requests our api tokens have.
        self.update_rate_limit(response, platform="github")
        # Update the special rate limit
        self.recent_requests_made += 1
        # Sleep if we have made a lot of requests recently
        if self.recent_requests_made == self.special_rate_limit:
            self.recent_requests_made = 0
            self.logger.info(
                f"Special rate limit of {self.special_rate_limit} reached! Sleeping for thirty seconds.")
            # Sleep for thirty seconds before making a new request.
            # NOTE(review): the message above says thirty seconds but this
            # sleeps 60 — confirm which value is intended.
            time.sleep(60)
        try:
            response_data = response.json()
        except:
            # Fall back to treating the body as raw text when it isn't JSON.
            response_data = json.loads(json.dumps(response.text))
        if type(response_data) == dict:
            # Sometimes GitHub Sends us an error message in a dict instead of a string.
            # While a bit annoying, it is easy to work around
            if 'message' in response_data:
                try:
                    assert 'API rate limit exceeded' not in response_data['message']
                except AssertionError as e:
                    self.logger.info(
                        f"Detected error in response data from gitHub. Trying again... Error: {e}")
                    attempts += 1
                    continue
            # self.logger.info(f"Returned dict: {response_data}")
            success = True
            break
        elif type(response_data) == list:
            self.logger.warning("Wrong type returned, trying again...")
            self.logger.info(f"Returned list: {response_data}")
        elif type(response_data) == str:
            self.logger.info(
                f"Warning! page_data was string: {response_data}")
            if "<!DOCTYPE html>" in response_data:
                self.logger.info("HTML was returned, trying again...\n")
            elif len(response_data) == 0:
                self.logger.warning("Empty string, trying again...\n")
            else:
                try:
                    # Sometimes raw text can be converted to a dict
                    response_data = json.loads(response_data)
                    success = True
                    break
                except:
                    pass
        attempts += 1
    if not success:
        return None
    return response_data
def insert_alias(self, contributor, email):
    """
    Record *email* as an alias of an already-inserted contributor.

    The contributor is looked up by gh_user_id (cntrb_id is an
    auto-increment key shared across workers, so it must be re-queried),
    then a row is inserted into contributors_aliases; duplicates are
    silently ignored.

    :param contributor: enriched contributor record containing gh_user_id
        (and optionally cntrb_canonical)
    :param email: raw commit email to register as an alias
    :raises LookupError: when no contributor row matches gh_user_id
    """
    # Insert cntrb_id and email of the corresponding record into the alias table
    # Another database call to get the contributor id is needed because its an autokeyincrement that is accessed by multiple workers
    # Same principle as enrich_cntrb_id method.
    contributor_table_data = self.db.execute(
        s.sql.select([s.column('cntrb_id'), s.column('cntrb_canonical')]).where(
            self.contributors_table.c.gh_user_id == contributor["gh_user_id"]
        )
    ).fetchall()
    # self.logger.info(f"Contributor query: {contributor_table_data}")
    # Handle potential failures
    if len(contributor_table_data) == 1:
        self.logger.info(
            f"cntrb_id {contributor_table_data[0]['cntrb_id']} found in database and assigned to enriched data")
    elif len(contributor_table_data) == 0:
        self.logger.error("Couldn't find contributor in database. Something has gone very wrong. Augur ran into a contributor whose login can be found in the contributor's table, but cannot be retrieved via the user_id that was gotten using the same login.")
        raise LookupError
    else:
        # Multiple rows match: the first one is used below.
        self.logger.info(
            f"There are more than one contributors in the table with gh_user_id={contributor['gh_user_id']}")
    self.logger.info(f"Creating alias for email: {email}")
    # Insert a new alias that corresponds to where the contributor was found
    # use the email of the new alias for canonical_email if the api returns NULL
    # TODO: It might be better to have the canonical_email allowed to be NUll because right now it has a null constraint.
    alias = {
        "cntrb_id": contributor_table_data[0]['cntrb_id'],
        "alias_email": email,
        "canonical_email": contributor['cntrb_canonical'] if 'cntrb_canonical' in contributor and contributor['cntrb_canonical'] is not None else email,
        "tool_source": self.tool_source,
        "tool_version": self.tool_version,
        "data_source": self.data_source
    }
    # Insert new alias
    try:
        self.db.execute(
            self.contributors_aliases_table.insert().values(alias))
    except s.exc.IntegrityError:
        # It's expected to catch duplicates this way so no output is logged.
        # NOTE(review): the log call below still executes after `pass`,
        # contradicting the comment above — confirm whether it should go.
        pass
        self.logger.info(f"alias {alias} already exists")
    except Exception as e:
        self.logger.info(
            f"Ran into issue with alias: {alias}. Error: {e}")
    return
# Takes the user data from the endpoint as arg
# Updates the alias table if the login is already in the contributor's table with the new email.
# Returns whether the login was found in the contributors table
def resolve_if_login_existing(self, contributor):
    """
    Return True when the contributor's login already exists in the
    contributors table, False otherwise (logging that it can be added).
    """
    # Parameterized lookup of the login in the contributors table.
    login_query = s.sql.text("""
        SELECT cntrb_id from contributors
        WHERE cntrb_login = :gh_login_value
    """).bindparams(gh_login_value=contributor['cntrb_login'])

    matching_rows = self.db.execute(login_query).fetchall()
    if matching_rows:
        #self.insert_alias(contributor, email) Functions should do one thing ideally.
        return True

    # If not found, return false
    self.logger.info(
        f"Contributor not found in contributors table but can be added. Adding...")
    return False
def update_contributor(self, cntrb, max_attempts=3):
    """
    Update an existing contributor row (matched by gh_user_id) with the
    enriched data in *cntrb*, retrying on transient connection errors.

    A non-NULL cntrb_canonical already in the database is never
    overwritten; in that case the key is removed from *cntrb* in place.

    :param cntrb: dict of contributor column values, must contain gh_user_id
    :param max_attempts: number of update attempts before giving up
    """
    # Get primary key so that we can update
    contributor_table_data = self.db.execute(
        s.sql.select([s.column('cntrb_id'), s.column('cntrb_canonical') ]).where(
            self.contributors_table.c.gh_user_id == cntrb["gh_user_id"]
        )
    ).fetchall()
    attempts = 0
    #make sure not to overwrite canonical email if it isn't NULL
    canonical_email = contributor_table_data[0]['cntrb_canonical']
    #check if the contributor has a NULL canonical email or not
    self.logger.info(f"The value of the canonical email is : {canonical_email}")
    if canonical_email is not None:
        del cntrb["cntrb_canonical"]
    while attempts < max_attempts:
        try:
            # Using with on a sqlalchemy connection prevents 'Connection refused' error
            # Ensures all database requests use the same connection
            with self.db.connect() as connection:
                # Use primary key to update the correct data.
                # It is important that non null data is not overwritten.
                connection.execute(self.contributors_table.update().where(
                    self.contributors_table.c.cntrb_id == contributor_table_data[0]['cntrb_id']
                ).values(cntrb))
            break # break if success.
        except Exception as e:
            self.logger.info(
                f"Ran into exception updating contributor with data: {cntrb}. Error: {e}")
            # give a delay so that we have a greater chance of success.
            time.sleep(1)
        attempts += 1
# Try every distinct email found within a commit for possible username resolution.
# Add email to garbage table if can't be resolved.
# \param contributor is the raw database entry
# \return A dictionary of response data from github with potential logins on success.
# None on failure
def fetch_username_from_email(self, commit):
    """
    Try to resolve a GitHub login from a commit's raw author email.

    On failure the email is recorded in the unresolved_commit_emails
    garbage table so later runs can skip it.

    :param commit: dict with at least 'email_raw' and 'name' keys
    :return: the GitHub search-API response dict on success, None on failure
    """
    # Default to failed state
    login_json = None
    self.logger.info(f"Here is the commit: {commit}")
    # email = commit['email_raw'] if 'email_raw' in commit else commit['email_raw']
    if len(commit['email_raw']) <= 2:
        return login_json  # Don't bother with emails that are blank or less than 2 characters
    try:
        url = self.create_endpoint_from_email(commit['email_raw'])
    except Exception as e:
        self.logger.info(
            f"Couldn't resolve email url with given data. Reason: {e}")
        # If the method throws an error it means that we can't hit the endpoint so we can't really do much
        return login_json
    login_json = self.request_dict_from_endpoint(
        url, timeout_wait=30)
    # Check if the email result got anything, if it failed try a name search.
    if login_json == None or 'total_count' not in login_json or login_json['total_count'] == 0:
        self.logger.info(
            f"Could not resolve the username from {commit['email_raw']}")
        # Go back to failure condition
        login_json = None
        # Add the email that couldn't be resolved to a garbage table.
        unresolved = {
            "email": commit['email_raw'],
            "name": commit['name'],
            "tool_source": self.tool_source,
            "tool_version": self.tool_version,
            "data_source": self.data_source
        }
        self.logger.info(f"Inserting data to unresolved: {unresolved}")
        try:
            self.db.execute(
                self.unresolved_commit_emails_table.insert().values(unresolved))
        except s.exc.IntegrityError:
            pass  # Pass because duplicate checking is expected
        except Exception as e:
            self.logger.info(
                f"Could not create new unresolved email {unresolved['email']}. Error: {e}")
    else:
        # Return endpoint dictionary if email found it.
        return login_json
    # failure condition returns None
    return login_json
# Update the contributors table from the data facade has gathered.
def insert_facade_contributors(self, repo_id):
    """
    Enrich facade-collected commit authors for one repo with GitHub data.

    For every distinct (name, raw email) pair in the commits table that is
    not yet resolved, search GitHub by email (then by name), insert or
    update the contributor row, record the email as an alias, and finally
    stamp matching commits with the resolved cntrb_id.

    :param repo_id: Repository ID whose commit authors should be resolved
    """
    self.logger.info(
        "Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(repo_id))
    # Get all of the commit data's emails and names from the commit table that do not appear
    # in the contributors table or the contributors_aliases table.
    new_contrib_sql = s.sql.text("""
        SELECT DISTINCT
            commits.cmt_author_name AS NAME,--commits.cmt_id AS id,
            commits.cmt_author_raw_email AS email_raw,
            'not_unresolved' as resolution_status
        FROM
            commits
        WHERE
            commits.repo_id = :repo_id
            AND (NOT EXISTS ( SELECT contributors.cntrb_canonical FROM contributors WHERE contributors.cntrb_canonical = commits.cmt_author_raw_email )
            or NOT EXISTS ( SELECT contributors_aliases.alias_email from contributors_aliases where contributors_aliases.alias_email = commits.cmt_author_raw_email)
            AND ( commits.cmt_author_name ) IN ( SELECT C.cmt_author_name FROM commits AS C WHERE C.repo_id = :repo_id GROUP BY C.cmt_author_name ))
        GROUP BY
            commits.cmt_author_name,
            commits.cmt_author_raw_email
        UNION
        SELECT DISTINCT
            commits.cmt_author_name AS NAME,--commits.cmt_id AS id,
            commits.cmt_author_raw_email AS email_raw,
            'unresolved' as resolution_status
        FROM
            commits
        WHERE
            commits.repo_id = :repo_id
            AND EXISTS ( SELECT unresolved_commit_emails.email FROM unresolved_commit_emails WHERE unresolved_commit_emails.email = commits.cmt_author_raw_email )
            AND ( commits.cmt_author_name ) IN ( SELECT C.cmt_author_name FROM commits AS C WHERE C.repo_id = :repo_id GROUP BY C.cmt_author_name )
        GROUP BY
            commits.cmt_author_name,
            commits.cmt_author_raw_email
        ORDER BY
            NAME
    """)
    new_contribs = json.loads(pd.read_sql(new_contrib_sql, self.db, params={
        'repo_id': repo_id}).to_json(orient="records"))
    # Try to get GitHub API user data from each unique commit email.
    for contributor in new_contribs:
        # Get list of all emails in the commit data.
        # Start with the fields we know that we can start with
        email = contributor['email_raw'] if 'email_raw' in contributor else contributor['email']
        # check the email to see if it already exists in contributor_aliases
        try:
            # Look up email to see if resolved
            alias_table_data = self.db.execute(
                s.sql.select([s.column('alias_email')]).where(
                    self.contributors_aliases_table.c.alias_email == email
                )
            ).fetchall()
            if len(alias_table_data) >= 1:
                # Move on if email resolved
                self.logger.info(
                    f"Email {email} has been resolved earlier.")
                continue
        except Exception as e:
            self.logger.info(
                f"alias table query failed with error: {e}")
        # Try to get login from all possible emails
        # Is None upon failure.
        login_json = self.fetch_username_from_email(contributor)
        # Check if the email result got anything, if it failed try a name search.
        if login_json == None or 'total_count' not in login_json or login_json['total_count'] == 0:
            self.logger.info(
                "Could not resolve the username from the email. Trying a name only search...")
            try:
                url = self.create_endpoint_from_name(contributor)
            except Exception as e:
                self.logger.info(
                    f"Couldn't resolve name url with given data. Reason: {e}")
                continue
            login_json = self.request_dict_from_endpoint(
                url, timeout_wait=30)
        # total_count is the count of username's found by the endpoint.
        if login_json == None or 'total_count' not in login_json:
            self.logger.info(
                "Search query returned an empty response, moving on...\n")
            continue
        if login_json['total_count'] == 0:
            self.logger.info(
                "Search query did not return any results, adding commit's table remains null...\n")
            continue
        # Grab first result and make sure it has the highest match score
        match = login_json['items'][0]
        for item in login_json['items']:
            if item['score'] > match['score']:
                match = item
        self.logger.info("When searching for a contributor with info {}, we found the following users: {}\n".format(
            contributor, match))
        # Fetch the full user record for the best-scoring search hit.
        url = ("https://api.github.com/users/" + match['login'])
        user_data = self.request_dict_from_endpoint(url)
        if user_data == None:
            self.logger.warning(
                f"user_data was unable to be reached. Skipping...")
            continue
        # Use the email found in the commit data if api data is NULL
        emailFromCommitData = contributor['email_raw'] if 'email_raw' in contributor else contributor['email']
        self.logger.info(
            f"Successfully retrieved data from github for email: {emailFromCommitData}")
        # Get name from commit if not found by GitHub
        name_field = contributor['commit_name'] if 'commit_name' in contributor else contributor['name']
        try:
            # try to add contributor to database
            cntrb = {
                "cntrb_login": user_data['login'],
                "cntrb_created_at": user_data['created_at'],
                "cntrb_email": user_data['email'] if 'email' in user_data else None,
                "cntrb_company": user_data['company'] if 'company' in user_data else None,
                "cntrb_location": user_data['location'] if 'location' in user_data else None,
                # "cntrb_type": , dont have a use for this as of now ... let it default to null
                "cntrb_canonical": user_data['email'] if 'email' in user_data and user_data['email'] is not None else emailFromCommitData,
                "gh_user_id": user_data['id'],
                "gh_login": user_data['login'],
                "gh_url": user_data['url'],
                "gh_html_url": user_data['html_url'],
                "gh_node_id": user_data['node_id'],
                "gh_avatar_url": user_data['avatar_url'],
                "gh_gravatar_id": user_data['gravatar_id'],
                "gh_followers_url": user_data['followers_url'],
                "gh_following_url": user_data['following_url'],
                "gh_gists_url": user_data['gists_url'],
                "gh_starred_url": user_data['starred_url'],
                "gh_subscriptions_url": user_data['subscriptions_url'],
                "gh_organizations_url": user_data['organizations_url'],
                "gh_repos_url": user_data['repos_url'],
                "gh_events_url": user_data['events_url'],
                "gh_received_events_url": user_data['received_events_url'],
                "gh_type": user_data['type'],
                "gh_site_admin": user_data['site_admin'],
                "cntrb_last_used": None if 'updated_at' not in user_data else user_data['updated_at'],
                # Get name from commit if api doesn't get it.
                "cntrb_full_name": name_field if 'name' not in user_data or user_data['name'] is None else user_data['name'],
                "tool_source": self.tool_source,
                "tool_version": self.tool_version,
                "data_source": self.data_source
            }
            # self.logger.info(f"{cntrb}")
        except Exception as e:
            # NOTE(review): if building cntrb raises, execution continues and
            # the code below may reference an undefined/stale cntrb — confirm
            # whether this should `continue` instead.
            self.logger.info(f"Error: {e}")
        # Check if the github login exists in the contributors table and add the emails to alias' if it does.
        # Also update the contributor record with commit data if we can.
        try:
            if not self.resolve_if_login_existing(cntrb):
                try:
                    self.db.execute(
                        self.contributors_table.insert().values(cntrb))
                except Exception as e:
                    self.logger.info(
                        f"Ran into likely database collision. Assuming contributor exists in database. Error: {e}")
            else:
                self.update_contributor(cntrb)
            # Update alias after insertion. Insertion needs to happen first so we can get the autoincrementkey
            self.insert_alias(cntrb, emailFromCommitData)
        except LookupError as e:
            self.logger.info(
                ''.join(traceback.format_exception(None, e, e.__traceback__)))
            self.logger.info(
                f"Contributor id not able to be found in database despite the user_id existing. Something very wrong is happening. Error: {e}")
        # Resolve any unresolved emails if we get to this point.
        # They will get added to the alias table later
        # Do this last to absolutely make sure that the email was resolved before we remove it from the unresolved table.
        # NOTE(review): the email is interpolated with .format() — prefer a
        # bound parameter (email=:email) to rule out SQL injection/quoting bugs.
        query = s.sql.text("""
            DELETE FROM unresolved_commit_emails
            WHERE email='{}'
        """.format(email))
        self.logger.info(f"Updating now resolved email {email}")
        try:
            self.db.execute(query)
        except Exception as e:
            self.logger.info(
                f"Deleting now resolved email failed with error: {e}")
    # sql query used to find corresponding cntrb_id's of emails found in the contributor's table
    # i.e., if a contributor already exists, we use it!
    resolve_email_to_cntrb_id_sql = s.sql.text("""
        SELECT DISTINCT
            cntrb_id,
            contributors.cntrb_canonical AS email,
            commits.cmt_author_raw_email
        FROM
            contributors,
            commits
        WHERE
            contributors.cntrb_canonical = commits.cmt_author_raw_email
            AND commits.repo_id = :repo_id
        UNION
        SELECT DISTINCT
            cntrb_id,
            contributors_aliases.alias_email AS email,
            commits.cmt_author_raw_email
        FROM
            contributors_aliases,
            commits
        WHERE
            contributors_aliases.alias_email = commits.cmt_author_raw_email
            AND commits.repo_id = :repo_id
    """)
    # Get a list of dicts that contain the emails and cntrb_id's of commits that appear in the contributor's table.
    existing_cntrb_emails = json.loads(pd.read_sql(resolve_email_to_cntrb_id_sql, self.db, params={
        'repo_id': repo_id}).to_json(orient="records"))
    # iterate through all the commits with emails that appear in contributors and give them the relevant cntrb_id.
    for cntrb_email in existing_cntrb_emails:
        self.logger.info(
            f"These are the emails and cntrb_id's returned: {cntrb_email}")
        try:
            # NOTE(review): the lookup above matched cmt_author_raw_email but
            # this update filters on cmt_committer_email — confirm intended.
            self.db.execute(self.commits_table.update().where(
                self.commits_table.c.cmt_committer_email == cntrb_email['email']
            ).values({
                'cmt_ght_author_id': cntrb_email['cntrb_id']
            }))
        except Exception as e:
            self.logger.info(
                f"Ran into problem when enriching commit data. Error: {e}")
    return
def create_endpoint_from_repo_id(self, repo_id):
    """
    Build the paginated GitHub contributors endpoint for a repo.

    The URL ends in an unformatted ``page={}`` placeholder for use with
    paginate_endpoint.

    :raises LookupError: when repo_id has no row in the repo table.
    """
    # Parameterized lookup of the repo's path components.
    path_query = s.sql.text("""
        SELECT repo_path, repo_name from repo
        WHERE repo_id = :repo_id_bind
    """).bindparams(repo_id_bind=repo_id)

    rows = self.db.execute(path_query).fetchall()

    # if not found
    if not rows:
        raise LookupError

    self.logger.info(f"Result: {rows}")

    # repo_path looks like "<group>/<owner>/"; combine owner with repo_name.
    owner_and_name = rows[0]['repo_path'].split("/")[1] + "/" + rows[0]['repo_name']

    # Create endpoint for committers in a repo.
    url = "https://api.github.com/repos/" + owner_and_name + "/contributors?state=all&direction=asc&per_page=100&page={}"
    self.logger.info(f"Url: {url}")
    return url
# Get all the committer data for a repo.
# Used by facade in facade03analyzecommit
def grab_committer_list(self, repo_id, platform="github"):
    """Collect the contributors of a repository and insert them into the
    contributors table.

    Builds the paginated GitHub contributors endpoint for ``repo_id``,
    walks it via ``paginate_endpoint`` and inserts each returned committer.
    Logs and returns silently if the endpoint cannot be built.

    Parameters:
        repo_id: Augur repo_id of the repository to process.
        platform: Source platform. NOTE(review): currently unused in this
            body — the endpoint is always built for GitHub; confirm before
            relying on it.
    """
    # Create API endpoint from repo_id
    try:
        endpoint = self.create_endpoint_from_repo_id(repo_id)
    except Exception as e:
        self.logger.info(
            f"Could not create endpoint from repo {repo_id} because of ERROR: {e}")
        # Exit on failure
        return
    # HIt the endpoint if we can and put it in a dict
    #committer_json = self.request_dict_from_endpoint(
    #    endpoint, timeout_wait=0)
    # Prepare for pagination and insertion into the contributor's table with an action map
    # TODO: this might be github specific
    committer_action_map = {
        'insert': {
            'source': ['login'],
            'augur': ['cntrb_login']
        }
    }

    # Create a method so that paginate_endpoint knows how our records need to be inserted
    def committer_insert(inc_source_comitters, action_map):
        """Insert every committer in ``inc_source_comitters['all']``; rows
        that fail (e.g. duplicates) are skipped and only logged."""
        if len(inc_source_comitters['all']) == 0:
            self.logger.info("There are no committers for this repository.\n")
            #self.register_task_completion(self.task_info, self.repo_id, 'pull_requests')
            return
        #self.logger.debug(f"inc_source_committers is: {inc_source_comitters} and the action map is {action_map}...")
        # Map each raw GitHub contributor record onto a contributors-table row.
        cntrbs_insert = [
            {
                "cntrb_login": cntrb['login'],
                "cntrb_company": cntrb['company'] if 'company' in cntrb else None,
                # "cntrb_type": , dont have a use for this as of now ... let it default to null
                "gh_user_id": cntrb['id'],
                "gh_login": cntrb['login'],
                "gh_url": cntrb['url'],
                "gh_html_url": cntrb['html_url'],
                "gh_node_id": cntrb['node_id'],
                "gh_avatar_url": cntrb['avatar_url'],
                "gh_gravatar_id": cntrb['gravatar_id'],
                "gh_followers_url": cntrb['followers_url'],
                "gh_following_url": cntrb['following_url'],
                "gh_gists_url": cntrb['gists_url'],
                "gh_starred_url": cntrb['starred_url'],
                "gh_subscriptions_url": cntrb['subscriptions_url'],
                "gh_organizations_url": cntrb['organizations_url'],
                "gh_repos_url": cntrb['repos_url'],
                "gh_events_url": cntrb['events_url'],
                "gh_received_events_url": cntrb['received_events_url'],
                "gh_type": cntrb['type'],
                "gh_site_admin": cntrb['site_admin'],
                "cntrb_last_used" : None if 'updated_at' not in cntrb else cntrb['updated_at'],
                "cntrb_full_name" : None if 'name' not in cntrb else cntrb['name'],
                "tool_source": self.tool_source,
                "tool_version": self.tool_version,
                "data_source": self.data_source
            } for cntrb in inc_source_comitters['all']
        ]
        inserted = len(inc_source_comitters['all'])
        # Try to insert all committers
        for committer in cntrbs_insert:
            try:
                self.db.execute(
                    self.contributors_table.insert().values(committer))
            except Exception as e:
                self.logger.info(f"Could not insert new committer ERROR: {e}")
                inserted -= 1  # Decrement the insertion count
        self.logger.info(f"Inserted {inserted} new contributors.")
        return

    source_committers = self.paginate_endpoint(
        endpoint, action_map=committer_action_map, table=self.contributors_table,
        where_clause=True,
        stagger=True,
        insertion_method=committer_insert
    )
    #self.logger.info(f"source committers: {source_committers}")
    # Insert whatever the paginator returned as a final batch.
    committer_insert(source_committers, committer_action_map)
''' Future method to try and get additional info for partially populated users.
def get_information_from_commits(self, repo_id):
get_cntrb_info_from_commits = s.sql.text("""
SELECT DISTINCT
contributors.cntrb_login
FROM
contributors
WHERE
cntrb_canonical IS NULL
""")
Call the Github API for each of these people and fill in
any missing information '''
|
fsmosca/chess-tuning-tools | tune/summary.py | <gh_stars>10-100
import numpy as np
__all__ = ["confidence_intervals"]
def _round_interval(interval, threshold=0.01, max_precision=32):
    """Round an interval to the fewest decimals that keep it faithful.

    Tries increasing numbers of decimal places until the rounded bounds
    (a) change the interval width by at most ``threshold`` (relative error),
    (b) remain two distinct values, and (c) stay correctly ordered.

    Parameters
    ----------
    interval : array-like of two floats
        Lower and upper bound, ``interval[0] <= interval[1]``.
    threshold : float, default=0.01
        Maximum relative error of the rounded interval width.
    max_precision : int, default=32
        Maximum number of decimals to try.

    Returns
    -------
    tuple of two floats
        The rounded interval. If no precision up to ``max_precision``
        satisfies the criteria, the interval is returned unrounded.
    """
    diff = interval[1] - interval[0]
    if diff == 0:
        # Degenerate interval — nothing to round.
        return tuple(interval)
    for decimals in range(max_precision):
        rounded = np.around(interval, decimals=decimals)
        rounded_diff = np.diff(rounded).item()
        rel_error = abs(rounded_diff - diff) / diff
        if (
            rel_error <= threshold
            and len(np.unique(rounded)) == 2
            and rounded[0] < rounded[1]
        ):
            return tuple(rounded)
    # Fix: previously the function fell off the loop and implicitly returned
    # None when no precision satisfied the criteria (e.g. extremely small
    # bounds that round to 0 at every tried precision). Return the interval
    # unrounded instead.
    return tuple(interval)
def _round_all_intervals(intervals, threshold=0.01, max_precision=32):
    """Apply :func:`_round_interval` to every interval in a nested structure.

    Each entry of ``intervals`` is either one interval or a sequence of
    intervals (the multimodal case). The per-dimension nesting is preserved:
    the result is always a list of lists of rounded intervals.
    """
    rounded_dims = []
    for intervals_for_dim in intervals:
        # A sequence of intervals has sequences as elements; a single
        # interval has scalars.
        if hasattr(intervals_for_dim[0], "__len__"):
            rounded = [
                _round_interval(
                    single, threshold=threshold, max_precision=max_precision
                )
                for single in intervals_for_dim
            ]
        else:
            rounded = [
                _round_interval(
                    intervals_for_dim,
                    threshold=threshold,
                    max_precision=max_precision,
                )
            ]
        rounded_dims.append(rounded)
    return rounded_dims
def confidence_intervals(
    optimizer,
    param_names=None,
    hdi_prob=0.95,
    multimodal=True,
    opt_samples=200,
    space_samples=500,
    only_mean=True,
    random_state=None,
    max_precision=32,
    threshold=0.01,
):
    """Compute confidence intervals of the optimum and format them as a table.

    Queries the optimizer for highest-density intervals, rounds them to a
    readable precision and renders an aligned plain-text table with one row
    per interval (multiple rows per parameter in the multimodal case).

    Parameters
    ----------
    optimizer : bask.Optimizer
        Fitted optimizer providing ``optimum_intervals``.
    param_names : sequence of str or None, default=None
        Names for the table rows; generic "Parameter i" names if None.
    hdi_prob : float, default=0.95
        Probability mass each interval should contain.
    multimodal : bool, default=True
        If True, multiple intervals per parameter may be reported.
    opt_samples : int, default=200
        Forwarded to ``optimum_intervals``.
    space_samples : int, default=500
        Forwarded to ``optimum_intervals``.
    only_mean : bool, default=True
        Forwarded to ``optimum_intervals``.
    random_state : int, RandomState instance or None, default=None
        Forwarded to ``optimum_intervals``.
    max_precision : int, default=32
        Rounding parameter; see ``_round_interval``.
    threshold : float, default=0.01
        Rounding parameter; see ``_round_interval``.

    Returns
    -------
    str
        Formatted table of lower/upper bounds per parameter.
    """
    if param_names is None:
        param_names = [
            "Parameter {}".format(i) for i in range(len(optimizer.space.dimensions))
        ]
    intervals = optimizer.optimum_intervals(
        hdi_prob=hdi_prob,
        multimodal=multimodal,
        opt_samples=opt_samples,
        space_samples=space_samples,
        only_mean=only_mean,
        random_state=random_state,
    )
    rounded = _round_all_intervals(
        intervals, max_precision=max_precision, threshold=threshold
    )
    # Column widths: at least as wide as the column headers, otherwise as
    # wide as the longest entry.
    max_param_length = max(max((len(x) for x in param_names)), 9)
    max_lb = max(max(len(str(row[0])) for sub in rounded for row in sub), 11)
    max_ub = max(max(len(str(row[1])) for sub in rounded for row in sub), 11)
    format_string = "{:<{}} {:>{}} {:>{}}\n"
    output = format_string.format(
        "Parameter", max_param_length, "Lower bound", max_lb, "Upper bound", max_ub
    )
    output += "{:-^{}}\n".format("", max_param_length + max_lb + max_ub + 4)
    for sub, name in zip(rounded, param_names):
        for i, interval in enumerate(sub):
            # Only print the parameter name on its first interval row.
            if i == 0:
                name_out = name
            else:
                name_out = ""
            output += format_string.format(
                name_out, max_param_length, interval[0], max_lb, interval[1], max_ub
            )
    return output
|
fsmosca/chess-tuning-tools | tests/__init__.py | <filename>tests/__init__.py<gh_stars>10-100
"""Unit test package for chess_tuning_tools."""
|
fsmosca/chess-tuning-tools | tests/test_priors.py | <filename>tests/test_priors.py
import numpy as np
from pytest import approx, raises
from tune.priors import create_priors, make_invgamma_prior, roundflat
def test_roundflat():
    """roundflat is ~0 inside the flat region and -inf for x <= 0."""
    assert roundflat(0.3) == approx(0.0, abs=1e-6)
    assert roundflat(0.0) == -np.inf
    assert roundflat(-1.0) == -np.inf
def test_make_invgamma_prior():
    """The default fit reproduces known shape/scale; invalid bounds raise."""
    prior = make_invgamma_prior()
    assert prior.kwds["a"] == approx(8.919240823584246)
    assert prior.kwds["scale"] == approx(1.7290248731437994)
    # Non-positive or inverted bounds must be rejected:
    with raises(ValueError):
        make_invgamma_prior(lower_bound=-1e-10)
    with raises(ValueError):
        make_invgamma_prior(upper_bound=-1e-10)
    with raises(ValueError):
        make_invgamma_prior(lower_bound=0.5, upper_bound=0.1)
def test_create_priors():
    """create_priors yields signal + n lengthscale + noise priors with known
    log-density values; non-positive scales raise."""
    priors = create_priors(n_parameters=3)
    # 1 signal prior + 3 lengthscale priors + 1 noise prior:
    assert len(priors) == 5
    assert priors[0](2.0) == approx(-1.536140897416146)
    assert priors[1](2.0) == approx(-23.620792572134874)
    assert priors[2](2.0) == approx(-23.620792572134874)
    assert priors[3](2.0) == approx(-23.620792572134874)
    assert priors[4](2.0) == approx(-10262570.41553909)
    with raises(ValueError):
        create_priors(n_parameters=3, signal_scale=0.0)
    with raises(ValueError):
        create_priors(n_parameters=3, noise_scale=0.0)
|
fsmosca/chess-tuning-tools | tests/test_io.py | <gh_stars>0
from tune.io import load_tuning_config
def test_load_tuning_config():
    """load_tuning_config splits a config dict into remaining settings,
    engine commands, fixed parameters and parameter ranges."""
    testdict = {
        "engines": [
            {"command": "lc0", "fixed_parameters": {"CPuctBase": 13232, "Threads": 2}},
            {"command": "sf", "fixed_parameters": {"Threads": 8}},
        ],
        "parameter_ranges": {"CPuct": "Real(0.0, 1.0)"},
        "gp_samples": 100,
    }
    json_dict, commands, fixed_params, param_ranges = load_tuning_config(testdict)
    # "engines" and "parameter_ranges" are stripped; only settings remain:
    assert len(json_dict) == 1
    assert "gp_samples" in json_dict
    assert len(commands) == 2
    assert len(fixed_params) == 2
    assert len(param_ranges) == 1
|
fsmosca/chess-tuning-tools | tune/priors.py | import warnings
from typing import Callable, List
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import halfnorm, invgamma
from scipy.stats._distn_infrastructure import rv_frozen # noqa
__all__ = ["make_invgamma_prior", "roundflat", "create_priors"]
def roundflat(x, a_low=2.0, a_high=8.0, d_low=0.005, d_high=1.2):
    """Log-density of the "round flat" prior.

    The prior is essentially flat between ``d_low`` and ``d_high`` and drops
    smoothly towards -inf outside of that interval. ``a_low`` and ``a_high``
    control how steeply the density falls at the respective boundary.

    Args:
        x (float): Parameter value in [0, inf) to evaluate.
        a_low (float): Steepness of the drop at the lower boundary ``d_low``.
        a_high (float): Steepness of the drop at the upper boundary ``d_high``.
        d_low (float): Lower boundary; the log probability there is -2.
        d_high (float): Upper boundary; the log probability there is -2.

    Returns:
        The log probability at ``x``.
    """
    if x <= 0:
        return -np.inf
    lower_term = (x / d_low) ** (-2 * a_low)
    upper_term = (x / d_high) ** (2 * a_high)
    return -2 * (lower_term + upper_term)
def make_invgamma_prior(
    lower_bound: float = 0.1, upper_bound: float = 0.5
) -> rv_frozen:
    """Create an inverse-gamma prior with 98% of its mass inside the bounds.

    The distribution parameters are fitted such that the 1% quantile lies at
    ``lower_bound`` and the 99% quantile at ``upper_bound``. Not every
    combination of bounds is feasible; infeasible ones can end in a
    RuntimeError.

    Parameters
    ----------
    lower_bound : float, default=0.1
        Location of the 1 % quantile of the cumulative density.
    upper_bound : float, default=0.5
        Location of the 99 % quantile of the cumulative density.

    Returns
    -------
    scipy.stats._distn_infrastructure.rv_frozen
        The frozen inverse-gamma distribution with fitted shape parameters.

    Raises
    ------
    ValueError
        If a bound is zero/negative or the upper bound does not exceed the
        lower bound.
    """
    if lower_bound <= 0 or upper_bound <= 0:
        raise ValueError("The bounds cannot be equal to or smaller than 0.")
    if lower_bound >= upper_bound:
        raise ValueError(
            "Lower bound needs to be strictly smaller than the upper " "bound."
        )

    def quantile(q, a, scale):
        # Quantile function of the inverse-gamma distribution, used as the
        # model to fit against the two requested quantile locations.
        return invgamma.ppf(q, a=a, scale=scale)

    # curve_fit can emit harmless warnings while searching; suppress them.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fitted, _ = curve_fit(quantile, [0.01, 0.99], [lower_bound, upper_bound])
    shape, scale = fitted
    return invgamma(a=shape, scale=scale)
def create_priors(
    n_parameters: int,
    signal_scale: float = 4.0,
    lengthscale_lower_bound: float = 0.1,
    lengthscale_upper_bound: float = 0.5,
    noise_scale: float = 0.0006,
) -> List[Callable[[float], float]]:
    """Create a list of priors to be used for the hyperparameters of the tuning process.

    Parameters
    ----------
    n_parameters : int
        Number of parameters to be optimized.
    signal_scale : float
        Prior scale of the signal (standard deviation) which is used to parametrize a
        half-normal distribution.
    lengthscale_lower_bound : float
        Lower bound of the inverse-gamma lengthscale prior. It marks the point at which
        1 % of the cumulative density is reached.
    lengthscale_upper_bound : float
        Upper bound of the inverse-gamma lengthscale prior. It marks the point at which
        99 % of the cumulative density is reached.
    noise_scale : float
        Prior scale of the noise (standard deviation) which is used to parametrize a
        half-normal distribution.

    Returns
    -------
    list of callables
        List of priors in the following order:
         - signal prior
         - lengthscale prior (n_parameters times)
         - noise prior

    Raises
    ------
    ValueError
        If ``signal_scale`` or ``noise_scale`` is not strictly positive.
    """
    if signal_scale <= 0.0:
        raise ValueError(
            f"The signal scale needs to be strictly positive. Got {signal_scale}."
        )
    if noise_scale <= 0.0:
        raise ValueError(
            f"The noise scale needs to be strictly positive. Got {noise_scale}."
        )
    signal_prior = halfnorm(scale=signal_scale)
    lengthscale_prior = make_invgamma_prior(
        lower_bound=lengthscale_lower_bound, upper_bound=lengthscale_upper_bound
    )
    noise_prior = halfnorm(scale=noise_scale)
    # The returned priors take hyperparameters in log space (x = log variance
    # for signal/noise, x = log lengthscale). Each lambda converts back with
    # np.exp and adds the log-Jacobian of that transformation:
    # for theta = exp(x) the correction is +x, for sigma = sqrt(exp(x)) it is
    # +x/2 - log(2).
    priors = [lambda x: signal_prior.logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0)]
    for _ in range(n_parameters):
        priors.append(lambda x: lengthscale_prior.logpdf(np.exp(x)) + x)
    priors.append(
        lambda x: noise_prior.logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0)
    )
    return priors
|
fsmosca/chess-tuning-tools | tune/db_workers/__init__.py | from .tuning_client import TuningClient
from .tuning_server import TuningServer
__all__ = ["TuningClient", "TuningServer"]
|
fsmosca/chess-tuning-tools | tune/io.py | <reponame>fsmosca/chess-tuning-tools<filename>tune/io.py
import json
import re
import sys
from ast import literal_eval
from collections.abc import MutableMapping
from pathlib import Path
import skopt.space as skspace
from skopt.space.space import check_dimension
__all__ = [
"InitStrings",
"uci_tuple",
"parse_ranges",
"load_tuning_config",
"prepare_engines_json",
"write_engines_json",
]
# TODO: Backup file to restore it, should there be an error
def uci_tuple(uci_string):
    """Parse a UCI ``setoption``-style string into a ``(name, value)`` tuple.

    Values that parse as floats are returned as floats, everything else is
    returned as the raw string. On a malformed input an error is printed and
    the process exits with status 1.
    """
    match = re.search(r"name\s+(\S.*?)\s+value\s+(.*?)\s*$", uci_string)
    if match is None:
        print(f"Error parsing UCI tuples:\n{uci_string}")
        sys.exit(1)
    name, raw_value = match.groups()
    try:
        value = float(raw_value)
    except ValueError:
        # Not numeric — keep the textual value as-is.
        value = raw_value
    return name, value
def _set_option(name, value):
    """Render a UCI ``setoption`` string; booleans are lowercased per UCI."""
    rendered = str(value)
    if rendered in ("False", "True"):
        rendered = rendered.lower()
    return f"setoption name {name} value {rendered}"
class InitStrings(MutableMapping):
    """Dict-like view over a list of UCI ``setoption`` init strings.

    Wraps a list such as ``["uci", "setoption name Threads value 2"]`` and
    exposes the option names and values as a mutable mapping. All mutations
    are written back into the wrapped list (not a copy), so the underlying
    engines.json structure stays in sync. Bare ``"uci"`` entries are skipped
    by all mapping operations, but still counted by ``len``.
    """

    def __init__(self, init_strings):
        # Keep a reference (not a copy) so edits propagate to the caller.
        self._init_strings = init_strings

    def __len__(self):
        # NOTE: counts every entry, including bare "uci" lines.
        return len(self._init_strings)

    def __getitem__(self, key):
        """Return the value of the option named ``key``.

        Raises KeyError if no such option exists.
        """
        for s in self._init_strings:
            if s == "uci":
                continue
            name, value = uci_tuple(s)
            if key == name:
                return value
        raise KeyError(key)

    def __setitem__(self, key, value):
        """Replace an existing setoption line in place, or append a new one."""
        for i, s in enumerate(self._init_strings):
            if s == "uci":
                continue
            name, _ = uci_tuple(s)
            if key == name:
                self._init_strings[i] = _set_option(key, value)
                return
        self._init_strings.append(_set_option(key, value))

    def __delitem__(self, key):
        """Remove the setoption line for ``key``; raise KeyError if absent."""
        elem = -1
        for i, s in enumerate(self._init_strings):
            if s == "uci":
                continue
            name, _ = uci_tuple(s)
            if key == name:
                elem = i
                break
        if elem != -1:
            # Fix: delete via the recorded index instead of the leaked loop
            # variable ``i``, which only matched ``elem`` by coincidence.
            del self._init_strings[elem]
        else:
            raise KeyError(key)

    def __contains__(self, key):
        for s in self._init_strings:
            if s == "uci":
                continue
            name, _ = uci_tuple(s)
            if key == name:
                return True
        return False

    def __iter__(self):
        """Yield the option names in list order, skipping "uci" entries."""
        for s in self._init_strings:
            if s == "uci":
                continue
            name, _ = uci_tuple(s)
            yield name

    def __repr__(self):
        return repr(self._init_strings)
def _make_numeric(s):
    """Convert ``s`` to int if possible, else float, else return it as-is."""
    for converter in (int, float):
        try:
            return converter(s)
        except ValueError:
            continue
    return s
def parse_ranges(s):
    """Parse the ``parameter_ranges`` config section into skopt dimensions.

    ``s`` may be a JSON string or an already-parsed dict mapping parameter
    names to range specifications. Each specification is either a
    list/tuple literal (e.g. ``"(0.0, 1.0)"``) or a skopt dimension
    constructor call (e.g. ``"Real(0.0, 1.0)"``).

    Returns
    -------
    dict
        Mapping of parameter name to skopt.space dimension object.

    Raises
    ------
    ValueError
        If a constructor name does not exist in skopt.space or a literal is
        not a tuple/list.
    """
    if isinstance(s, str):
        j = json.loads(s)
    else:
        j = s
    dimensions = []
    for s in j.values():
        # First check, if the string is a list/tuple or a function call:
        param_str = re.findall(r"(\w+)\(", s)
        if len(param_str) > 0:  # Function
            args, kwargs = [], dict()
            # TODO: this split does not always work
            # (example Categorical(["a", "b", "c"]))
            prior_param_strings = re.findall(r"\((.*?)\)", s)[0].split(",")
            for arg_string in prior_param_strings:
                # Check if arg or kwarg:
                if "=" in arg_string:  # kwarg:
                    # trim all remaining whitespace:
                    arg_string = "".join(arg_string.split())
                    key, val = arg_string.split("=")
                    kwargs[key] = _make_numeric(val)
                elif "[" in arg_string or "(" in arg_string:
                    # Nested collection argument — evaluate it literally.
                    args.append(literal_eval(arg_string))
                else:  # args:
                    val = _make_numeric(arg_string)
                    args.append(val)
            # Look the constructor up on skopt.space by name:
            if hasattr(skspace, param_str[0]):
                dim = getattr(skspace, param_str[0])(*args, **kwargs)
            else:
                raise ValueError("Dimension {} does not exist.".format(param_str))
            dimensions.append(dim)
        else:  # Tuple or list
            # We assume that the contents of the collection should be used as is and
            # construct a python list/tuple.
            # skopt.space.check_dimension will be used for validation:
            parsed = literal_eval(s)
            if isinstance(parsed, (tuple, list)):
                dimensions.append(check_dimension(parsed))
            else:
                raise ValueError(
                    "Dimension {} is not valid. Make sure you pass a Dimension, tuple "
                    "or list.".format(param_str)
                )
    return dict(zip(j.keys(), dimensions))
def load_tuning_config(json_dict):
    """Load the provided tuning configuration and split it up.

    NOTE: ``json_dict`` is modified in place — the "engines" and
    "parameter_ranges" keys are removed before it is returned.

    Parameters
    ----------
    json_dict : dict
        Dictionary containing the engines, their fixed parameters, the tunable
        parameter ranges and other settings used during tuning.

    Returns
    -------
    json_dict : dict
        Remaining settings after the engine configuration and the ranges have
        been stripped off
    commands : list of strings
        Engine commands, one per configured engine.
    fixed_params : list of dicts
        UCI parameters to be set for the engines.
    param_ranges : dict
        UCI parameters of the first engine which are to be optimized during tuning.
        The values correspond to skopt.space dimension definitions.

    Raises
    ------
    ValueError
        If no engines, an engine without a command, or no parameter ranges
        are configured.
    """
    commands = []
    fixed_params = []
    if "engines" not in json_dict:
        raise ValueError("Tuning config does not contain engines.")
    engines = json_dict["engines"]
    for e in engines:
        if "command" not in e:
            raise ValueError("Tuning config contains an engine without command.")
        commands.append(e["command"])
        # Fixed parameters are optional per engine; default to none.
        if "fixed_parameters" not in e:
            fixed_params.append(dict())
        else:
            fixed_params.append(e["fixed_parameters"])
    del json_dict["engines"]
    if "parameter_ranges" not in json_dict:
        raise ValueError("There are no parameter ranges defined in the config file.")
    param_ranges = parse_ranges(json_dict["parameter_ranges"])
    del json_dict["parameter_ranges"]
    # All remaining settings will be returned as is:
    return json_dict, commands, fixed_params, param_ranges
def prepare_engines_json(commands, fixed_params):
    """Build cutechess-cli engine configuration dicts.

    Each engine gets a generated name ("engine1", "engine2", ...), the UCI
    protocol and an init-string list seeded with "uci", into which its fixed
    UCI parameters are merged.
    """
    engines = []
    for index, command in enumerate(commands):
        engines.append(
            {
                "command": command,
                "name": f"engine{index + 1}",
                "initStrings": ["uci"],
                "protocol": "uci",
            }
        )
    # Merge the fixed UCI parameters into each engine's init strings:
    for engine, params in zip(engines, fixed_params):
        InitStrings(engine["initStrings"]).update(params)
    return engines
def write_engines_json(engine_json, point_dict):
    """Serialize the engine configuration to ./engines.json.

    Only the first engine's init strings are updated with ``point_dict``
    (presumably the tuned engine is always engine 1 — confirm with callers),
    but the complete ``engine_json`` list is written out.
    """
    engine = engine_json[0]
    initstr = InitStrings(engine["initStrings"])
    initstr.update(point_dict)
    with open(Path() / "engines.json", "w") as file:
        json.dump(engine_json, file, sort_keys=True, indent=4)
|
fsmosca/chess-tuning-tools | tune/db_workers/dbmodels/base_model.py | <reponame>fsmosca/chess-tuning-tools
__all__ = ["Base"]

# sqlalchemy is an optional dependency (only needed by the distributed tuning
# database workers). Fall back to None placeholders so importing this module
# does not fail when sqlalchemy is missing.
try:
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()
except ImportError:
    Base = None
    declarative_base = None
|
fsmosca/chess-tuning-tools | tune/local.py | <filename>tune/local.py
import logging
import pathlib
import re
import subprocess
import sys
import time
from datetime import datetime
from logging import Logger
from typing import Callable, List, Optional, Sequence, Tuple, Union
import dill
import matplotlib.pyplot as plt
import numpy as np
from bask import Optimizer
from numpy.random import RandomState
from scipy.optimize import OptimizeResult
from scipy.special import erfinv
from scipy.stats import dirichlet
from skopt.space import Categorical, Dimension, Integer, Real, Space
from skopt.utils import normalize_dimensions
from tune.plots import plot_objective
from tune.summary import confidence_intervals
from tune.utils import TimeControl, expected_ucb
__all__ = [
"counts_to_penta",
"initialize_optimizer",
"run_match",
"parse_experiment_result",
"print_results",
"plot_results",
"reduce_ranges",
"update_model",
"elo_to_prob",
"prob_to_elo",
"setup_logger",
]
LOGGER = "ChessTuner"
def elo_to_prob(elo, k=4.0):
    """Translate an Elo score (logit space) into a win probability.

    Parameters
    ----------
    elo : float
        A real-valued Elo score.
    k : float, optional (default=4.0)
        Scale of the logistic distribution.

    Returns
    -------
    float
        Win probability.

    Raises
    ------
    ValueError
        If ``k`` is not strictly positive.
    """
    if k <= 0:
        raise ValueError("k must be positive")
    exponent = -elo / k
    return 1 / (1 + np.power(10, exponent))
def prob_to_elo(p, k=4.0):
    """Translate a win probability into an Elo score (logit space).

    Inverse of :func:`elo_to_prob`.

    Parameters
    ----------
    p : float
        Win probability of the player.
    k : float, optional (default=4.0)
        Scale of the logistic distribution.

    Returns
    -------
    float
        Elo score of the player.

    Raises
    ------
    ValueError
        If ``k`` is not strictly positive.
    """
    if k <= 0:
        raise ValueError("k must be positive")
    odds = -p / (p - 1)  # equals p / (1 - p)
    return k * np.log10(odds)
def counts_to_penta(
    counts: np.ndarray,
    prior_counts: Optional[np.ndarray] = None,
    n_dirichlet_samples: int = 1000000,
    score_scale: float = 4.0,
    random_state: Union[int, RandomState, None] = None,
    **kwargs,
) -> Tuple[float, float]:
    """Compute mean Elo score and variance of the pentanomial model for a count array.

    Parameters
    ----------
    counts : np.ndarray
        Array of counts for WW, WD, WL/DD, LD and LL
    prior_counts : np.ndarray or None, default=None
        Pseudo counts to use for WW, WD, WL/DD, LD and LL in the
        pentanomial model.
    n_dirichlet_samples : int, default = 1 000 000
        Number of samples to draw from the Dirichlet distribution in order to
        estimate the standard error of the score.
    score_scale : float, optional (default=4.0)
        Scale of the logistic distribution used to calculate the score. Has to be a
        positive real number
    random_state : int, RandomState instance or None, optional (default: None)
        The generator used to initialize the centers. If int, random_state is
        the seed used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`.
    kwargs : dict
        Additional keyword arguments (ignored here).

    Returns
    -------
    tuple (float, float)
        Mean Elo score and corresponding variance

    Raises
    ------
    ValueError
        If ``prior_counts`` is given but does not contain 5 elements.
    """
    if prior_counts is None:
        # Default pseudo-counts: symmetric around the draw outcome and summing
        # to 2.5 virtual game pairs.
        prior_counts = np.array([0.14, 0.19, 0.34, 0.19, 0.14]) * 2.5
    elif len(prior_counts) != 5:
        raise ValueError("Argument prior_counts should contain 5 elements.")
    # Dirichlet posterior over the probabilities of the 5 pentanomial outcomes:
    dist = dirichlet(alpha=counts + prior_counts)
    # Pair scores for WW, WD, WL/DD, LD, LL. NOTE(review): WW maps to 0.0 and
    # LL to 1.0 — the score appears to be oriented for a minimizing optimizer;
    # confirm the sign convention with callers.
    scores = [0.0, 0.25, 0.5, 0.75, 1.0]
    score = prob_to_elo(dist.mean().dot(scores), k=score_scale)
    # Monte-Carlo estimate of the variance of the Elo score under the
    # posterior:
    error = prob_to_elo(
        dist.rvs(n_dirichlet_samples, random_state=random_state).dot(scores),
        k=score_scale,
    ).var()
    return score, error
def setup_logger(verbose: int = 0, logfile: str = "log.txt") -> Logger:
    """Setup logger with correct verbosity and file handler.

    Parameters
    ----------
    verbose : int
        Verbosity level. If verbose = 0, use INFO level, otherwise DEBUG.
    logfile : str
        Desired path to the logfile.

    Returns
    -------
    Logger
        Logger to be used for logging.
    """
    log_level = logging.DEBUG if verbose > 0 else logging.INFO
    log_format = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
    logger = logging.getLogger(LOGGER)
    logger.setLevel(log_level)
    # Do not forward records to the root logger:
    logger.propagate = False
    # NOTE(review): calling this more than once per process appends duplicate
    # file/console handlers and every log line will be emitted multiple
    # times — confirm it is only called once at startup.
    file_logger = logging.FileHandler(logfile)
    file_logger.setFormatter(log_format)
    logger.addHandler(file_logger)
    # Mirror all records to stdout as well:
    console_logger = logging.StreamHandler(sys.stdout)
    console_logger.setFormatter(log_format)
    logger.addHandler(console_logger)
    return logger
def reduce_ranges(
    X: Sequence[list], y: Sequence[float], noise: Sequence[float], space: Space
) -> Tuple[bool, List[list], List[float], List[float]]:
    """Drop all observations that fall outside a (restricted) search space.

    Parameters
    ----------
    X : Sequence of lists
        One parameter configuration per entry.
    y : Sequence of floats
        One score per configuration.
    noise : Sequence of floats
        One score variance per configuration.
    space : skopt.space.Space
        Space object specifying the new optimization space.

    Returns
    -------
    Tuple (bool, list, list, list)
        A flag which is True iff at least one point had to be discarded,
        followed by the filtered X, y and noise lists.

    Raises
    ------
    ValueError
        If the space contains a dimension of unknown type.
    """

    def value_allowed(dim, value):
        # Numeric dimensions expose (lower, upper) bounds; categorical ones
        # expose the collection of admissible values.
        if isinstance(dim, (Integer, Real)):
            lower, upper = dim.bounds
            return lower <= value <= upper
        if isinstance(dim, Categorical):
            return value in dim.bounds
        raise ValueError(f"Parameter type {type(dim)} unknown.")

    kept_X: list = []
    kept_y: list = []
    kept_noise: list = []
    discarded_any = False
    for point, score, variance in zip(X, y, noise):
        # Evaluate every dimension (no short-circuit) so that an unknown
        # dimension type always raises, exactly like the original loop.
        checks = [
            value_allowed(dim, value)
            for dim, value in zip(space.dimensions, point)
        ]
        if all(checks):
            kept_X.append(point)
            kept_y.append(score)
            kept_noise.append(variance)
        else:
            discarded_any = True
    return discarded_any, kept_X, kept_y, kept_noise
def initialize_data(
    parameter_ranges: Sequence[Union[Sequence, Dimension]],
    data_path: Optional[str] = None,
    resume: bool = True,
) -> Tuple[list, list, list, int]:
    """Initialize data structures needed for tuning. Either empty or resumed from disk.

    Parameters
    ----------
    parameter_ranges : Sequence of Dimension objects or tuples
        Parameter range specifications as expected by scikit-optimize.
    data_path : str or None, default=None
        Path to the file containing the data structures used for resuming.
        If None, no resuming will be performed.
    resume : bool, default=True
        If True, fill the data structures with the the data from the given data_path.
        Otherwise return empty data structures.

    Returns
    -------
    tuple consisting of list, list, list and int
        Returns the initialized data structures X, y, noise and iteration number.

    Raises
    ------
    ValueError
        If the number of specified parameters is not matching the existing number of
        parameters in the data.
    """
    # NOTE(review): this grabs the root logger, unlike the rest of the module
    # which logs via logging.getLogger(LOGGER) — possibly unintentional, as
    # warnings emitted here bypass the configured handlers.
    logger = logging.getLogger()
    X = []
    y = []
    noise = []
    iteration = 0
    if data_path is not None and resume:
        space = normalize_dimensions(parameter_ranges)
        path = pathlib.Path(data_path)
        if path.exists():
            # arr_0/arr_1/arr_2 are the X, y and noise arrays as saved by
            # np.savez_compressed below.
            with np.load(path) as importa:
                X = importa["arr_0"].tolist()
                y = importa["arr_1"].tolist()
                noise = importa["arr_2"].tolist()
            # NOTE(review): an empty data file would raise IndexError here
            # (X[0]) rather than a clean error — confirm files always contain
            # at least one point.
            if len(X[0]) != space.n_dims:
                raise ValueError(
                    f"Number of parameters ({len(X[0])}) are not matching "
                    f"the number of dimensions ({space.n_dims})."
                )
            # Discard points that fall outside a (possibly shrunk) space:
            reduction_needed, X_reduced, y_reduced, noise_reduced = reduce_ranges(
                X, y, noise, space
            )
            if reduction_needed:
                # Back up the complete dataset before discarding anything.
                backup_path = path.parent / (
                    path.stem + f"_backup_{int(time.time())}" + path.suffix
                )
                logger.warning(
                    f"The parameter ranges are smaller than the existing data. "
                    f"Some points will have to be discarded. "
                    f"The original {len(X)} data points will be saved to "
                    f"{backup_path}"
                )
                np.savez_compressed(
                    backup_path, np.array(X), np.array(y), np.array(noise)
                )
                X = X_reduced
                y = y_reduced
                noise = noise_reduced
            iteration = len(X)
    return X, y, noise, iteration
def setup_random_state(seed: int) -> np.random.RandomState:
    """Create a reproducibly seeded RandomState.

    The seed is expanded through a SeedSequence and a spawned child seeds an
    MT19937 bit generator, which backs the returned RandomState.

    Parameters
    ----------
    seed : int
        Random seed used to derive the generator state.

    Returns
    -------
    numpy.random.RandomState
        RandomState to be used to generate random numbers.
    """
    seed_sequence = np.random.SeedSequence(seed)
    child_seed = seed_sequence.spawn(1)[0]
    return np.random.RandomState(np.random.MT19937(child_seed))
def initialize_optimizer(
    X: Sequence[list],
    y: Sequence[float],
    noise: Sequence[float],
    parameter_ranges: Sequence[Union[Sequence, Dimension]],
    random_seed: int = 0,
    warp_inputs: bool = True,
    n_points: int = 500,
    n_initial_points: int = 16,
    acq_function: str = "mes",
    acq_function_samples: int = 1,
    resume: bool = True,
    fast_resume: bool = True,
    model_path: Optional[str] = None,
    gp_initial_burnin: int = 100,
    gp_initial_samples: int = 300,
    gp_priors: Optional[List[Callable[[float], float]]] = None,
) -> Optimizer:
    """Create an Optimizer object and if needed resume and/or reinitialize.

    Parameters
    ----------
    X : Sequence of lists
        Contains n_points many lists, each representing one configuration.
    y : Sequence of floats
        Contains n_points many scores, one for each configuration.
    noise : Sequence of floats
        Contains n_points many variances, one for each score.
    parameter_ranges : Sequence of Dimension objects or tuples
        Parameter range specifications as expected by scikit-optimize.
    random_seed : int, default=0
        Random seed for the optimizer.
    warp_inputs : bool, default=True
        If True, the optimizer will internally warp the input space for a better model
        fit. Can negatively impact running time and required burnin samples.
    n_points : int, default=500
        Number of points to evaluate the acquisition function on.
    n_initial_points : int, default=16
        Number of points to pick quasi-randomly to initialize the the model, before
        using the acquisition function.
    acq_function : str, default="mes"
        Acquisition function to use.
    acq_function_samples : int, default=1
        Number of hyperposterior samples to average the acquisition function over.
    resume : bool, default=True
        If True, resume optimization from existing data. If False, start with a
        completely fresh optimizer.
    fast_resume : bool, default=True
        If True, restore the optimizer from disk, avoiding costly reinitialization.
        If False, reinitialize the optimizer from the existing data.
    model_path : str or None, default=None
        Path to the file containing the existing optimizer to be used for fast resume
        functionality.
    gp_initial_burnin : int, default=100
        Number of burnin samples to use for reinitialization.
    gp_initial_samples : int, default=300
        Number of samples to use for reinitialization.
    gp_priors : list of callables, default=None
        List of priors to be used for the kernel hyperparameters. Specified in the
        following order:
        - signal magnitude prior
        - lengthscale prior (x number of parameters)
        - noise magnitude prior

    Returns
    -------
    bask.Optimizer
        Optimizer object to be used in the main tuning loop.
    """
    logger = logging.getLogger(LOGGER)
    # Create random generator:
    random_state = setup_random_state(random_seed)
    gp_kwargs = dict(
        # NOTE(review): an old TODO here claimed normalize_y had to be False
        # due to a scikit-learn 0.23.2 bug, yet the code sets True — confirm
        # which is intended.
        normalize_y=True,
        warp_inputs=warp_inputs,
    )
    opt = Optimizer(
        dimensions=parameter_ranges,
        n_points=n_points,
        n_initial_points=n_initial_points,
        # gp_kernel=kernel,  # TODO: Let user pass in different kernels
        gp_kwargs=gp_kwargs,
        gp_priors=gp_priors,
        acq_func=acq_function,
        acq_func_kwargs=dict(alpha=1.96, n_thompson=500),
        random_state=random_state,
    )
    if not resume:
        return opt
    reinitialize = True
    if model_path is not None and fast_resume:
        path = pathlib.Path(model_path)
        if path.exists():
            # Fast resume: restore the pickled optimizer instead of refitting.
            with open(model_path, mode="rb") as model_file:
                old_opt = dill.load(model_file)
                logger.info(f"Resuming from existing optimizer in {model_path}.")
            # Only reuse the stored optimizer if the search space is unchanged:
            if opt.space == old_opt.space:
                old_opt.acq_func = opt.acq_func
                old_opt.acq_func_kwargs = opt.acq_func_kwargs
                opt = old_opt
                reinitialize = False
            else:
                logger.info(
                    "Parameter ranges have been changed and the "
                    "existing optimizer instance is no longer "
                    "valid. Reinitializing now."
                )
            if gp_priors is not None:
                opt.gp_priors = gp_priors
    if reinitialize and len(X) > 0:
        # Slow path: feed all existing observations back into a fresh model.
        logger.info(
            f"Importing {len(X)} existing datapoints. " f"This could take a while..."
        )
        opt.tell(
            X,
            y,
            noise_vector=noise,
            gp_burnin=gp_initial_burnin,
            gp_samples=gp_initial_samples,
            n_samples=acq_function_samples,
            progress=True,
        )
        logger.info("Importing finished.")
    return opt
def print_results(
    optimizer: Optimizer,
    result_object: OptimizeResult,
    parameter_names: Sequence[str],
    confidence: float = 0.9,
) -> None:
    """Log the current optimum, its Elo estimate and confidence intervals.

    Parameters
    ----------
    optimizer : bask.Optimizer
        Fitted Optimizer object.
    result_object : scipy.optimize.OptimizeResult
        Result object containing the data and the last fitted model.
    parameter_names : Sequence of str
        Names of the parameters to use for printing.
    confidence : float, default=0.9
        Confidence used for the confidence intervals.
    """
    logger = logging.getLogger(LOGGER)
    try:
        best_point, best_value = expected_ucb(result_object, alpha=0.0)
        best_point_dict = dict(zip(parameter_names, best_point))
        # Predictive standard deviation at the optimum with the noise level
        # switched off:
        with optimizer.gp.noise_set_to_zero():
            _, best_std = optimizer.gp.predict(
                optimizer.space.transform([best_point]), return_std=True
            )
        logger.info(f"Current optimum:\n{best_point_dict}")
        # The score is negated and scaled by 100 for display as Elo:
        logger.info(
            f"Estimated Elo: {np.around(-best_value * 100, 4)} +- "
            f"{np.around(best_std * 100, 4).item()}"
        )
        # erfinv(confidence) * sqrt(2) is the two-sided standard-normal
        # quantile for the requested confidence level:
        confidence_mult = erfinv(confidence) * np.sqrt(2)
        lower_bound = np.around(
            -best_value * 100 - confidence_mult * best_std * 100, 4
        ).item()
        upper_bound = np.around(
            -best_value * 100 + confidence_mult * best_std * 100, 4
        ).item()
        logger.info(
            f"{confidence * 100}% confidence interval of the Elo value: "
            f"({lower_bound}, "
            f"{upper_bound})"
        )
        # Per-parameter highest-density intervals, rendered as a table:
        confidence_out = confidence_intervals(
            optimizer=optimizer,
            param_names=parameter_names,
            hdi_prob=confidence,
            opt_samples=1000,
            multimodal=False,
        )
        logger.info(
            f"{confidence * 100}% confidence intervals of the parameters:"
            f"\n{confidence_out}"
        )
    except ValueError:
        # Computing the optimum can fail in rare degenerate cases; this is
        # not fatal, so only inform the user.
        logger.info(
            "Computing current optimum was not successful. "
            "This can happen in rare cases and running the "
            "tuner again usually works."
        )
def plot_results(
    optimizer: Optimizer,
    result_object: OptimizeResult,
    plot_path: str,
    parameter_names: Sequence[str],
) -> None:
    """Plot the current results of the optimizer.

    Parameters
    ----------
    optimizer : bask.Optimizer
        Fitted Optimizer object.
    result_object : scipy.optimize.OptimizeResult
        Result object containing the data and the last fitted model.
    plot_path : str
        Path to the directory to which the plots should be saved.
    parameter_names : Sequence of str
        Names of the parameters to use for plotting.
    """
    logger = logging.getLogger(LOGGER)
    # plot_objective draws a pairwise panel matrix, which needs >= 2 dimensions.
    if optimizer.space.n_dims == 1:
        logger.warning("Plotting for only 1 parameter is not supported yet.")
        return
    logger.debug("Starting to compute the next plot.")
    plt.style.use("dark_background")
    fig, ax = plt.subplots(
        nrows=optimizer.space.n_dims,
        ncols=optimizer.space.n_dims,
        figsize=(3 * optimizer.space.n_dims, 3 * optimizer.space.n_dims),
    )
    # Apply the same dark gray background to the figure and every subplot.
    fig.patch.set_facecolor("#36393f")
    for i in range(optimizer.space.n_dims):
        for j in range(optimizer.space.n_dims):
            ax[i, j].set_facecolor("#36393f")
    # Timestamp + number of evaluated points: successive plots sort
    # chronologically and never overwrite each other.
    timestr = time.strftime("%Y%m%d-%H%M%S")
    plot_objective(result_object, dimensions=parameter_names, fig=fig, ax=ax)
    plotpath = pathlib.Path(plot_path)
    plotpath.mkdir(parents=True, exist_ok=True)
    full_plotpath = plotpath / f"{timestr}-{len(optimizer.Xi)}.png"
    plt.savefig(
        full_plotpath, dpi=300, facecolor="#36393f",
    )
    logger.info(f"Saving a plot to {full_plotpath}.")
    # Close the figure to release matplotlib's memory for it.
    plt.close(fig)
def run_match(
    rounds=10,
    engine1_tc=None,
    engine2_tc=None,
    engine1_st=None,
    engine2_st=None,
    engine1_npm=None,
    engine2_npm=None,
    engine1_ponder=False,
    engine2_ponder=False,
    timemargin=None,
    opening_file=None,
    adjudicate_draws=False,
    draw_movenumber=1,
    draw_movecount=10,
    draw_score=8,
    adjudicate_resign=False,
    resign_movecount=3,
    resign_score=550,
    adjudicate_tb=False,
    tb_path=None,
    concurrency=1,
    debug_mode=False,
    **kwargs,
):
    """Run a cutechess-cli match of two engines with paired random openings.

    Parameters
    ----------
    rounds : int, default=10
        Number of rounds to play in the match (each round consists of 2 games).
    engine1_tc : str or TimeControl object, default=None
        Time control to use for the first engine. If str, it can be a
        non-increment time control like "10" (10 seconds) or an increment
        time control like "5+1.5" (5 seconds total with 1.5 seconds increment).
        If None, it is assumed that engine1_npm or engine1_st is provided.
    engine2_tc : str or TimeControl object, default=None
        See engine1_tc.
    engine1_st : str or int, default=None
        Time limit in seconds for each move.
        If None, it is assumed that engine1_tc or engine1_npm is provided.
    engine2_st : str or int, default=None
        See engine1_st.
    engine1_npm : str or int, default=None
        Number of nodes per move the engine is allowed to search.
        If None, it is assumed that engine1_tc or engine1_st is provided.
    engine2_npm : str or int, default=None
        See engine1_npm.
    engine1_ponder : bool, default=False
        If True, allow engine1 to ponder.
    engine2_ponder : bool, default=False
        See engine1_ponder.
    timemargin : str or int, default=None
        Allowed number of milliseconds the engines are allowed to go over the time
        limit. If None, the margin is 0.
    opening_file : str, default=None
        Path to the file containing the openings. Can be .epd or .pgn.
        Make sure that the file explicitly has the .epd or .pgn suffix, as it
        is used to detect the format.
    adjudicate_draws : bool, default=False
        Specify, if cutechess-cli is allowed to adjudicate draws, if the
        scores of both engines drop below draw_score for draw_movecount number
        of moves. Only kicks in after draw_movenumber moves have been played.
    draw_movenumber : int, default=1
        Number of moves to play after the opening, before draw adjudication is
        allowed.
    draw_movecount : int, default=10
        Number of moves below the threshold draw_score, without captures and
        pawn moves, before the game is adjudicated as draw.
    draw_score : int, default=8
        Score threshold of the engines in centipawns. If the score of both
        engines drops below this value for draw_movecount consecutive moves,
        and there are no captures and pawn moves, the game is adjudicated as
        draw.
    adjudicate_resign : bool, default=False
        Specify, if cutechess-cli is allowed to adjudicate wins/losses based on
        the engine scores. If one engine’s score drops below -resign_score for
        resign_movecount many moves, the game is considered a loss for this
        engine.
    resign_movecount : int, default=3
        Number of consecutive moves one engine has to output a score below
        the resign_score threshold for the game to be considered a loss for this
        engine.
    resign_score : int, default=550
        Resign score threshold in centipawns. The score of the engine has to
        stay below -resign_score for at least resign_movecount moves for it to
        be adjudicated as a loss.
    adjudicate_tb : bool, default=False
        Allow cutechess-cli to adjudicate games based on Syzygy tablebases.
        If true, tb_path has to be set.
    tb_path : str, default=None
        Path to the folder containing the Syzygy tablebases.
    concurrency : int, default=1
        Number of games to run in parallel. Be careful when running time control
        games, since the engines can negatively impact each other when running
        in parallel.
    debug_mode : bool, default=False
        If True, pass ``-debug`` to cutechess-cli.

    Yields
    -------
    out : str
        Results of the cutechess-cli match streamed as str.
    """
    # Assemble the cutechess-cli command line piece by piece.
    string_array = ["cutechess-cli"]
    string_array.extend(("-concurrency", str(concurrency)))
    # Each engine needs at least one of: nodes per move, a time control, or
    # a fixed per-move time limit.
    if (engine1_npm is None and engine1_tc is None and engine1_st is None) or (
        engine2_npm is None and engine2_tc is None and engine2_st is None
    ):
        raise ValueError("A valid time control or nodes configuration is required.")
    string_array.extend(
        _construct_engine_conf(
            id=1,
            engine_npm=engine1_npm,
            engine_tc=engine1_tc,
            engine_st=engine1_st,
            engine_ponder=engine1_ponder,
            timemargin=timemargin,
        )
    )
    string_array.extend(
        _construct_engine_conf(
            id=2,
            engine_npm=engine2_npm,
            engine_tc=engine2_tc,
            engine_st=engine2_st,
            engine_ponder=engine2_ponder,
            timemargin=timemargin,
        )
    )
    if opening_file is None:
        raise ValueError("Providing an opening file is required.")
    opening_path = pathlib.Path(opening_file)
    if not opening_path.exists():
        raise FileNotFoundError(
            f"Opening file the following path was not found: {opening_path}"
        )
    # The file suffix doubles as the opening-format indicator for cutechess.
    opening_format = opening_path.suffix
    if opening_format not in {".epd", ".pgn"}:
        raise ValueError(
            "Unable to determine opening format. "
            "Make sure to add .epd or .pgn to your filename."
        )
    string_array.extend(
        (
            "-openings",
            f"file={str(opening_path)}",
            f"format={opening_format[1:]}",
            "order=random",
        )
    )
    if adjudicate_draws:
        string_array.extend(
            (
                "-draw",
                f"movenumber={draw_movenumber}",
                f"movecount={draw_movecount}",
                f"score={draw_score}",
            )
        )
    if adjudicate_resign:
        string_array.extend(
            ("-resign", f"movecount={resign_movecount}", f"score={resign_score}")
        )
    if adjudicate_tb:
        if tb_path is None:
            raise ValueError("No path to tablebases provided.")
        tb_path_object = pathlib.Path(tb_path)
        if not tb_path_object.exists():
            raise FileNotFoundError(
                f"No folder found at the following path: {str(tb_path_object)}"
            )
        string_array.extend(("-tb", str(tb_path_object)))
    string_array.extend(("-rounds", f"{rounds}"))
    # 2 games per round together with -repeat: both engines play each opening
    # once with colors swapped (parse_experiment_result relies on this pairing).
    string_array.extend(("-games", "2"))
    string_array.append("-repeat")
    # -recover restarts a crashed engine instead of aborting the whole match.
    string_array.append("-recover")
    if debug_mode:
        string_array.append("-debug")
    string_array.extend(("-pgnout", "out.pgn"))
    # Stream cutechess-cli's combined stdout/stderr back to the caller
    # line by line as the match progresses.
    with subprocess.Popen(
        string_array, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
    ) as popen:
        for line in iter(popen.stdout.readline, ""):
            yield line
def parse_experiment_result(
    outstr,
    prior_counts=None,
    n_dirichlet_samples=1000000,
    score_scale=4.0,
    random_state=None,
    **kwargs,
):
    """Parse cutechess-cli result output to extract mean score and error.

    Here we use a simple pentanomial model to exploit paired openings.
    We distinguish the outcomes WW, WD, WL/DD, LD and LL and apply the
    following scoring (note, that the optimizer always minimizes the score):

    +------+------+-------+-----+-----+
    | WW   | WD   | WL/DD | LD  | LL  |
    +======+======+=======+=====+=====+
    | -1.0 | -0.5 | 0.0   | 0.5 | 1.0 |
    +------+------+-------+-----+-----+

    Note: It is important that the match output was produced using
    cutechess-cli using paired openings, otherwise the returned score is
    useless.

    Parameters
    ----------
    outstr : string (utf-8)
        Match output of cutechess-cli. It assumes the output was coming from
        a head-to-head match with paired openings.
    prior_counts : list-like float or int, default=None
        Pseudo counts to use for WW, WD, WL/DD, LD and LL in the
        pentanomial model.
    n_dirichlet_samples : int, default = 1 000 000
        Number of samples to draw from the Dirichlet distribution in order to
        estimate the standard error of the score.
    score_scale : float, optional (default=4.0)
        Scale of the logistic distribution used to calculate the score. Has to be a
        positive real number
    random_state : int, RandomState instance or None, optional (default: None)
        The generator used to initialize the centers. If int, random_state is
        the seed used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`.

    Returns
    -------
    score : float (in [-1, 1])
        Expected (negative) score of the first player (the lower the stronger)
    error : float
        Estimated standard error of the score. Estimated by repeated draws
        from a Dirichlet distribution.
    """
    # Cumulative (wins, losses, draws) of engine1 reported after each game:
    wdl_strings = re.findall(r"Score of.*:\s*([0-9]+\s-\s[0-9]+\s-\s[0-9]+)", outstr)
    array = np.array(
        [np.array([int(y) for y in re.findall(r"[0-9]+", x)]) for x in wdl_strings]
    )
    # Per-game (win, loss, draw) increments: difference of consecutive
    # cumulative rows (prepend zeros so the first game yields its own result).
    diffs = np.diff(array, axis=0, prepend=np.array([[0, 0, 0]]))
    # Parse order of finished games to be able to compute the correct pentanomial scores
    finished = np.array(
        [int(x) - 1 for x in re.findall(r"Finished game ([0-9]+)", outstr)]
    )
    diffs = diffs[np.argsort(finished)]
    counts = {"WW": 0, "WD": 0, "WL/DD": 0, "LD": 0, "LL": 0}
    # Games i and i+1 share one opening with colors swapped; classify each
    # pair by its summed (wins, losses, draws) into a pentanomial outcome.
    for i in range(0, len(diffs) - 1, 2):
        match = diffs[i] + diffs[i + 1]
        if match[0] == 2:
            counts["WW"] += 1
        elif match[0] == 1:
            if match[1] == 1:
                # one win + one loss
                counts["WL/DD"] += 1
            else:
                counts["WD"] += 1
        elif match[1] == 1:
            counts["LD"] += 1
        elif match[2] == 2:
            # two draws score the same as a win + loss pair
            counts["WL/DD"] += 1
        else:
            counts["LL"] += 1
    counts_array = np.array(list(counts.values()))
    return counts_to_penta(
        counts=counts_array,
        prior_counts=prior_counts,
        n_dirichlet_samples=n_dirichlet_samples,
        score_scale=score_scale,
        random_state=random_state,
        **kwargs,
    )
def update_model(
    optimizer: Optimizer,
    point: list,
    score: float,
    variance: float,
    acq_function_samples: int = 1,
    gp_burnin: int = 5,
    gp_samples: int = 300,
    gp_initial_burnin: int = 100,
    gp_initial_samples: int = 300,
) -> None:
    """Update the optimizer model with the newest data.

    Parameters
    ----------
    optimizer : bask.Optimizer
        Optimizer object which is to be updated.
    point : list
        Latest configuration which was tested.
    score : float
        Elo score the configuration achieved.
    variance : float
        Variance of the Elo score of the configuration.
    acq_function_samples : int, default=1
        Number of hyperposterior samples to average the acquisition function over.
    gp_burnin : int, default=5
        Number of burnin iterations to use before keeping samples for the model.
    gp_samples : int, default=300
        Number of samples to collect for the model.
    gp_initial_burnin : int, default=100
        Number of burnin iterations to use for the first initial model fit.
    gp_initial_samples : int, default=300
        Number of samples to collect for the first initial model fit.
    """
    logger = logging.getLogger(LOGGER)
    while True:
        try:
            now = datetime.now()
            # The very first fit (no MCMC chain yet) uses the longer initial
            # burnin/sample schedule; subsequent updates use the cheaper one.
            # (Removed the original's no-op self-assignments of gp_burnin /
            # gp_samples / n_samples here.)
            if optimizer.gp.chain_ is None:
                burnin, samples = gp_initial_burnin, gp_initial_samples
            else:
                burnin, samples = gp_burnin, gp_samples
            optimizer.tell(
                x=point,
                y=score,
                noise_vector=variance,
                n_samples=acq_function_samples,
                gp_samples=samples,
                gp_burnin=burnin,
            )
            later = datetime.now()
            difference = (later - now).total_seconds()
            logger.info(f"GP sampling finished ({difference}s)")
            logger.debug(f"GP kernel: {optimizer.gp.kernel_}")
        except ValueError:
            # Fitting can fail sporadically; advance the chain a little and
            # retry until the fit succeeds.
            logger.warning(
                "Error encountered during fitting. Trying to sample chain a bit. "
                "If this problem persists, restart the tuner to reinitialize."
            )
            optimizer.gp.sample(n_burnin=11, priors=optimizer.gp_priors)
        else:
            break
def _construct_engine_conf(
id,
engine_npm=None,
engine_tc=None,
engine_st=None,
engine_ponder=False,
timemargin=None,
):
result = ["-engine", f"conf=engine{id}"]
if engine_npm is not None:
result.extend(("tc=inf", f"nodes={engine_npm}"))
return result
if engine_st is not None:
result.append(f"st={str(engine_st)}")
if timemargin is not None:
result.append(f"timemargin={str(timemargin)}")
if engine_ponder:
result.append("ponder")
return result
if isinstance(engine_tc, str):
engine_tc = TimeControl.from_string(engine_tc)
result.append(f"tc={str(engine_tc)}")
if timemargin is not None:
result.append(f"timemargin={str(timemargin)}")
if engine_ponder:
result.append("ponder")
return result
|
fsmosca/chess-tuning-tools | tune/plots.py | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import LogLocator
from skopt.plots import _format_scatter_plot_axes
from tune.utils import expected_ucb
__all__ = ["partial_dependence", "plot_objective"]
def _evenly_sample(dim, n_points):
    """Return `n_points` evenly spaced points from a Dimension.

    Parameters
    ----------
    dim : `Dimension`
        The Dimension to sample from. Can be categorical; evenly-spaced
        category indices are chosen in order without replacement (result
        may be smaller than `n_points`).
    n_points : int
        The number of points to sample from `dim`.

    Returns
    -------
    xi : np.array
        The sampled points in the Dimension. For Categorical
        dimensions, returns the index of the value in
        `dim.categories`.
    xi_transformed : np.array
        The transformed values of `xi`, for feeding to a model.
    """
    categories = np.array(getattr(dim, "categories", []), dtype=object)
    if len(categories):
        # Categorical dimension: pick evenly spaced category indices,
        # preserving their original order.
        xi = np.linspace(
            0, len(categories) - 1, min(len(categories), n_points), dtype=int
        )
        return xi, dim.transform(categories[xi])
    low, high = dim.bounds
    if dim.prior == "log-uniform":
        # Sample evenly in log-space for log-uniform priors.
        xi = np.logspace(np.log10(low), np.log10(high), n_points)
    else:
        xi = np.linspace(low, high, n_points)
    return xi, dim.transform(xi)
def partial_dependence(
    space, model, i, j=None, sample_points=None, n_samples=250, n_points=40, x_eval=None
):
    """Calculate the partial dependence for dimensions `i` and `j` with
    respect to the objective value, as approximated by `model`.

    The partial dependence plot shows how the value of the dimensions
    `i` and `j` influence the `model` predictions after "averaging out"
    the influence of all other dimensions.

    When `x_eval` is not `None`, the given values are used instead of
    random samples. In this case, `n_samples` will be ignored.

    Parameters
    ----------
    space : `Space`
        The parameter space over which the minimization was performed.
    model
        Surrogate model for the objective function.
    i : int
        The first dimension for which to calculate the partial dependence.
    j : int, default=None
        The second dimension for which to calculate the partial dependence.
        To calculate the 1D partial dependence on `i` alone set `j=None`.
    sample_points : np.array, shape=(n_points, n_dims), default=None
        Only used when `x_eval=None`, i.e in case partial dependence should
        be calculated.
        Randomly sampled and transformed points to use when averaging
        the model function at each of the `n_points` when using partial
        dependence.
    n_samples : int, default=250
        Number of random samples to use for averaging the model function
        at each of the `n_points` when using partial dependence. Only used
        when `sample_points=None` and `x_eval=None`.
    n_points : int, default=40
        Number of points at which to evaluate the partial dependence
        along each dimension `i` and `j`.
    x_eval : list, default=None
        `x_eval` is a list of parameter values or None. In case `x_eval`
        is not None, the partial dependence will be calculated using these
        values.
        Otherwise, random selected samples will be used.

    Returns
    -------
    For 1D partial dependence:

    xi : np.array
        The points at which the partial dependence was evaluated.
    yi : np.array
        The value of the model at each point `xi`.

    For 2D partial dependence:

    xi : np.array, shape=n_points
        The points at which the partial dependence was evaluated.
    yi : np.array, shape=n_points
        The points at which the partial dependence was evaluated.
    zi : np.array, shape=(n_points, n_points)
        The value of the model at each point `(xi, yi)`.

    For Categorical variables, the `xi` (and `yi` for 2D) returned are
    the indices of the variable in `Dimension.categories`.
    """
    # The idea is to step through one dimension, evaluating the model with
    # that dimension fixed and averaging either over random values or over
    # the given ones in x_eval in all other dimensions.
    # (Or step through 2 dimensions when i and j are given.)
    # Categorical dimensions make this interesting, because they are one-
    # hot-encoded, so there is a one-to-many mapping of input dimensions
    # to transformed (model) dimensions.

    # If we haven't been given an x_eval list we use random sampled values instead
    if x_eval is None and sample_points is None:
        sample_points = space.transform(space.rvs(n_samples=n_samples))
    elif sample_points is None:
        sample_points = space.transform([x_eval])

    # dim_locs[i] is the (column index of the) start of dim i in
    # sample_points.
    # This is useful when we are using one hot encoding, i.e using
    # categorical values
    dim_locs = np.cumsum([0] + [d.transformed_size for d in space.dimensions])

    if j is None:
        # We sample evenly instead of randomly. This is necessary when using
        # categorical values
        xi, xi_transformed = _evenly_sample(space.dimensions[i], n_points)
        yi = []
        for x_ in xi_transformed:
            rvs_ = np.array(sample_points)  # copy
            # We replace the values in the dimension that we want to keep
            # fixed
            rvs_[:, dim_locs[i] : dim_locs[i + 1]] = x_
            # In case of `x_eval=None` rvs consists of random samples.
            # Calculating the mean of these samples is how partial dependence
            # is implemented.
            yi.append(np.mean(model.predict(rvs_)))
        return xi, yi
    else:
        xi, xi_transformed = _evenly_sample(space.dimensions[j], n_points)
        yi, yi_transformed = _evenly_sample(space.dimensions[i], n_points)
        zi = []
        for x_ in xi_transformed:
            row = []
            for y_ in yi_transformed:
                rvs_ = np.array(sample_points)  # copy
                # Fix dimension j (x-axis) and i (y-axis) simultaneously.
                rvs_[:, dim_locs[j] : dim_locs[j + 1]] = x_
                rvs_[:, dim_locs[i] : dim_locs[i + 1]] = y_
                row.append(np.mean(model.predict(rvs_)))
            zi.append(row)
        # Transpose so zi[row, col] corresponds to (yi[row], xi[col]).
        return xi, yi, np.array(zi).T
def plot_objective(
    result,
    levels=20,
    n_points=200,
    n_samples=30,
    size=3,
    zscale="linear",
    dimensions=None,
    n_random_restarts=100,
    alpha=0.25,
    margin=0.65,
    colors=None,
    fig=None,
    ax=None,
):
    """Pairwise partial dependence plot of the objective function.

    The diagonal shows the partial dependence for dimension `i` with
    respect to the objective function. The off-diagonal shows the
    partial dependence for dimensions `i` and `j` with
    respect to the objective function. The objective function is
    approximated by `result.model.`

    Pairwise scatter plots of the points at which the objective
    function was directly evaluated are shown on the off-diagonal.
    A red point indicates the found minimum.

    Note: search spaces that contain `Categorical` dimensions are
    currently not supported by this function.

    Parameters
    ----------
    * `result` [`OptimizeResult`]
        The result for which to create the scatter plot matrix.
    * `levels` [int, default=20]
        Number of levels to draw on the contour plot, passed directly
        to `plt.contour()`.
    * `n_points` [int, default=200]
        Number of points at which to evaluate the partial dependence
        along each dimension.
    * `n_samples` [int, default=30]
        Number of random samples to use for averaging the model function
        at each of the `n_points`.
    * `size` [float, default=3]
        Height (in inches) of each facet.
    * `zscale` [str, default='linear']
        Scale to use for the z axis of the contour plots. Either 'linear'
        or 'log'.
    * `dimensions` [list of str, default=None] Labels of the dimension
        variables. `None` defaults to `space.dimensions[i].name`, or
        if also `None` to `['X_0', 'X_1', ..]`.
    * `n_random_restarts` [int, default=100]
        Number of restarts to try to find the global optimum.
    * `alpha` [float, default=0.25]
        Transparency of the sampled points.
    * `margin` [float, default=0.65]
        Margin in inches around the plot.
    * `colors` [list of tuples, default=None]
        Colors to use for the optima.
    * `fig` [Matplotlib figure, default=None]
        Figure to use for plotting. If None, it will create one.
    * `ax` [k x k axes, default=None]
        Axes on which to plot the marginals. If None, it will create appropriate
        axes.

    Returns
    -------
    * `ax`: [`Axes`]:
        The matplotlib axes.
    """
    if colors is None:
        # NOTE(review): plt.cm.get_cmap is deprecated in newer Matplotlib
        # releases; plt.get_cmap is the modern spelling — confirm the
        # supported Matplotlib range before changing.
        colors = plt.cm.get_cmap("Set3").colors
    space = result.space
    samples = np.asarray(result.x_iters)
    # One shared random sample, reused to average out the other dimensions
    # in every panel.
    rvs_transformed = space.transform(space.rvs(n_samples=n_samples))
    if zscale == "log":
        locator = LogLocator()
    elif zscale == "linear":
        locator = None
    else:
        raise ValueError(
            "Valid values for zscale are 'linear' and 'log'," " not '%s'." % zscale
        )
    if fig is None:
        fig, ax = plt.subplots(
            space.n_dims,
            space.n_dims,
            figsize=(size * space.n_dims, size * space.n_dims),
        )
    width, height = fig.get_size_inches()
    fig.subplots_adjust(
        left=margin / width,
        right=1 - margin / width,
        bottom=margin / height,
        top=1 - margin / height,
        hspace=0.1,
        wspace=0.1,
    )
    # Locating the optima can fail sporadically; retry up to 10 times and
    # fall back to plotting without optimum markers if it keeps failing.
    failures = 0
    while True:
        try:
            with result.models[-1].noise_set_to_zero():
                min_x = expected_ucb(
                    result, alpha=0.0, n_random_starts=n_random_restarts
                )[0]
                min_ucb = expected_ucb(result, n_random_starts=n_random_restarts)[0]
        except ValueError:
            failures += 1
            if failures == 10:
                break
            continue
        else:
            break

    for i in range(space.n_dims):
        for j in range(space.n_dims):
            # Diagonal: 1D partial dependence of dimension i.
            if i == j:
                xi, yi = partial_dependence(
                    space,
                    result.models[-1],
                    i,
                    j=None,
                    sample_points=rvs_transformed,
                    n_points=n_points,
                )
                yi_min, yi_max = np.min(yi), np.max(yi)
                ax[i, i].plot(xi, yi, color=colors[1])
                if failures != 10:
                    # Mark the plain optimum and the UCB optimum with their values.
                    ax[i, i].axvline(min_x[i], linestyle="--", color=colors[3], lw=1)
                    ax[i, i].axvline(min_ucb[i], linestyle="--", color=colors[5], lw=1)
                    ax[i, i].text(
                        min_x[i],
                        yi_min + 0.9 * (yi_max - yi_min),
                        f"{np.around(min_x[i], 4)}",
                        color=colors[3],
                    )
                    ax[i, i].text(
                        min_ucb[i],
                        yi_min + 0.7 * (yi_max - yi_min),
                        f"{np.around(min_ucb[i], 4)}",
                        color=colors[5],
                    )
            # lower triangle: 2D partial dependence of dimensions (i, j)
            elif i > j:
                xi, yi, zi = partial_dependence(
                    space, result.models[-1], i, j, rvs_transformed, n_points
                )
                ax[i, j].contourf(xi, yi, zi, levels, locator=locator, cmap="viridis_r")
                ax[i, j].scatter(
                    samples[:, j], samples[:, i], c="k", s=10, lw=0.0, alpha=alpha
                )
                if failures != 10:
                    ax[i, j].scatter(min_x[j], min_x[i], c=["r"], s=20, lw=0.0)
                    ax[i, j].scatter(
                        min_ucb[j], min_ucb[i], c=["xkcd:orange"], s=20, lw=0.0
                    )
    # Get all (non-constant) dimensions for axis formatting.
    plot_dims = []
    for row in range(space.n_dims):
        if space.dimensions[row].is_constant:
            continue
        plot_dims.append((row, space.dimensions[row]))
    return _format_scatter_plot_axes(
        ax,
        space,
        ylabel="Partial dependence",
        plot_dims=plot_dims,
        dim_labels=dimensions,
    )
|
fsmosca/chess-tuning-tools | tune/db_workers/dbmodels/__init__.py | from tune.db_workers.dbmodels.base_model import Base
from tune.db_workers.dbmodels.models import *
__all__ = ["Base", "SqlTune", "SqlJob", "SqlUCIParam", "SqlTimeControl", "SqlResult"]
|
fsmosca/chess-tuning-tools | tests/test_dbutils.py | #!/usr/bin/env python
from decimal import Decimal
import numpy as np
from numpy.testing import assert_almost_equal
from tune.db_workers.utils import (
TimeControl,
compute_probabilities,
compute_probabilities_for_bias,
draw_rate_to_elo,
elo_to_bayeselo,
ldw_probabilities,
penta,
penta_to_score,
)
def test_penta():
    """penta should combine two L/D/W distributions into pentanomial probabilities."""
    ldw_one = np.array([0.1, 0.2, 0.7])
    ldw_two = np.array([0.2, 0.2, 0.6])
    assert_almost_equal(
        penta(ldw_one, ldw_two),
        np.array([0.02, 0.06, 0.24, 0.26, 0.42]),
        decimal=3,
    )
def test_ldw_probabilities():
    """Loss/draw/win probabilities for elo=50, draw_elo=200, bias=200."""
    assert_almost_equal(
        ldw_probabilities(elo=50, draw_elo=200, bias=200),
        np.array([0.06975828735890623, 0.35877859523271227, 0.5714631174083815]),
    )
def test_draw_rate_to_elo():
    """A 50% draw rate should correspond to a draw_elo of about 190.85."""
    assert_almost_equal(draw_rate_to_elo(0.5), np.array(190.84850188786498))
def test_compute_probabilities_for_bias():
    """Pentanomial probabilities for a single opening bias."""
    assert_almost_equal(
        compute_probabilities_for_bias(elo=50, draw_elo=200, bias=200),
        np.array([0.029894, 0.18540627, 0.41591514, 0.30154527, 0.06723932]),
    )
def test_compute_probabilities():
    """Pentanomial probabilities averaged over both opening biases."""
    expected = np.array(
        [
            0.033318056828286285,
            0.19078749500997028,
            0.3957332350793835,
            0.30255132321508954,
            0.07760988986727044,
        ]
    )
    assert_almost_equal(
        compute_probabilities(elo=50, draw_elo=200, biases=(0, 200)), expected
    )
def test_elo_to_bayeselo():
    """Elo -> BayesElo conversion at draw_elo=200 with two opening biases."""
    bayes_elo = elo_to_bayeselo(elo=50, draw_elo=200, biases=(0, 200))
    assert_almost_equal(bayes_elo, 71.513929, decimal=5)
def test_penta_to_score():
    """Pentanomial counts plus a prior should map to the expected score."""
    score = penta_to_score(
        draw_rate=0.5,
        counts=np.array([1, 2, 3, 4, 5]),
        prior_games=10,
        prior_elo=0,
    )
    assert_almost_equal(score, 0.4016368226279837)
def test_timecontrol():
    """Round-trip a pair of time-control strings through TimeControl."""
    tc = TimeControl.from_strings("3.0+0.03", "7.0+0.0")
    assert tc == (Decimal("3.0"), Decimal("0.03"), Decimal(7), Decimal(0))
    assert tc.to_strings() == ("3.0+0.03", "7.0+0.0")
|
defunSM/SpecialRel | SpaceTimeGraph.py | import sys, os, math
import optparse
import SpecialRel as sprl
import matplotlib.pyplot as plt
import numpy as np
def time_of_the_object(v, b, x):
    """Worldline t(x) = x / v + b: on a spacetime diagram an object moving at
    constant velocity v has slope 1/v, with time-axis intercept b."""
    slope = 1 / v
    return slope * x + b
def given_velocity_and_initial_position(v, b=0, limit=11, lower_limit=-11):
    """Build a flat [x0, t0, x1, t1, ...] list of worldline coordinates for
    integer-spaced positions in [lower_limit, limit), echoing it to stdout."""
    data = []
    for position in np.arange(lower_limit, limit):
        data.extend((position, time_of_the_object(v, b, position)))
    print(data)
    return data
def retrieve_ycoords(data):
    """Return the time (t) values, stored at the odd indices of the flat
    [x0, t0, x1, t1, ...] coordinate list."""
    return [data[index] for index in range(1, len(data), 2)]
def retrieve_xcoords(data):
    """Return the space (x) values from the even indices of the flat
    [x0, t0, x1, t1, ...] coordinate list, cast to int."""
    return [int(data[index]) for index in range(0, len(data), 2)]
def space_time_graph(data, name_of_object="object"):
    """Plot the worldline stored in the flat [x0, t0, x1, t1, ...] list
    `data` on a labelled spacetime diagram.

    Blocks until the interactive plot window is closed (plt.show()).

    Removed dead code: the original computed np.polyfit slope/intercept and
    a best-fit line, but the line that plotted it was commented out, so the
    values were never used.
    """
    x = retrieve_xcoords(data)
    y = retrieve_ycoords(data)
    # Draw the spacetime axes through the origin.
    plt.axhline(linewidth=2, color='black')
    plt.axvline(linewidth=2, color='black')
    plt.ylabel('Time (s)')
    plt.xlabel('Space (m)')
    plt.grid(True)
    plt.plot(x, y, '--', label=name_of_object)
    plt.legend(loc='upper left')
    plt.axis('tight')
    plt.title("SpaceTime Graph")
    plt.show()
def main():
    # Parse command-line options describing the object's worldline.
    p = optparse.OptionParser()
    p.add_option('--velocity', '-v', default=0.2)
    p.add_option('--intercept', '-b', default=0)
    p.add_option('--upperlimit', '-u', default=11)
    p.add_option('--lowerlimit', '-l', default=-11)
    p.add_option('--objectname', '-n', default="object")
    # NOTE(review): --gammafactor is parsed but never used below — confirm
    # whether it was meant to scale the worldline.
    p.add_option('--gammafactor', '-g', default=1)
    options, arguments = p.parse_args()
    # Option values arrive as str from the command line (or as the numeric
    # defaults); coerce them all to float.
    v = float(options.velocity)
    b = float(options.intercept)
    ul = float(options.upperlimit)
    ll = float(options.lowerlimit)
    n = options.objectname
    data = given_velocity_and_initial_position(v, b, ul, ll)
    space_time_graph(data, n)

# Plot directly when invoked as a script.
if __name__=="__main__":
    main()
|
defunSM/SpecialRel | SpecialRel.py | <gh_stars>1-10
#!/usr/bin/env python
import sys, os, math
# Speed of light in vacuum, in meters per second (exact SI value).
c = 299792458
def main():
    # Entry point: just announce that the module was loaded/run.
    print("SpecialRel.py has been loaded.")
def ctometer(a):
    """Convert a speed given as a fraction of c into meters per second."""
    meters_per_second = a * c
    return meters_per_second
def metertoc(a):
    """Convert a speed in meters per second into a fraction of c."""
    fraction_of_c = a / c
    return float(fraction_of_c)
def gamma(v):
    """Lorentz factor 1 / sqrt(1 - v**2) for a speed v given as a fraction of c."""
    return 1 / math.sqrt(1 - v ** 2)
def ctogamma(v):
    """Lorentz factor for a speed given in meters per second."""
    fraction = metertoc(v)
    return gamma(fraction)
def xlorentz(speed, x, t):
    """Lorentz-transformed position x' = gamma * (x - v*t), speed in units of c."""
    boosted = x - speed * t
    return float(gamma(speed) * boosted)
def tlorentz(speed, x, t):
    """Lorentz-transformed time t' = gamma * (t - v*x), speed in units of c."""
    retarded = t - speed * x
    return float(gamma(speed) * retarded)
def vlorentz(v, w):
    """Relativistic velocity subtraction: speed v observed from a frame
    moving at w, both expressed as fractions of c."""
    numerator = v - w
    denominator = 1 - v * w
    return float(numerator / denominator)
def timedialation(v, t):
    """Dilated time interval gamma(v) * t, with v a fraction of c."""
    return float(gamma(v) * t)
def lengthcontraction(v, h):
    """Contracted length h / gamma(v), with v a fraction of c."""
    lorentz_factor = gamma(v)
    return float(h / lorentz_factor)
def spacetimeinterval(x, t):
    """Invariant (timelike) interval sqrt(t^2 - x^2) with c = 1."""
    interval_squared = t ** 2 - x ** 2
    return float(math.sqrt(interval_squared))
def spacetimeinterval2(t1, t2, x1, x2):
    """Invariant (timelike) interval between two events, with c = 1."""
    dt = t2 - t1
    dx = x2 - x1
    return float(math.sqrt(dt ** 2 - dx ** 2))
def spaceinterval(x1, x2):
    """Spatial separation x2 - x1 of two events."""
    separation = x2 - x1
    return float(separation)
def timeinterval(t1, t2):
    """Temporal separation t2 - t1 of two events."""
    duration = t2 - t1
    return float(duration)
def restenergy(m, c):
    """Rest energy E0 = m * c^2. Note that `c` here is a parameter, which
    shadows the module-level speed-of-light constant."""
    energy = m * c * c
    return float(energy)
def totalenergy(m, v):
    """Total relativistic energy E = gamma(v) * m * c**2, with v a fraction of c.

    Bug fix: the original multiplied by the literal 3e16, which is neither
    c**2 (~8.98755e16) nor the common classroom approximation
    (3e8)**2 = 9e16. Use the module-level speed-of-light constant `c`
    so the result is exact and consistent with restenergy().
    """
    gamma_factor = gamma(v)
    return float(gamma_factor * m * c * c)
def totalmomentum(m, v):
    """Relativistic momentum p = gamma(v) * m * v, with v a fraction of c."""
    return float(gamma(v) * m * v)
# Print the banner only when executed directly; importing stays silent.
if __name__=="__main__":
    main()
|
adeel/timed | scripts/011_to_012.py | #!/usr/bin/env python
"Converts a timed-0.11-style log file to a timed-0.12-style log file."
import timed
import yaml
def read():
    """Load and parse the old-style YAML log file; return [] if it is empty.

    Fix: the original opened the file without closing it; use a context
    manager so the handle is released deterministically.
    """
    with open(timed.log_file) as f:
        data = f.read()
    if not data:
        return []
    return yaml.safe_load(data)
# Load the 0.11-format log; it is re-saved below in the 0.12 format.
logs = read()
timed.save(logs) |
adeel/timed | timed/client.py | "Timed: a command-line time tracker."
# NOTE(review): reassigning __name__ shadows the module's real import name;
# presumably intended as a display label — confirm before relying on it.
__name__ = 'client'
import sys
import os.path
import datetime
from termcolor import colored
from timed import server
from timed import cmdapp
# Shorthand: now() returns the current local datetime.
now = datetime.datetime.now
def main():
    "entry point: configure the command-line app with defaults"
    # Default log location and the timestamp format used when
    # (de)serializing records; both overridable via --logfile/--time_format.
    cmdapp.main(name='timed', desc=__doc__, config={
        'logfile': os.path.expanduser('~/.timed'),
        'time_format': '%H:%M on %d %b %Y'})
@cmdapp.cmd
def summary(logfile, time_format):
    "show a summary of all projects"
    def output(summary):
        # Pad project names so the elapsed times line up in one column.
        width = max([len(p[0]) for p in summary]) + 3
        print '\n'.join([
            "%s%s%s" % (p[0], ' ' * (width - len(p[0])),
                colored(minutes_to_txt(p[1]), 'red')) for p in summary])
    output(server.summarize(read(logfile, time_format, only_elapsed=True)))
@cmdapp.cmd
@cmdapp.default
def status(logfile, time_format):
    "show current status"
    try:
        # A record is (project, (start_time, end_time)); end_time appears to
        # be falsy while the project is still active — confirm against
        # server.record_from_txt.
        r = read(logfile, time_format)[-1]
        if r[1][1]:
            # Last record is closed: nothing active, show the summary instead.
            return summary(logfile, time_format)
        else:
            print "working on %s" % colored(r[0], attrs=['bold'])
            print " since %s" % colored(
                server.date_to_txt(r[1][0], time_format), 'green')
            print " to now, %s" % colored(
                server.date_to_txt(now(), time_format), 'green')
            print " => %s elapsed" % colored(time_elapsed(r[1][0]), 'red')
    except IndexError:
        # Empty log: no records at all, fall back to usage help.
        return cmdapp.help()
@cmdapp.cmd
def start(project, logfile, time_format):
    "start tracking for <project>"
    records = read(logfile, time_format)
    # Refuse to start if the last record has no end time (still active).
    if records and not records[-1][1][1]:
        print "error: there is a project already active"
        return
    write(server.start(project, records), logfile, time_format)
    print "starting work on %s" % colored(project, attrs=['bold'])
    print " at %s" % colored(server.date_to_txt(now(), time_format), 'green')
@cmdapp.cmd
def stop(logfile, time_format):
    "stop tracking for the active project"
    def save_and_output(records):
        # Close the active record, persist the log, then report what was done.
        records = server.stop(records)
        write(records, logfile, time_format)
        def output(r):
            # r is (project, (start_time, end_time)).
            print "worked on %s" % colored(r[0], attrs=['bold'])
            print " from %s" % colored(
                server.date_to_txt(r[1][0], time_format), 'green')
            print " to now, %s" % colored(
                server.date_to_txt(r[1][1], time_format), 'green')
            print " => %s elapsed" % colored(
                time_elapsed(r[1][0], r[1][1]), 'red')
        output(records[-1])
    save_and_output(read(logfile, time_format))
@cmdapp.cmd
def parse(logfile, time_format):
    "parses a stream with text formatted as a Timed logfile and shows a summary"
    # Records come from stdin rather than the configured logfile.
    records = [server.record_from_txt(line, only_elapsed=True,
        time_format=time_format) for line in sys.stdin.readlines()]
    # TODO: make this code better (duplicated from summary()).
    def output(summary):
        width = max([len(p[0]) for p in summary]) + 3
        print '\n'.join([
            "%s%s%s" % (p[0], ' ' * (width - len(p[0])),
                colored(minutes_to_txt(p[1]), 'red')) for p in summary])
    output(server.summarize(records))
@cmdapp.cmd
def projects(logfile, time_format):
    "prints a newline-separated list of all projects"
    print '\n'.join(server.list_projects(read(logfile, time_format)))
def read(logfile, time_format, only_elapsed=False):
    """Parse the logfile into records, creating the file if it is missing.

    With only_elapsed=True each record is (project, minutes) instead of
    (project, (start, end)).
    """
    # 'a+' keeps the original create-if-missing behaviour; the explicit
    # seek(0) guarantees reading from the start (the initial read offset of
    # 'a+' is platform-dependent), and the with-block closes the handle,
    # which the original version leaked.
    with open(os.path.expanduser(logfile), 'a+') as f:
        f.seek(0)
        return [server.record_from_txt(line, only_elapsed=only_elapsed,
            time_format=time_format) for line in f.readlines()]
def write(records, logfile, time_format):
    """Serialize all records and overwrite the logfile."""
    try:
        # with-block closes the file even on error; the original relied on
        # refcounting to close a handle it never kept a reference to
        with open(logfile, 'w') as f:
            f.write('\n'.join(
                [server.record_to_txt(record, time_format) for record in records]))
    except IOError:
        # single-argument print() is valid under both Python 2 and 3,
        # unlike the original print statement
        print("error: could not open log file for writing: %s" % logfile)
def time_elapsed(start, end=None):
    # Human-readable elapsed time between two datetimes (end defaults to now).
    return minutes_to_txt(server.minutes_elapsed(start, end))
def minutes_to_txt(delta):
    """Format a duration given in whole minutes as e.g. '2h5m'.

    Uses divmod (floor division) so the result is correct on both Python 2
    and Python 3 — the original 'delta / 60' produced a float under
    Python 3, yielding output like '2.083...h'.  Also avoids shadowing the
    builtin min().
    """
    hours, minutes = divmod(delta, 60)
    return "%sh%sm" % (hours, minutes)
|
adeel/timed | timed/cmdapp.py | <filename>timed/cmdapp.py<gh_stars>10-100
import sys
program = {}
handlers = {}
def main(name, desc, config=None):
    """Parse sys.argv and dispatch to a registered command handler.

    '--key=value' arguments become keyword options, '--flag' becomes
    key=True; everything else is passed positionally.  Unknown or missing
    commands fall back to help().
    """
    program['name'] = name
    program['desc'] = desc
    if len(sys.argv) < 2:
        command, args = '', []
    else:
        command, args = sys.argv[1], sys.argv[2:]
    # avoid the mutable-default-argument trap: the original 'config={}'
    # was shared and mutated across calls
    options = {} if config is None else config
    positional = []
    for arg in args:
        if arg.startswith('--'):
            opt = arg[2:]
            try:
                # split once so option values may themselves contain '='
                key, val = opt.split('=', 1)
            except ValueError:
                # bare flag, e.g. --verbose (the original bound 'key' to a
                # one-element list here, producing an unusable option key)
                key, val = opt, True
            options[key] = val
        else:
            # collect positionals instead of del-while-enumerating, which
            # skipped the element following each removed option
            positional.append(arg)
    if command in handlers:
        handler = handlers[command]
    else:
        handler = help
    try:
        handler(*positional, **options)
    except Exception as e:
        # single-argument print() works on both Python 2 and 3
        print("error: %s" % str(e))
def cmd(handler):
    """Decorator: register *handler* as a subcommand, keyed by its name."""
    name = handler.__name__
    handlers[name] = handler
    return handler
def default(handler):
    """Decorator: make *handler* the command run when none is named."""
    handlers[''] = handler
    return handler
def help(**options):
    # Print program description plus one usage line per registered
    # subcommand; each line's text comes from the handler's docstring.
    subcmds = []
    for name, handler in handlers.items():
        syntax = (program['name'] + ' ' + name).strip()
        usage = ' %s: %s' % (syntax, handler.__doc__)
        subcmds.append(usage)
    subcmds = '\n'.join(subcmds)
    doc = """%s
Usage:
%s""" % (program['desc'], subcmds)
    print doc
|
adeel/timed | timed/server.py | import datetime
import itertools
from operator import itemgetter
def summarize(records):
    """Total minutes per project: returns [(project, total_minutes), ...]."""
    by_project = itemgetter(0)
    totals = []
    for project, group in itertools.groupby(sorted(records, key=by_project), by_project):
        totals.append((project, sum(minutes for _, minutes in group)))
    return totals
def start(project, records):
    """Append an open record for *project*, unless one is already running."""
    if records:
        last_span = records[-1][1]
        if not last_span[1]:
            # last record has no end time: a project is still active, no-op
            return records
    return records + [(project, (datetime.datetime.now(), None))]
def stop(records):
    """Close the active (open-ended) record with the current time, if any."""
    if not records:
        return records
    project, (begin, end) = records[-1]
    if end:
        # nothing running: return the records untouched
        return records
    closed = (project, (begin, datetime.datetime.now()))
    return records[:-1] + [closed]
def list_projects(records):
    """Sorted, de-duplicated list of project names appearing in *records*."""
    by_project = itemgetter(0)
    ordered = sorted(records, key=by_project)
    return [project for project, _ in itertools.groupby(ordered, by_project)]
def record_from_txt(line, only_elapsed=False, time_format='%H:%M on %d %b %Y'):
    """Parse one logfile line of the form 'project: start - end'.

    Returns (project, (start_dt, end_dt)), or (project, minutes) when
    only_elapsed is True.  Raises this module's SyntaxError on malformed
    lines.
    """
    try:
        project, times = line.split(':', 1)
        parts = times.split(' - ')
        if len(parts) != 2:
            # the original raised an uncaught TypeError here (lambda arity
            # mismatch) whenever the ' - ' separator was missing
            raise ValueError(line)
        begin = date_from_txt(parts[0].strip(), time_format)
        end = date_from_txt(parts[1].strip(), time_format)
    except ValueError:
        raise SyntaxError(line)
    if only_elapsed:
        return (project.strip(), minutes_elapsed(begin, end))
    return (project.strip(), (begin, end))
def record_to_txt(record, time_format):
    """Serialize a (project, (start, end)) record to one logfile line."""
    project, (begin, end) = record
    begin_txt = date_to_txt(begin, time_format)
    end_txt = date_to_txt(end, time_format)
    return "%s: %s - %s" % (project, begin_txt, end_txt)
def date_from_txt(date, time_format):
    """Parse *date* with *time_format*; '' or None maps back to None."""
    return datetime.datetime.strptime(date, time_format) if date else None
def date_to_txt(date, time_format):
    """Format *date* with *time_format*; a falsy date becomes ''."""
    return date.strftime(time_format) if date else ''
def minutes_elapsed(start, end=None):
    """Whole minutes between *start* and *end* (default: now).

    Uses total_seconds(): the original read timedelta.seconds, which
    silently drops whole days from spans longer than 24 hours, and its
    '/' division returned a float under Python 3.
    """
    if not end:
        end = datetime.datetime.now()
    return int((end - start).total_seconds() // 60)
class SyntaxError(Exception):
    """Raised for logfile lines that cannot be parsed.

    Deliberately shadows the builtin SyntaxError within this module; the
    offending line is kept on .line for callers to inspect.
    """

    def __init__(self, line):
        self.line = line

    def __str__(self):
        # %r is equivalent to interpolating repr(self.line)
        return "syntax error on this line:\n %r" % (self.line,)
|
adeel/timed | setup.py | from distutils.core import setup
setup(
name='timed',
version='0.4.1',
description="command-line time tracker",
url='http://adeel.github.com/timed',
author='<NAME>',
author_email='<EMAIL>',
packages=['timed'],
scripts=['bin/timed'],
install_requires=['termcolor'],
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: POSIX',
'Topic :: Utilities',
]
)
|
minervaproject/dd-trace-py | tests/profiling/test_profiler.py | <gh_stars>1-10
import pytest
import ddtrace
from ddtrace.profiling import profiler
from ddtrace.profiling.collector import stack
from ddtrace.profiling.exporter import http
def test_status():
    """status flips STOPPED -> RUNNING -> STOPPED across start()/stop()."""
    p = profiler.Profiler()
    assert repr(p.status) == "STOPPED"
    p.start()
    assert repr(p.status) == "RUNNING"
    p.stop()
    assert repr(p.status) == "STOPPED"
def test_restart():
    """The profiler can be started again after a no-flush stop."""
    p = profiler.Profiler()
    p.start()
    p.stop(flush=False)
    p.start()
    p.stop(flush=False)
def test_multiple_stop():
    """Check that the profiler can be stopped twice.

    This is useful since the atexit.unregister call might not exist on Python 2,
    therefore the profiler can be stopped twice (once by the user, once at exit).
    """
    p = profiler.Profiler()
    p.start()
    p.stop()
    p.stop()
@pytest.mark.parametrize(
    "service_name_var", ("DD_SERVICE", "DD_SERVICE_NAME", "DATADOG_SERVICE_NAME"),
)
def test_default_from_env(service_name_var, monkeypatch):
    """Each supported service-name env var propagates to the HTTP exporter."""
    monkeypatch.setenv("DD_API_KEY", "foobar")
    monkeypatch.setenv(service_name_var, "foobar")
    prof = profiler.Profiler()
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.service == "foobar"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
def test_service_api(monkeypatch):
    """A service passed via the constructor reaches the HTTP exporter."""
    monkeypatch.setenv("DD_API_KEY", "foobar")
    prof = profiler.Profiler(service="foobar")
    assert prof.service == "foobar"
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.service == "foobar"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
def test_tracer_api(monkeypatch):
    """A tracer passed via the constructor reaches the stack collector."""
    monkeypatch.setenv("DD_API_KEY", "foobar")
    prof = profiler.Profiler(tracer=ddtrace.tracer)
    assert prof.tracer == ddtrace.tracer
    for collector in prof.collectors:
        if isinstance(collector, stack.StackCollector):
            assert collector.tracer == ddtrace.tracer
            break
    else:
        pytest.fail("Unable to find stack collector")
def test_env_default(monkeypatch):
    """DD_ENV / DD_VERSION propagate to both the profiler and its exporter."""
    monkeypatch.setenv("DD_API_KEY", "foobar")
    monkeypatch.setenv("DD_ENV", "staging")
    monkeypatch.setenv("DD_VERSION", "123")
    prof = profiler.Profiler()
    assert prof.env == "staging"
    assert prof.version == "123"
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.env == "staging"
            assert exporter.version == "123"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
def test_env_api():
    """env/version passed via the constructor reach the HTTP exporter."""
    prof = profiler.Profiler(env="staging", version="123")
    assert prof.env == "staging"
    assert prof.version == "123"
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.env == "staging"
            assert exporter.version == "123"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
@pytest.mark.parametrize(
    "name_var", ("DD_API_KEY", "DD_PROFILING_API_KEY"),
)
def test_env_api_key(name_var, monkeypatch):
    """Either API-key env var selects agentless mode with the intake endpoint."""
    monkeypatch.setenv(name_var, "foobar")
    prof = profiler.Profiler()
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.api_key == "foobar"
            assert exporter.endpoint == "https://intake.profile.datadoghq.com/v1/input"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
def test_env_no_api_key():
    """Without an API key the exporter targets the local trace agent."""
    prof = profiler.Profiler()
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.api_key is None
            assert exporter.endpoint == "http://localhost:8126/profiling/v1/input"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
def test_env_endpoint_url(monkeypatch):
    """DD_AGENT_HOST / DD_TRACE_AGENT_PORT build the agent endpoint URL."""
    monkeypatch.setenv("DD_AGENT_HOST", "foobar")
    monkeypatch.setenv("DD_TRACE_AGENT_PORT", "123")
    prof = profiler.Profiler()
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.api_key is None
            assert exporter.endpoint == "http://foobar:123/profiling/v1/input"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
def test_env_endpoint_url_no_agent(monkeypatch):
    """DD_SITE selects the regional intake URL in agentless mode."""
    monkeypatch.setenv("DD_SITE", "datadoghq.eu")
    monkeypatch.setenv("DD_API_KEY", "123")
    prof = profiler.Profiler()
    for exporter in prof.exporters:
        if isinstance(exporter, http.PprofHTTPExporter):
            assert exporter.api_key == "123"
            assert exporter.endpoint == "https://intake.profile.datadoghq.eu/v1/input"
            break
    else:
        pytest.fail("Unable to find HTTP exporter")
|
minervaproject/dd-trace-py | ddtrace/profiling/profiler.py | # -*- encoding: utf-8 -*-
import atexit
import logging
import os
from ddtrace.profiling import recorder
from ddtrace.profiling import scheduler
from ddtrace.utils import deprecation
from ddtrace.vendor import attr
from ddtrace.profiling.collector import exceptions
from ddtrace.profiling.collector import memory
from ddtrace.profiling.collector import stack
from ddtrace.profiling.collector import threading
from ddtrace.profiling.exporter import file
from ddtrace.profiling.exporter import http
LOG = logging.getLogger(__name__)
ENDPOINT_TEMPLATE = "https://intake.profile.{}/v1/input"
def _get_endpoint():
    """Return the profile-intake URL, preferring the deprecated explicit override."""
    override = os.environ.get("DD_PROFILING_API_URL")
    if not override:
        # derive the regional endpoint from DD_SITE (default: US site)
        return ENDPOINT_TEMPLATE.format(os.environ.get("DD_SITE", "datadoghq.com"))
    deprecation.deprecation("DD_PROFILING_API_URL", "Use DD_SITE")
    return override
def _get_api_key():
    """Return the API key, preferring the deprecated profiling-specific variable."""
    override = os.environ.get("DD_PROFILING_API_KEY")
    if not override:
        return os.environ.get("DD_API_KEY")
    deprecation.deprecation("DD_PROFILING_API_KEY", "Use DD_API_KEY")
    return override
def _build_default_exporters(service, env, version):
    """Choose exporters from the environment: pprof file, agentless HTTP, or agent HTTP."""
    _OUTPUT_PPROF = os.environ.get("DD_PROFILING_OUTPUT_PPROF")
    if _OUTPUT_PPROF:
        # local file output takes precedence over any network export
        return [
            file.PprofFileExporter(_OUTPUT_PPROF),
        ]
    api_key = _get_api_key()
    if api_key:
        # Agentless mode
        endpoint = _get_endpoint()
    else:
        # no API key: ship through the local trace agent instead
        hostname = os.environ.get("DD_AGENT_HOST", os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME", "localhost"))
        port = int(os.environ.get("DD_TRACE_AGENT_PORT", 8126))
        endpoint = os.environ.get("DD_TRACE_AGENT_URL", "http://%s:%d" % (hostname, port)) + "/profiling/v1/input"
    return [
        http.PprofHTTPExporter(service=service, env=env, version=version, api_key=api_key, endpoint=endpoint),
    ]
def _get_service_name():
for service_name_var in ("DD_SERVICE", "DD_SERVICE_NAME", "DATADOG_SERVICE_NAME"):
service_name = os.environ.get(service_name_var)
if service_name is not None:
return service_name
# This ought to use `enum.Enum`, but since it's not available in Python 2, we just use a dumb class.
@attr.s(repr=False)
class ProfilerStatus(object):
    """A Profiler status."""
    status = attr.ib()
    def __repr__(self):
        # e.g. "RUNNING" / "STOPPED"
        return self.status.upper()
# Singleton instances: compare by identity against Profiler.status.
ProfilerStatus.STOPPED = ProfilerStatus("stopped")
ProfilerStatus.RUNNING = ProfilerStatus("running")
@attr.s
class Profiler(object):
    """Run profiling while code is executed.
    Note that the whole Python process is profiled, not only the code executed. Data from all running threads are
    caught.
    If no collectors are provided, default ones are created.
    If no exporters are provided, default ones are created.
    """
    service = attr.ib(factory=_get_service_name)
    env = attr.ib(factory=lambda: os.environ.get("DD_ENV"))
    version = attr.ib(factory=lambda: os.environ.get("DD_VERSION"))
    tracer = attr.ib(default=None)
    collectors = attr.ib(default=None)
    exporters = attr.ib(default=None)
    # one scheduler per distinct recorder, wired up in __attrs_post_init__
    _schedulers = attr.ib(init=False, factory=list)
    status = attr.ib(init=False, type=ProfilerStatus, default=ProfilerStatus.STOPPED)
    @staticmethod
    def _build_default_collectors(tracer):
        # Event buffers are sized for roughly 60 seconds of data at the
        # default sampling rates (see inline arithmetic below).
        r = recorder.Recorder(
            max_events={
                # Allow to store up to 10 threads for 60 seconds at 100 Hz
                stack.StackSampleEvent: 10 * 60 * 100,
                stack.StackExceptionSampleEvent: 10 * 60 * 100,
                # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
                # = (60 seconds / 0.1 seconds)
                memory.MemorySampleEvent: int(60 / 0.1),
            },
            default_max_events=int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS)),
        )
        return [
            stack.StackCollector(r, tracer=tracer),
            memory.MemoryCollector(r),
            exceptions.UncaughtExceptionCollector(r),
            threading.LockCollector(r),
        ]
    def __attrs_post_init__(self):
        # Fill in defaults that depend on other attributes (attrs factories
        # cannot reference sibling attributes).
        if self.collectors is None:
            self.collectors = self._build_default_collectors(self.tracer)
        if self.exporters is None:
            self.exporters = _build_default_exporters(self.service, self.env, self.version)
        if self.exporters:
            for rec in self.recorders:
                self._schedulers.append(scheduler.Scheduler(recorder=rec, exporters=self.exporters))
    @property
    def recorders(self):
        # de-duplicated: several collectors may share one recorder
        return set(c.recorder for c in self.collectors)
    def start(self, stop_on_exit=True):
        """Start the profiler.
        :param stop_on_exit: Whether to stop the profiler and flush the profile on exit.
        """
        for col in self.collectors:
            try:
                col.start()
            except RuntimeError:
                # `tracemalloc` is unavailable?
                pass
        for s in self._schedulers:
            s.start()
        self.status = ProfilerStatus.RUNNING
        if stop_on_exit:
            atexit.register(self.stop)
    def stop(self, flush=True):
        """Stop the profiler.
        :param flush: Wait for the flush of the remaining events before stopping.
        """
        # Stop collectors first so no new events arrive while schedulers drain;
        # reversed() undoes the start order.
        for col in reversed(self.collectors):
            col.stop()
        for col in reversed(self.collectors):
            col.join()
        for s in reversed(self._schedulers):
            s.stop()
        if flush:
            for s in reversed(self._schedulers):
                s.join()
        self.status = ProfilerStatus.STOPPED
        # Python 2 does not have unregister
        if hasattr(atexit, "unregister"):
            # You can unregister a method that was not registered, so no need to do any other check
            atexit.unregister(self.stop)
|
minervaproject/dd-trace-py | ddtrace/contrib/redis/__init__.py | <reponame>minervaproject/dd-trace-py<filename>ddtrace/contrib/redis/__init__.py
"""
Traces redis client queries.
If you are not autoinstrumenting with ``ddtrace-run`` then install the redis
instrumentation with::
from ddtrace import patch
patch(redis=True)
Global Configuration
~~~~~~~~~~~~~~~~~~~~
.. py:data:: ddtrace.config.redis["service"]
The service name reported by default for your redis instances.
Default: ``"redis"``
Instance Configuration
~~~~~~~~~~~~~~~~~~~~~~
from ddtrace import Pin
import redis
# Override service name for this instance
Pin.override(client, service="redis-queue")
# This will report a span with the default settings
client = redis.StrictRedis(host="localhost", port=6379)
client.get("my-key")
"""
from ...utils.importlib import require_modules
required_modules = ["redis", "redis.client"]
# Expose the patching API only when redis is importable; require_modules
# yields the subset of required modules that could not be imported.
with require_modules(required_modules) as missing_modules:
    if not missing_modules:
        from .patch import patch
        from .tracers import get_traced_redis, get_traced_redis_from
        __all__ = ["get_traced_redis", "get_traced_redis_from", "patch"]
|
dblevin1/TheHaloMod | halomod_app/form_utils.py | """
Defines custom meta-forms and other utilities to make forms easier.
"""
import logging
import re
from collections import OrderedDict
from crispy_forms.bootstrap import Tab
from crispy_forms.layout import Div, Field
from django import forms
from django.utils.safestring import mark_safe
from halomod import TracerHaloModel
from . import utils
logger = logging.getLogger(__name__)
DEFAULTS = TracerHaloModel.get_all_parameter_defaults()
class RangeSlider(forms.TextInput):
    """TextInput rendered as a two-handle jQuery-UI range slider.

    The underlying (readonly) text input holds the value as
    '<elem_name> lo - hi'; the injected script keeps it in sync with the
    slider handles.
    """
    def __init__(self, minimum, maximum, step, elem_name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # stringified once so they can be concatenated into the JS below
        self.minimum = str(minimum)
        self.maximum = str(maximum)
        self.step = str(step)
        self.elem_name = str(elem_name)
    def get_initial(self, val):
        """Turn a stored 'lo - hi' value into a JS array literal; fall back to full range."""
        try:
            rg = val.split(" - ")
            return """[ """ + rg[0] + "," + rg[1] + """ ]"""
        except IndexError:
            return """[ """ + self.minimum + """,""" + self.maximum + """ ]"""
    def render(self, name, value, attrs=None, renderer=None):
        # render the plain input, then append the slider div + wiring script;
        # the element id is scraped back out of the rendered HTML
        s = super(RangeSlider, self).render(name, value, attrs)
        elem_id = re.findall(r'id_([A-Za-z0-9_\./\\-]*)"', s)[0]
        val = self.get_initial(value)
        html = (
            """<div id="slider-range-"""
            + elem_id
            + """"></div>
<script>
$('#id_"""
            + elem_id
            + """').attr("readonly", true)
$( "#slider-range-"""
            + elem_id
            + """" ).slider({
range: true,
min: """
            + self.minimum
            + """,
max: """
            + self.maximum
            + """,
step: """
            + self.step
            + """,
values: """
            + val
            + """,
slide: function( event, ui ) {
$( "#id_"""
            + elem_id
            + """" ).val(" """
            + self.elem_name
            + """ "+ ui.values[ 0 ] + " - " + ui.values[ 1 ] );
}
});
$( "#id_"""
            + elem_id
            + """" ).val(" """
            + self.elem_name
            + """ "+ $( "#slider-range-"""
            + elem_id
            + """" ).slider( "values", 0 ) +
" - " + $( "#slider-range-"""
            + elem_id
            + """" ).slider( "values", 1 ) );
</script>
"""
        )
        return mark_safe(s + html)
class FloatListField(forms.CharField):
    """
    Defines a form field that accepts comma-separated real values and returns a list of floats.
    """

    def __init__(self, min_value=None, max_value=None, *args, **kwargs):
        self.min_val, self.max_val = min_value, max_value
        super(FloatListField, self).__init__(*args, **kwargs)

    def clean(self, value):
        value = self.to_python(value)
        self.validate(value)
        self.run_validators(value)
        parsed = []
        if value:
            for token in value.split(","):
                try:
                    parsed.append(float(token))
                except ValueError:
                    raise forms.ValidationError("%s is not a float" % token)
        # bounds are checked in a second pass so conversion errors win
        for number in parsed:
            if self.min_val is not None and number < self.min_val:
                raise forms.ValidationError(
                    f"Must be greater than {self.min_val} ({number})"
                )
            if self.max_val is not None and number > self.max_val:
                raise forms.ValidationError(
                    f"Must be smaller than {self.max_val} ({number})"
                )
        return parsed
class RangeSliderField(forms.CharField):
    """CharField rendered with the RangeSlider widget; cleans to [lo, hi] floats."""
    def __init__(self, *args, **kwargs):
        # slider configuration arrives through custom kwargs, popped before
        # the remainder is forwarded to CharField
        self.name = kwargs.pop("name", "")
        self.minimum = kwargs.pop("minimum", 0)
        self.maximum = kwargs.pop("maximum", 100)
        self.step = kwargs.pop("step", 1)
        kwargs["widget"] = RangeSlider(self.minimum, self.maximum, self.step, self.name)
        if "label" not in kwargs.keys():
            kwargs["label"] = False
        super(RangeSliderField, self).__init__(*args, **kwargs)
    def clean(self, value):
        super().clean(value)
        # NOTE(review): the widget stores the value with the elem_name
        # prefixed (" <name> lo - hi"); float() on the first piece appears
        # to rely on that prefix being empty/numeric — confirm.
        items = value.split(" - ")
        return [float(i) for i in items]
class CompositeForm(forms.Form):
    """
    Helper class to handle form composition.
    Usage::
        class ProfileForm(CompositeForm):
            form_list = [ProfileAddressForm, ProfileBirthDayForm]
    Ripped from https://github.com/t0ster/django-composite-form
    """
    form_list = None  # Form classes
    def __init__(self, data=None, files=None, field_order=None, *args, **kwargs):
        # NOTE(review): field_order=None is passed to super() while the real
        # ordering is applied via order_fields() below — confirm intentional.
        super().__init__(data, files, field_order=None, *args, **kwargs)
        self._form_instances = OrderedDict()  # Form instances
        for form in self.form_list:
            kw = kwargs.copy()
            self._form_instances[form] = form(data, files, *args, **kw)
        # hoist every subform's fields onto this composite form
        for form in self.forms:
            self.fields.update({f"{name}": val for name, val in form.fields.items()})
        self.order_fields(self.field_order if field_order is None else field_order)
    #
    # for form in self.forms:
    #     self.initial.update(form.initial)
    @property
    def forms(self):
        """
        Returns list of form instances
        """
        # Preserving forms ordering
        return [self._form_instances[form_class] for form_class in self.form_list]
    def get_form(self, form_class):
        """
        Returns form instance by its class
        ``form_class``: form class from ``forms_list``
        """
        return self._form_instances[form_class]
    def non_field_errors(self):
        # aggregate non-field errors from every subform
        _errors = super().non_field_errors()
        for form in self.forms:
            _errors.extend(form.non_field_errors())
        return _errors
    def full_clean(self):
        # clean subforms too, merging their cleaned_data and errors into ours
        super().full_clean()
        if not self.is_bound:
            return
        for form in self.forms:
            form.full_clean()
            self.cleaned_data.update(form.cleaned_data)
            self._errors.update(form._errors)
class ComponentModelForm(forms.Form):
    """Base class for forms that define input to Components."""
    label = None  # The text that shows on the form's tab
    kind = None  # What it's called in the hmf framework, eg "hmf" for "hmf_model"
    choices = None
    _initial = None
    multi = False
    module = None
    ignore_fields = []
    add_fields = {}
    field_kwargs = {}
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # label/kind default to being derived from the subclass name
        if self.label is None:
            self.label = utils.camel_to_words(self.__class__.__name__.split("Form")[0])
        if self.kind is None:
            self.kind = self.__class__.__name__.split("Form")[0].lower()
        # Get initial model choice based on defaults of TracerHaloModel.
        if self._initial is None:
            df = DEFAULTS.get(self.kind + "_model")
            if isinstance(df, str):
                self._initial = df
            elif df is None:
                self._initial = "None"
            else:
                self._initial = df.__name__
        # Fill the fields
        if not self.multi:
            self.fields[f"{self.kind}_model"] = forms.ChoiceField(
                label=self.label,
                choices=self.choices,
                initial=self._initial,
                required=True,
            )
        else:
            self.fields[f"{self.kind}_model"] = forms.MultipleChoiceField(
                label=self.label,
                choices=self.choices,
                initial=[self._initial],
                required=True,
            )
        # Add all the possible parameters for this model
        for choice in self.choices:
            self._add_default_model(choice[0])
        # extra, hand-declared fields are tagged with component/param metadata
        for fieldname, field in self.add_fields.items():
            name = f"{self.kind}_{fieldname}"
            self.fields[name] = field
            self.fields[name].component = self.kind
            self.fields[name].paramname = fieldname
    def _process_extras(self, extra: list) -> Div:
        """Prepend extra fields to those inherently in the model."""
        # Make it three column
        row = Div(css_class="mt-4 row")
        if not extra:
            return row
        n = min(len(extra), 3)
        size = 12 // n
        cols = [Div(css_class=f"col-{size}") for i in range(n)]
        # round-robin the extras across the columns
        for i, x in enumerate(extra):
            cols[i % len(cols)].append(Div(x, css_class="col"))
        for col in cols:
            row.append(col)
        return row
    def _layout(self, extra=[], appended_rows=[]):
        # Crispy-forms layout: one tab holding the model chooser plus the
        # per-model parameter divs.
        extra_row = self._process_extras(extra)
        tab = Tab(
            self.label,
            extra_row,
            Div(
                Div(
                    Field(
                        f"{self.kind}_model",
                        css_class="hmf_model",
                        data_component=self.kind,
                    ),
                    css_class="col-8",
                ),
                self._get_model_param_divs(),
                css_class="mt-4 row",
            ),
        )
        for row in appended_rows:
            tab.append(row)
        return tab
    def _add_default_model(self, model):
        # Create a form field for every simple default parameter of *model*.
        # Allow a "None" class
        if model == "None" or model is None:
            return
        cls = getattr(self.module, model)
        for key, val in getattr(cls, "_defaults", {}).items():
            name = f"{self.kind}_{model}_{key}"
            # dict-valued and None defaults are not representable as fields
            if (
                key in self.ignore_fields
                or model + "_" + key in self.ignore_fields
                or isinstance(val, dict)
                or val is None
            ):
                continue
            field_types = {
                float: forms.FloatField,
                bool: forms.BooleanField,
                str: forms.ChoiceField,
            }
            fkw = self.field_kwargs.get(key, {})
            thisfield = fkw.pop("type", field_types.get(type(val), forms.FloatField))
            self.fields[name] = thisfield(
                label=fkw.pop("label", key), initial=str(val), required=False, **fkw
            )
            self.fields[name].component = self.kind
            self.fields[name].model = model
            self.fields[name].paramname = key
    def _get_model_param_divs(self):
        # One div per parameter field; model-specific ones carry data-*
        # attributes so the front-end can show/hide them per chosen model.
        param_div = Div(css_class="col-4")
        for name, field in self.fields.items():
            if name == f"{self.kind}_model":
                continue
            if hasattr(field, "model"):
                param_div.append(
                    Div(
                        name,
                        css_class="col",
                        data_component=self.kind,
                        data_model=field.model,
                    )
                )
            else:
                param_div.append(Div(name, css_class="col"))
        return param_div
class FrameworkForm(forms.Form):
    """Base class for forms that define inputs to Frameworks."""
    label = None  # tab text; defaults to the class name
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.label is None:
            self.label = self.__class__.__name__
    def _layout(self, extra=[], appended_rows=[]):
        # appended_rows is accepted for interface parity but unused here
        keys = list(extra) + list(self.fields.keys())
        return Tab(self.label, Div(*keys, css_class="mt-4 col"))
|
dblevin1/TheHaloMod | TheHaloMod/urls.py | from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
path("", include("halomod_app.urls")),
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
|
dblevin1/TheHaloMod | halomod_app/views.py | import datetime
# import logging
import io
import logging
import zipfile
from collections import OrderedDict
import numpy as np
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django.http import Http404
from halomod import TracerHaloModel
from tabination.views import TabView
from hmf.helpers.cfg_utils import framework_to_dict
import toml
from . import forms
from . import utils
logger = logging.getLogger(__name__)
class BaseTab(TabView):
    """Base class for all main navigation tabs."""
    tab_group = "main"  # all tabs share one navigation group
    top = True
class about(BaseTab):
    """
    The home-page. Should just be simple html with links to what to do.
    """
    # static page: only tab metadata, no extra context
    _is_tab = True
    template_name = "about.html"
    tab_id = "/about/"
    tab_label = "About"
class help(BaseTab):
    """
    A simple html 'end-page' which shows information about parameters used.
    """
    # NOTE: shadows the builtin help() at module scope.
    _is_tab = True
    tab_id = "/help/"
    tab_label = "Help"
    template_name = "help.html"
class CalculatorInputBase(FormView):
    """The form for input."""
    # Define the needed variables for FormView class
    form_class = forms.FrameworkInput
    success_url = "/"
    template_name = "calculator_form.html"
    def form_valid(self, form):
        """Define what to do if the form is valid."""
        # Store both the computed halo-model object and the raw form data in
        # the session, keyed by the user-chosen label.
        label = form.cleaned_data["label"]
        if "objects" not in self.request.session:
            self.request.session["objects"] = OrderedDict()
        if "forms" not in self.request.session:
            self.request.session["forms"] = OrderedDict()
        self.request.session["objects"].update({label: form.halomod_obj})
        self.request.session["forms"].update({label: form.data})
        return super().form_valid(form)
class CalculatorInputCreate(CalculatorInputBase):
    """Create a new model, optionally pre-filled from an existing label."""
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # when cloning, seed the form with the saved data of the source label
        prev_label = self.kwargs.get("label", None)
        forms = self.request.session.get("forms", {})
        kwargs.update(
            current_models=self.request.session.get("objects", None),
            model_label=prev_label,
            initial=forms.get(prev_label, None) if prev_label else None,
        )
        return kwargs
class CalculatorInputEdit(CalculatorInputCreate):
    """Edit an existing model, renaming its session entries when relabelled."""
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs.update(edit=True)
        return kwargs
    def get(self, request, *args, **kwargs):
        """
        Handles GET requests and instantiates a blank version of the form.
        """
        # unknown label: fall back to the create page
        if kwargs.get("label", "") not in self.request.session.get("objects", {}):
            return HttpResponseRedirect("/create/")
        return super().get(request, *args, **kwargs)
    def form_valid(self, form):
        result = super().form_valid(form)
        old_label = self.kwargs["label"]
        new_label = form.cleaned_data["label"]
        # If editing, and the label was changed, we need to remove the old label.
        if old_label != new_label:
            # Delete all objects with the old label.
            del self.request.session["objects"][old_label]
            # Delete the model_errors
            try:
                del self.request.session["model_errors"][old_label]
            except KeyError:
                # missing entries are only noteworthy for non-default labels
                if old_label != "default":
                    if "model_errors" not in self.request.session:
                        logger.error(
                            f"When trying to delete {old_label} from model_errors, turns out model_errors hasn't yet been defined. User should see nothing wrong though."
                        )
                    elif old_label not in self.request.session["model_errors"]:
                        logger.error(
                            f"When trying to delete {old_label} from model_errors, turns out "
                            f"{old_label} doesn't exist. User should see nothing wrong though."
                        )
            # Remove the old form.
            try:
                del self.request.session["forms"][old_label]
            except KeyError:
                # Special-case the original 'default', since it doesn't exist as a form
                # at first.
                if self.kwargs["label"] != "default":
                    raise
        return result
def delete_plot(request, label):
    """Remove the model *label* from the session stores (kept if it's the last one)."""
    if len(request.session.get("objects", {})) > 1:
        # dict.pop with a default replaces the original try/del/except
        # KeyError boilerplate; .get(store, {}) also tolerates a store that
        # was never created (e.g. model_errors on a fresh session)
        for store in ("objects", "forms", "model_errors"):
            request.session.get(store, {}).pop(label, None)
    return HttpResponseRedirect("/")
def complete_reset(request):
    """Drop all calculator state from the session."""
    # pop() each key independently: the original del-chain aborted on the
    # first missing key, leaving any later keys behind
    for store in ("objects", "forms", "model_errors"):
        request.session.pop(store, None)
    return HttpResponseRedirect("/")
class ViewPlots(BaseTab):
    """Main calculator tab: shows the stored models and the plot chooser."""
    def get(self, request, *args, **kwargs):
        # Create a default TracerHaloModel object that displays upon opening.
        if "objects" not in request.session:
            default_obj = TracerHaloModel(hod_params={"central": True})
            request.session["objects"] = OrderedDict(default=default_obj)
            request.session["forms"] = OrderedDict()
            request.session["model_errors"] = OrderedDict()
        self.form = forms.PlotChoice(request)
        self.warnings = ""  # request.session['warnings']
        # flatten stored errors to one newline-joined string per model label
        model_errors = {
            k: "\n".join([str(vv) for vv in v.keys()])
            for k, v in request.session.get("model_errors", {}).items()
        }
        return self.render_to_response(
            self.get_context_data(
                form=self.form,
                warnings=self.warnings,
                model_errors=model_errors,
                objects=request.session["objects"],
            )
        )
    # tab registration metadata
    template_name = "image_page.html"
    _is_tab = True
    tab_id = "/"
    tab_label = "Calculator"
    top = True
def plots(request, filetype, plottype):
    """
    Chooses the type of plot needed and the filetype (pdf or png) and outputs it
    """
    objects = request.session.get("objects", {})
    if filetype not in ["png", "svg", "pdf", "zip"]:
        logger.error(f"Strange 'filetype' extension requested: {filetype}. 404ing...")
        raise Http404
    if not objects:
        return HttpResponseRedirect("/")
    elif len(objects) > 1:
        # with several models loaded, add ratio plots normalised to the first
        keymap = {
            **utils.KEYMAP,
            "comparison_dndm": {
                "xlab": utils.MLABEL,
                "ylab": r"Ratio of Mass Functions $ \left(\frac{dn}{dM}\right) / \left( \frac{dn}{dM} \right)_{%s} $"
                % list(objects.keys())[0],
                "yscale": "log",
                "basey": 2,
            },
            "comparison_fsigma": {
                "xlab": utils.MLABEL,
                "ylab": r"Ratio of Fitting Functions $f(\sigma)/ f(\sigma)_{%s}$"
                % list(objects.keys())[0],
                "yscale": "log",
                "basey": 2,
            },
        }
    else:
        keymap = utils.KEYMAP
    # Save the current plottype to the session for use elsewhere
    if filetype == "svg":
        # only save it when svg, which is what actually shows.
        request.session["current_plot"] = plottype
    figure_buf, errors = utils.create_canvas(
        objects, plottype, keymap[plottype], plot_format=filetype
    )
    # How to output the image
    if filetype == "png":
        response = HttpResponse(figure_buf.getvalue(), content_type="image/png")
    elif filetype == "svg":
        response = HttpResponse(figure_buf.getvalue(), content_type="image/svg+xml")
    elif filetype == "pdf":
        response = HttpResponse(figure_buf.getvalue(), content_type="application/pdf")
        response["Content-Disposition"] = "attachment;filename=" + plottype + ".pdf"
    elif filetype == "zip":
        # NOTE(review): returns a bare StringIO, not an HttpResponse — this
        # branch looks unfinished; confirm the intended behaviour.
        response = io.StringIO()
    else:
        logger.error(f"Strange 'filetype' extension requested: {filetype}. 404ing...")
        raise Http404
    # Record which plot types failed for which model, keyed by error message.
    for k, v in errors.items():
        if k not in request.session["model_errors"]:
            # store a set from the start: the original stored a list here and
            # later called .add() on it, raising AttributeError when the same
            # model/error pair recurred
            request.session["model_errors"][k] = {v: {plottype}}
        else:
            if v not in request.session["model_errors"][k]:
                request.session["model_errors"][k][v] = {
                    plottype,
                }
            else:
                request.session["model_errors"][k][v].add(plottype)
    return response
def header_txt(request):
    """Download every stored model's full configuration as TOML files in a zip."""
    # Open up file-like objects for response
    response = HttpResponse(content_type="application/zip")
    response["Content-Disposition"] = "attachment; filename=all_plots.zip"
    buff = io.BytesIO()
    archive = zipfile.ZipFile(buff, "w", zipfile.ZIP_DEFLATED)
    # Import all the input form data so it can be written to file
    objects = request.session["objects"]
    for i, (label, o) in enumerate(objects.items()):
        # one <label>.toml entry per stored model
        s = io.BytesIO()
        s.write(toml.dumps(framework_to_dict(o)).encode())
        archive.writestr(f"{label}.toml", s.getvalue())
        s.close()
    archive.close()
    buff.flush()
    ret_zip = buff.getvalue()
    buff.close()
    response.write(ret_zip)
    return response
def data_output(request):
    """Serve each model's mass-, k- and r-based quantities as text files in a zip."""
    # TODO: output HDF5 format
    # Import all the data we need
    objects = request.session["objects"]
    labels = list(objects.keys())
    objects = list(objects.values())
    # Open up file-like objects for response
    response = HttpResponse(content_type="application/zip")
    response["Content-Disposition"] = "attachment; filename=all_plots.zip"
    buff = io.BytesIO()
    archive = zipfile.ZipFile(buff, "w", zipfile.ZIP_DEFLATED)
    # Write out mass-based, k-based and r-based data files
    for i, o in enumerate(objects):
        for kind in utils.XLABELS:
            s = io.BytesIO()
            # '\n' terminators were missing in the original, which fused all
            # header lines and the first data row onto a single line
            s.write(f"# [0] {utils.XLABELS[kind]}\n".encode())
            items = {
                k: utils.KEYMAP[k]["ylab"]
                for k in utils.KEYMAP
                if utils.KEYMAP[k]["xlab"] == utils.XLABELS[kind]
            }
            # only quantities this model actually provides become columns;
            # number the headers against that filtered list so the column
            # indices match the data (the original numbered the unfiltered
            # list, mislabelling columns whenever a quantity was None)
            included = [label for label in items if getattr(o, label) is not None]
            for j, label in enumerate(included):
                s.write(f"# [{j+1}] {items[label]}\n".encode())
            out = np.array(
                [getattr(o, kind)] + [getattr(o, label) for label in included]
            ).T
            np.savetxt(s, out)
            archive.writestr(f"{kind}Vector_{labels[i]}.txt", s.getvalue())
            s.close()
    archive.close()
    buff.flush()
    ret_zip = buff.getvalue()
    buff.close()
    response.write(ret_zip)
    return response
def halogen(request):
    """Return a zip of HALOgen-ready inputs: n(>m) and P(k) files per model."""
    # Import all the data we need
    objects = request.session["objects"]
    # Open up file-like objects for response
    response = HttpResponse(content_type="application/zip")
    response["Content-Disposition"] = "attachment; filename=halogen.zip"
    buff = io.BytesIO()
    archive = zipfile.ZipFile(buff, "w", zipfile.ZIP_DEFLATED)
    # Write out ngtm and lnP data files
    for label, o in objects.items():
        # MASS BASED: cumulative mass function n(>m).
        s = io.BytesIO()
        np.savetxt(s, np.array([o.m, o.ngtm]).T)
        archive.writestr("ngtm_%s.txt" % label, s.getvalue())
        s.close()
        # K BASED: matter power spectrum.
        # BUGFIX: this buffer was an io.StringIO (inconsistent with the rest of
        # the module) and was never closed; use BytesIO and close it.
        s = io.BytesIO()
        np.savetxt(s, np.array([o.k, o.power]).T)
        archive.writestr("matterpower_%s.txt" % label, s.getvalue())
        s.close()
    archive.close()
    buff.flush()
    ret_zip = buff.getvalue()
    buff.close()
    response.write(ret_zip)
    return response
class ContactFormView(FormView):
    """Render the contact form and e-mail its contents to the site contacts."""

    form_class = forms.ContactForm
    template_name = "email_form.html"
    success_url = "/email-sent/"

    def form_valid(self, form):
        # Assemble the e-mail body from the validated form fields.
        data = form.cleaned_data
        body = "{name} / {email} said: ".format(
            name=data.get("name"), email=data.get("email")
        )
        body += "\n\n{0}".format(data.get("message"))
        send_mail(
            subject=data.get("subject").strip(),
            message=body,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[settings.CONTACT_RECIPIENTS],
        )
        return super(ContactFormView, self).form_valid(form)
class EmailSuccess(TemplateView):
    """Static confirmation page shown after an e-mail has been sent."""
    template_name = "email_sent.html"
# ===============================================================================
# Some views that just return downloadable content
# ===============================================================================
def get_code(request, name):
    """Serve the file *name* as a download with a suffix-appropriate MIME type.

    Fixes two defects in the original: pdf/zip files were opened in text mode
    (corrupting binary content), and an unrecognised suffix left ``response``
    unbound, raising UnboundLocalError.
    """
    # NOTE(review): `name` is used as a filesystem path directly; confirm the
    # URL routing restricts it, otherwise this permits arbitrary-file reads.
    content_types = {
        "pdf": "application/pdf",
        "py": "text/plain",
        "zip": "application/zip",
    }
    suffix = name.split(".")[-1]
    # Binary mode is safe for all three types; HttpResponse accepts bytes.
    with open(name, "rb") as f:
        response = HttpResponse(
            f.read(),
            content_type=content_types.get(suffix, "application/octet-stream"),
        )
    response["Content-Disposition"] = "attachment;filename=" + name
    return response
class UserErrorReport(FormView):
    """Form view letting a user flag models/quantities that look wrong.

    The report (including a TOML dump of every model in the session) is
    written to the error log.
    """

    form_class = forms.UserErrorForm
    template_name = "user_error_form.html"
    success_url = "/email-sent/"

    def get_form_kwargs(self):
        # Pass the session state and the optionally pre-selected model
        # through to the form so it can build its choice fields.
        session = self.request.session
        kwargs = super().get_form_kwargs()
        kwargs["objects"] = session.get("objects", {})
        kwargs["current_quantity"] = session.get("current_plot", None)
        kwargs["model"] = self.kwargs.get("model", None)
        return kwargs

    def form_valid(self, form):
        data = form.cleaned_data
        # Anonymous reports are allowed; fall back to a placeholder identity.
        who = data.get("name") or "anonymous"
        addr = data.get("email") or "anonymous"
        message = f"{who} / {addr} said: \n\n{data.get('message')}"
        message += (
            f"\n\nModels Considered Bad: {'; '.join(data.get('models'))}"
        )
        message += f"\nQuantities Considered Bad: {'; '.join(data.get('quantity'))}"
        message += "\n\nMODELS:\n\n"
        # Append a full TOML dump of every model for reproducibility.
        for label, obj in self.request.session["objects"].items():
            message += f"{label}\n{'-'*len(label)}\n"
            message += toml.dumps(framework_to_dict(obj))
            message += "\n\n"
        logger.error(message)
        return super().form_valid(form)
|
dblevin1/TheHaloMod | TheHaloMod/settings/local.py | <filename>TheHaloMod/settings/local.py
from .base import *  # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# Local-development overrides: DEBUG on, throwaway secret key, loopback hosts.
DEBUG = True
SECRET_KEY = env("DJANGO_SECRET_KEY", default="!!!SET DJANGO_SECRET_KEY!!!",)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1", "[::1]"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
# In-process memory cache only; fine for a single dev server.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
        "LOCATION": "",
    }
}
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"]  # noqa F405
# Write emails to screen instead of actually sending them.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
dblevin1/TheHaloMod | TheHaloMod/settings/production.py | import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from .base import *  # noqa
from .base import env, LOGGING, ROOT_DIR
# NOTE(review): ROOT_DIR appears unused in this module chunk — confirm before removing.
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])
# DATABASES
# ------------------------------------------------------------------------------
# DATABASES["default"] = env.db("DATABASE_URL")  # noqa F405
# DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa F405
# DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)  # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# CACHES = {
#     "default": {
#         "BACKEND": "django_redis.cache.RedisCache",
#         "LOCATION": env("REDIS_URL"),
#         "OPTIONS": {
#             "CLIENT_CLASS": "django_redis.client.DefaultClient",
#             # Mimicing memcache behavior.
#             # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
#             "IGNORE_EXCEPTIONS": True,
#         },
#     }
# }
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# TEMPLATES[-1]["OPTIONS"]["loaders"] = [  # type: ignore[index] # noqa F405
#     (
#         "django.template.loaders.cached.Loader",
#         [
#             "django.template.loaders.filesystem.Loader",
#             "django.template.loaders.app_directories.Loader",
#         ],
#     )
# ]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
HOST_EMAIL = env("HOST_EMAIL")
# Whether to use a TLS (secure) connection when talking to the SMTP server.
EMAIL_USE_TLS = True
EMAIL_HOST = "smtp.gmail.com"
EMAIL_HOST_USER = HOST_EMAIL
SERVER_EMAIL = HOST_EMAIL
# NOTE(review): DEFAULT_FROM_EMAIL and SERVER_EMAIL are both reassigned a few
# lines below (from DJANGO_* env vars); these first assignments are dead —
# confirm which pair is intended and remove the other.
DEFAULT_FROM_EMAIL = SERVER_EMAIL
EMAIL_PORT = env.int("EMAIL_PORT", default=587)
MY_EMAIL = env("MY_EMAIL")
EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
ADMINS = (("Steven", MY_EMAIL),)
MANAGERS = ADMINS
CONTACT_RECIPIENTS = MY_EMAIL
DEFAULT_FROM_EMAIL = env(
    "DJANGO_DEFAULT_FROM_EMAIL", default=f"TheHaloMod <{HOST_EMAIL}>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[TheHaloMod]")
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
# ADMIN_URL = env("DJANGO_ADMIN_URL")
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING["loggers"].update(
    {
        "django.db.backends": {
            "level": "ERROR",
            "handlers": ["console_prod"],
            "propagate": False,
        },
        "sentry_sdk": {
            "level": "ERROR",
            "handlers": ["console_prod"],
            "propagate": False,
        },
        "django.security.DisallowedHost": {
            "level": "ERROR",
            "handlers": ["console_prod"],
            "propagate": False,
        },
    }
)
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
    level=SENTRY_LOG_LEVEL,  # Capture info and above as breadcrumbs
    event_level=logging.ERROR,  # Send errors as events
)
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[sentry_logging, DjangoIntegration()])
GOOGLE_ANALYTICS_PROPERTY_ID = env("GOOGLE_ANALYTICS_PROPERTY_ID")
|
dblevin1/TheHaloMod | TheHaloMod/settings/base.py | <gh_stars>1-10
"""Base settings to build other settings off."""
# ===============================================================================
# THIRD_PARTY IMPORTS
# ===============================================================================
import dill
from django.contrib.sessions import serializers
from django.core.cache.backends import locmem
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
env = environ.Env()
DOT_ENV_FILE = env("DOT_ENV_FILE", default="production")
# Read a base env file, then overwrite with env-specific variables
env.read_env(str(ROOT_DIR / ".envs" / "base"))
env.read_env(str(ROOT_DIR / ".envs" / DOT_ENV_FILE))
DEBUG = env.bool("DJANGO_DEBUG", False)
TEMPLATE_DEBUG = DEBUG
CRISPY_FAIL_SILENTLY = not DEBUG
# Change the Pickle Serializer to use dill.
serializers.pickle = dill # noqa
locmem.pickle = dill # noqa
# ===============================================================================
# DATABASE SETTINGS
# ===============================================================================
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3", # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
"NAME": str(ROOT_DIR / "db"), # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
}
}
# ===============================================================================
# INSTALLED APPS
# ===============================================================================
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"analytical",
"crispy_forms",
"halomod_app",
"bootstrap_modal_forms",
]
# ===============================================================================
# CRISPY SETTINGS
# ===============================================================================
CRISPY_TEMPLATE_PACK = "bootstrap4"
# ===============================================================================
# LOGGING SETUP
# ===============================================================================
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"handlers": {
"console_dev": {
"level": env("LOG_LEVEL", default="ERROR"),
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
},
"console_prod": {
"level": "INFO",
"filters": ["require_debug_false"],
"class": "logging.StreamHandler",
},
},
"root": {"level": "INFO", "handlers": ["console_dev", "console_prod"]},
"loggers": {
"django.request": {
"handlers": ["console_dev", "console_prod"],
"level": "INFO",
"propagate": True,
},
"halomod_app": {"handlers": ["console_dev", "console_prod"], "level": "INFO"},
},
}
# ===============================================================================
# LOCALE SETTINGS
# ===============================================================================
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# ===============================================================================
# HOW TO GET TO MEDIA/STATIC FILES
# ===============================================================================
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/media/"
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
STATIC_ROOT = str(ROOT_DIR / "static")
MEDIA_ROOT = str(ROOT_DIR / "media")
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"django.contrib.staticfiles.finders.DefaultStorageFinder",
)
# ===============================================================================
# TEMPLATES ETC.
# ===============================================================================
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [ROOT_DIR / "templates"],
"APP_DIRS": True,
}
]
# ===============================================================================
# MISCELLANEOUS
# ===============================================================================
MIDDLEWARE = [
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "TheHaloMod.urls"
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "TheHaloMod.wsgi.application"
SESSION_SAVE_EVERY_REQUEST = True
# Use a local-memory cache session engine. If we don't do this,
# the session objects (which can be quite large, since we're pickling full halomodel
# instances) are saved to the db. This is bad firstly because it's slow, and secondly
# because the db get's filled up with stuff we never want to commit to git.
# On the actual site, we should probably use memcached instead of the locmem cache.
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# ==============================================================================
# SECURITY
# ==============================================================================
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# ===============================================================================
# EMAIL SETUP
# ===============================================================================
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
|
dblevin1/TheHaloMod | halomod_app/templatetags/hmf_version.py | """
Created on Apr 10, 2013
@author: Steven
"""
import hmf
from django import template
register = template.Library()


@register.simple_tag
def current_hmf_version():
    """Template tag returning the installed ``hmf`` package version string."""
    return hmf.__version__
|
dblevin1/TheHaloMod | halomod_app/urls.py | from django.urls import path
from django.views.generic.base import RedirectView
from django.contrib.staticfiles.storage import staticfiles_storage
from . import views
# URL routes for the halomod_app calculator; all views live in ./views.py.
urlpatterns = [
    # Serve the favicon via a redirect into the static storage.
    path(
        "favicon.ico",
        RedirectView.as_view(
            url=staticfiles_storage.url("halomod_app/img/favicon.ico"), permanent=False
        ),
        name="favicon",
    ),
    # Model creation/editing/deletion.
    path("create/", views.CalculatorInputCreate.as_view(), name="calculate"),
    path("create/<label>/", views.CalculatorInputCreate.as_view(), name="calculate",),
    path("edit/<label>/", views.CalculatorInputEdit.as_view(), name="calculate"),
    path("delete/<label>/", views.delete_plot, name="delete"),
    path("restart/", views.complete_reset, name="restart"),
    path("help/", views.help.as_view(), name="help"),
    # path(
    #     'hmf_resources/',
    #     views.resources.as_view(),
    #     name='resources'
    # ),
    # path(
    #     'hmf_acknowledgments/',
    #     views.acknowledgments.as_view(),
    #     name='acknowledgments'
    # ),
    # Plot display and downloadable artifacts.
    path("", views.ViewPlots.as_view(), name="image-page"),
    path("plot/<plottype>.<filetype>", views.plots, name="images"),
    path("download/allData.zip", views.data_output, name="data-output"),
    path("download/parameters.txt", views.header_txt, name="header-txt"),
    path("download/halogen.zip", views.halogen, name="halogen-output"),
    # Contact / error-report forms.
    path("contact/", views.ContactFormView.as_view(), name="contact-email"),
    path("email-sent/", views.EmailSuccess.as_view(), name="email-success"),
    path("report/", views.UserErrorReport.as_view(), name="report_model"),
    path("report/<model>/", views.UserErrorReport.as_view(), name="report_model"),
    path("about/", views.about.as_view(), name="about"),
]
|
dblevin1/TheHaloMod | halomod_app/templatetags/halomod_version.py | <filename>halomod_app/templatetags/halomod_version.py
"""
Created on Apr 10, 2013
@author: Steven
"""
import halomod
from django import template
register = template.Library()


@register.simple_tag
def current_halomod_version():
    """Template tag returning the installed ``halomod`` package version string."""
    return halomod.__version__
|
dblevin1/TheHaloMod | halomod_app/utils.py | <filename>halomod_app/utils.py
"""Plotting and driving utilities for halomod."""
import io
import logging
import matplotlib.ticker as tick
from halomod import TracerHaloModel
from halomod.wdm import HaloModelWDM
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_pdf import FigureCanvasPdf
from matplotlib.backends.backend_svg import FigureCanvasSVG
from matplotlib.figure import Figure
import re
logger = logging.getLogger(__name__)
def hmf_driver(
    cls=TracerHaloModel, previous: "TracerHaloModel | None" = None, **kwargs
):
    """Construct or update a halo-model instance from calculator parameters.

    Parameters
    ----------
    cls
        Class to instantiate when no previous model exists.
    previous
        The model from the last request, if any; cloned with the new
        parameters when the WDM-ness of the model is unchanged.
        (The original annotation ``[None, TracerHaloModel]`` was a list
        literal, not a valid type — fixed to an Optional annotation.)
    **kwargs
        Parameters forwarded to the model constructor / ``clone``.
    """
    if previous is None:
        return cls(**kwargs)
    # Switching into or out of WDM requires a fresh object of the right class.
    elif "wdm_model" in kwargs and not isinstance(previous, HaloModelWDM):
        return HaloModelWDM(**kwargs)
    elif "wdm_model" not in kwargs and isinstance(previous, HaloModelWDM):
        return TracerHaloModel(**kwargs)
    else:
        this = previous.clone(**kwargs)
        # TODO: this is a hack, and should be fixed in hmf
        # we have to set all _params whose model has been changed to {}
        # so that they don't get carry-over parameters from other models.
        for k, v in kwargs.items():
            if k.endswith("model") and v != getattr(this, k).__class__.__name__:
                this.update(**{k.replace("model", "params"): {}})
        return this
def create_canvas(objects, q: str, d: dict, plot_format: str = "png"):
    """Render quantity *q* for every model in *objects* into an image buffer.

    Parameters
    ----------
    objects : dict
        Mapping of model label -> halo-model instance.
    q : str
        Quantity name (a KEYMAP key). A ``"comparison_"`` prefix plots each
        model's quantity as a ratio to the *first* model's.
    d : dict
        The KEYMAP entry for ``q`` (xlab / ylab / yscale).
    plot_format : str
        One of "png", "pdf" or "svg".

    Returns
    -------
    (io.BytesIO, dict)
        The rendered figure bytes and a dict of per-label exceptions raised
        while evaluating the quantity.
    """
    # TODO: make log scaling automatic
    fig = Figure(figsize=(10, 6), edgecolor="white", facecolor="white", dpi=100)
    ax = fig.add_subplot(111)
    ax.grid(True)
    ax.set_xlabel(d["xlab"], fontsize=15)
    ax.set_ylabel(d["ylab"], fontsize=15)
    lines = ["-", "--", "-.", ":"]
    if q.startswith("comparison"):
        compare = True
        q = q[11:]  # strip the "comparison_" prefix
    else:
        compare = False
    # Get the kind of axis we're comparing to.
    for x, label in XLABELS.items():
        if KEYMAP[q]["xlab"] == label:
            break
    else:
        raise ValueError(f"The quantity {q} is not found in KEYMAP")
    errors = {}
    ys = {}
    for i, (l, o) in enumerate(objects.items()):
        if not compare:
            try:
                y = getattr(o, q)
                # Hide values vanishingly small relative to the peak.
                mask = y > 1e-40 * y.max()
                ys[l] = y[mask]
                if y is not None:
                    ax.plot(
                        getattr(o, x)[mask],
                        y[mask],
                        color=f"C{i % 7}",
                        linestyle=lines[(i // 7) % 4],
                        label=l,
                    )
            except Exception as e:
                logger.exception(f"Error encountered getting {q} for model called {l}.")
                errors[l] = e
        else:
            if i == 0:
                # First model is the comparison baseline (denominator).
                comp_obj = o
                continue
            try:
                ynum = getattr(o, q)
            except Exception as e:
                logger.exception(f"Error encountered getting {q} for model called {l}.")
                errors[l] = e
                # BUGFIX: without this `continue`, the code below referenced
                # the unbound name `ynum` and raised NameError instead of
                # reporting the error gracefully.
                continue
            yden = getattr(comp_obj, q)
            mask = yden > 0
            if ynum is not None and yden is not None:
                y = ynum[mask] / yden[mask]
                ys[l] = y
                ax.plot(
                    getattr(o, x)[mask],
                    y,
                    color=f"C{(i+1) % 7}",
                    linestyle=lines[((i + 1) // 7) % 4],
                    label=l,
                )
    try:
        # Shrink current axis by 30%
        ax.set_xscale("log")
        ax.set_yscale(d["yscale"], base=d.get("basey", 10))
        if d["yscale"] == "log" and d.get("basey", 10) == 2:
            ax.yaxis.set_major_formatter(tick.ScalarFormatter())
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.6, box.height])
        # Put a legend to the right of the current axis
        ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), fontsize=15)
        buf = io.BytesIO()
        if plot_format == "pdf":
            FigureCanvasPdf(fig).print_pdf(buf)
        elif plot_format == "png":
            FigureCanvasAgg(fig).print_png(buf)
        elif plot_format == "svg":
            FigureCanvasSVG(fig).print_svg(buf)
        else:
            raise ValueError("plot_format should be png, pdf or svg!")
    except Exception:
        logger.info(f"y-axis data: {ys}")
        logger.exception("Something went wrong in creating the image itself")
        raise
    return buf, errors
# LaTeX axis labels shared by the KEYMAP entries; XLABELS maps each model
# attribute used as an x-axis ("m", "k", "r", "k_hm") to its label.
MLABEL = r"Mass $(M_{\odot}h^{-1})$"
KLABEL = r"Wavenumber, $k$ [$h$/Mpc]"
RLABEL = r"Scale, $r$ [Mpc/$h$]"
KHMLABEL = r"Fourier Scale, $k$ [$h$/Mpc]"
XLABELS = {"m": MLABEL, "k": KLABEL, "r": RLABEL, "k_hm": KHMLABEL}
# Plot metadata per model quantity: maps each plottable attribute name to its
# x-axis label (one of the XLABELS values), y-axis label and y-axis scale.
KEYMAP = {
    "dndm": {
        "xlab": MLABEL,
        "ylab": r"Mass Function $\left( \frac{dn}{dM} \right) h^4 Mpc^{-3}M_\odot^{-1}$",
        "yscale": "log",
    },
    "dndlnm": {
        "xlab": MLABEL,
        "ylab": r"Mass Function $\left( \frac{dn}{d\ln M} \right) h^3 Mpc^{-3}$",
        "yscale": "log",
    },
    "dndlog10m": {
        "xlab": MLABEL,
        "ylab": r"Mass Function $\left( \frac{dn}{d\log_{10}M} \right) h^3 Mpc^{-3}$",
        "yscale": "log",
    },
    "fsigma": {
        "xlab": MLABEL,
        "ylab": r"$f(\sigma) = \nu f(\nu)$",
        "yscale": "linear",
    },
    "ngtm": {"xlab": MLABEL, "ylab": r"$n(>M) h^3 Mpc^{-3}$", "yscale": "log"},
    "rho_gtm": {
        "xlab": MLABEL,
        "ylab": r"$\rho(>M)$, $M_{\odot}h^{2}Mpc^{-3}$",
        "yscale": "log",
    },
    "rho_ltm": {
        "xlab": MLABEL,
        "ylab": r"$\rho(<M)$, $M_{\odot}h^{2}Mpc^{-3}$",
        "yscale": "linear",
    },
    "how_big": {"xlab": MLABEL, "ylab": r"Box Size, $L$ Mpc$h^{-1}$", "yscale": "log",},
    "sigma": {"xlab": MLABEL, "ylab": r"Mass Variance, $\sigma$", "yscale": "linear",},
    "lnsigma": {"xlab": MLABEL, "ylab": r"$\ln(\sigma^{-1})$", "yscale": "linear"},
    "n_eff": {
        "xlab": MLABEL,
        "ylab": r"Effective Spectral Index, $n_{eff}$",
        "yscale": "linear",
    },
    "power": {"xlab": KLABEL, "ylab": r"$P(k)$, [Mpc$^3 h^{-3}$]", "yscale": "log"},
    "transfer_function": {
        "xlab": KLABEL,
        "ylab": r"$T(k)$, [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "delta_k": {"xlab": KLABEL, "ylab": r"$\Delta(k)$", "yscale": "log"},
    "halo_bias": {"xlab": MLABEL, "ylab": "Halo Bias", "yscale": "log"},
    "cmz_relation": {"xlab": MLABEL, "ylab": "Halo Concentration", "yscale": "log",},
    "corr_auto_tracer": {
        "xlab": RLABEL,
        "ylab": r"Tracer correlation, $\xi_{2h}(r)$",
        "yscale": "log",
    },
    "corr_2h_auto_tracer": {
        "xlab": RLABEL,
        "ylab": r"2-halo tracer correlation, $\xi_{2h}(r)$",
        "yscale": "log",
    },
    "corr_1h_auto_tracer": {
        "xlab": RLABEL,
        "ylab": r"1-halo tracer correlation, $\xi_{1h}(r)$",
        "yscale": "log",
    },
    "corr_1h_cs_auto_tracer": {
        "xlab": RLABEL,
        "ylab": r"1-halo central-sallite tracer correlation, $\xi_{1h}^{cs}(r)$",
        "yscale": "log",
    },
    "corr_1h_ss_auto_tracer": {
        "xlab": RLABEL,
        "ylab": r"1-halo satellite-sallite tracer correlation, $\xi_{1h}^{ss}(r)$",
        "yscale": "log",
    },
    "corr_linear_mm": {
        "xlab": RLABEL,
        "ylab": r"Linear matter correlation, $\xi_{m}^{\rm lin}(r)$",
        "yscale": "log",
    },
    "corr_1h_auto_matter": {
        "xlab": RLABEL,
        "ylab": r"1-halo matter correlation, $\xi_{1h}(r)$",
        "yscale": "log",
    },
    "corr_2h_auto_matter": {
        "xlab": RLABEL,
        "ylab": r"2-halo matter correlation, $\xi_{2h}(r)$",
        "yscale": "log",
    },
    "corr_1h_cross_tracer_matter": {
        "xlab": RLABEL,
        "ylab": r"1-halo matter-tracer correlation, $\xi_{1h}^{m\times T}(r)$",
        "yscale": "linear",
    },
    "corr_2h_cross_tracer_matter": {
        "xlab": RLABEL,
        "ylab": r"2-halo matter-tracer correlation, $\xi_{2h}^{m\times T}(r)$",
        "yscale": "log",
    },
    "corr_auto_matter": {
        "xlab": RLABEL,
        "ylab": r"Matter correlation, $\xi_{mm}(r)$",
        "yscale": "log",
    },
    "corr_cross_tracer_matter": {
        "xlab": RLABEL,
        "ylab": r"Matter-tracer correlation, $\xi_{m\times T}(r)$",
        "yscale": "log",
    },
    "sd_bias_correction": {
        "xlab": RLABEL,
        "ylab": "Scale-dependent bias correction",
        "yscale": "linear",
    },
    "central_occupation": {
        "xlab": MLABEL,
        "ylab": "Central Tracer Occupation",
        "yscale": "log",
    },
    "satellite_occupation": {
        "xlab": MLABEL,
        "ylab": "Satellite Tracer Occupation",
        "yscale": "log",
    },
    "total_occupation": {
        "xlab": MLABEL,
        "ylab": "Total Tracer Occupation",
        "yscale": "log",
    },
    "power_2h_auto_matter": {
        "xlab": KHMLABEL,
        "ylab": r"2-halo matter $P_{mm}^{2h}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_1h_auto_matter": {
        "xlab": KHMLABEL,
        "ylab": r"1-halo matter $P_{mm}^{1h}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_auto_matter": {
        "xlab": KHMLABEL,
        "ylab": r"2-halo matter $P_{mm}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_auto_tracer": {
        "xlab": KHMLABEL,
        "ylab": r"Tracer $P_{TT}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_1h_auto_tracer": {
        "xlab": KHMLABEL,
        "ylab": r"1-halo tracer $P_{TT}^{1h}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_2h_auto_tracer": {
        "xlab": KHMLABEL,
        "ylab": r"2-halo tracer $P_{TT}^{2h}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_1h_cs_auto_tracer": {
        "xlab": KHMLABEL,
        "ylab": r"1-halo cen-sat tracer $P_{TT}^{1h, cs}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_1h_ss_auto_tracer": {
        "xlab": KHMLABEL,
        "ylab": r"1-halo sat-sat tracer $P_{TT}^{1h, ss}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_1h_cross_tracer_matter": {
        "xlab": KHMLABEL,
        "ylab": r"1-halo matter-tracer $P_{m\times T}^{1h}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_2h_cross_tracer_matter": {
        "xlab": KHMLABEL,
        "ylab": r"2-halo matter-tracer $P_{m\times T}^{2h}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "power_cross_tracer_matter": {
        "xlab": KHMLABEL,
        "ylab": r"Matter-tracer $P_{m\times T}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "nonlinear_delta_k": {
        "xlab": KLABEL,
        "ylab": r"$\Delta^2_{\rm halofit}(k)$",
        "yscale": "log",
    },
    "nonlinear_power": {
        "xlab": KLABEL,
        "ylab": r"$P_{\rm halofit}(k)$ [Mpc$^3 h^{-3}$]",
        "yscale": "log",
    },
    "radii": {"xlab": MLABEL, "ylab": r"Radius [Mpc/$h$]", "yscale": "log",},
    "tracer_cmz_relation": {
        "xlab": MLABEL,
        "ylab": r"Tracer Concentration",
        "yscale": "log",
    },
}
def camel_to_words(word: str) -> str:
    """Split a CamelCase identifier into space-separated words.

    A word that explodes into as many pieces as it has characters (i.e. every
    character after the first is a capital, as in an acronym like ``WDM``) is
    returned unchanged.
    """
    n_chars = len(word)
    spaced = re.sub(r"(?<!^)(?=[A-Z])", " ", word)
    # Acronym guard: if each character became its own "word", undo the split.
    if len(spaced.split(" ")) == n_chars:
        return spaced.replace(" ", "")
    return spaced
|
Jitesh17/printj | test/test_print_colors.py | <gh_stars>1-10
import printj
# Smoke-test the public printj API: plain colour, text style, and the
# chained colour/style-on-background form.
printj.red('YOUR TEXT')
printj.bold('YOUR TEXT')
printj.blue.italic_on_yellow('YOUR TEXT')
|
Jitesh17/printj | printj/printj_colors.py | <gh_stars>1-10
#!/usr/bin/python3
from printj.lib import Template
import os
from functools import partial
class say:
    """Speak *text* aloud via the ``spd-say`` speech-dispatcher CLI."""

    def __init__(self, text):
        import shlex

        # SECURITY FIX: the original interpolated `text` into a shell command
        # inside double quotes, so `$(...)`, backticks or an embedded quote in
        # the text could execute arbitrary commands. Quote the argument.
        os.system('spd-say ' + shlex.quote(f' {text} '))
class clear:
    """Clear the terminal by invoking the shell ``clear`` command."""
    def __init__(self):
        os.system('clear')
class bold:
    """Print *text* in bold (instantiation is the side effect)."""
    # color = 'red'
    def __init__(self, text):
        print(Template.stylish_text(text=text, style='bold'))
class italic:
    """Print *text* in italics (instantiation is the side effect)."""
    def __init__(self, text):
        print(Template.stylish_text(text=text, style='italic'))
class underline:
    """Print *text* underlined (instantiation is the side effect)."""
    def __init__(self, text):
        print(Template.stylish_text(text=text, style='underline'))
def _highlight_method(back, style=None):
    """Build a classmethod printing text in ``cls.color`` on *back*, optionally styled."""
    def _method(cls, text):
        if style is None:
            print(Template.highlight_text(text=text, front=cls.color, back=back))
        else:
            print(Template.highlight_text(text=text, style=style, front=cls.color, back=back))
    return classmethod(_method)


class ColorPrint:
    """Base colour printer.

    Subclasses set ``color`` to their foreground colour; the generated
    ``on_<bg>``, ``bold_on_<bg>`` and ``italic_on_<bg>`` classmethods print
    text in that colour over the given background.
    """

    color = 'red'

    def __init__(self, text):
        self.text = text


# Generate the 24 background/style combinations instead of hand-writing
# near-identical classmethods (the original repeated the same three-line
# method 24 times). Behaviour and the public method names are unchanged.
for _back in ('black', 'red', 'green', 'yellow', 'blue', 'purple', 'cyan', 'white'):
    setattr(ColorPrint, f'on_{_back}', _highlight_method(_back))
    setattr(ColorPrint, f'bold_on_{_back}', _highlight_method(_back, 'bold'))
    setattr(ColorPrint, f'italic_on_{_back}', _highlight_method(_back, 'italic'))
del _back
class black(ColorPrint):
    """Printer whose foreground color is black; calling the class prints *text*."""
    color = 'black'

    def __init__(self, text):
        print(Template.color_text(text=text, front=black.color))
        super().__init__(text)


class red(ColorPrint):
    """Printer whose foreground color is red."""
    color = 'red'

    def __init__(self, text):
        print(Template.color_text(text=text, front=red.color))
        super().__init__(text)


class green(ColorPrint):
    """Printer whose foreground color is green."""
    color = 'green'

    def __init__(self, text):
        print(Template.color_text(text=text, front=green.color))
        super().__init__(text)


class yellow(ColorPrint):
    """Printer whose foreground color is yellow."""
    color = 'yellow'

    def __init__(self, text):
        print(Template.color_text(text=text, front=yellow.color))
        super().__init__(text)


class blue(ColorPrint):
    """Printer whose foreground color is blue."""
    color = 'blue'

    def __init__(self, text):
        print(Template.color_text(text=text, front=blue.color))
        # NOTE(review): unlike every sibling class, the super().__init__ call
        # is commented out here — confirm whether that is intentional.
        # super().__init__(text)


class purple(ColorPrint):
    """Printer whose foreground color is purple."""
    color = 'purple'

    def __init__(self, text):
        print(Template.color_text(text=text, front=purple.color))
        super().__init__(text)


class cyan(ColorPrint):
    """Printer whose foreground color is cyan."""
    color = 'cyan'

    def __init__(self, text):
        print(Template.color_text(text=text, front=cyan.color))
        super().__init__(text)


class white(ColorPrint):
    """Printer whose foreground color is white."""
    color = 'white'

    def __init__(self, text):
        print(Template.color_text(text=text, front=white.color))
        super().__init__(text)
class ColorText:
    """Return (rather than print) ANSI-colored versions of *text*.

    Each helper wraps Template.color_text with a fixed foreground color.
    """

    @staticmethod
    def black(text: str):
        """Return *text* colored black."""
        return Template.color_text(text, front='black')

    @staticmethod
    def red(text: str):
        """Return *text* colored red."""
        return Template.color_text(text, front='red')

    @staticmethod
    def green(text: str):
        """Return *text* colored green."""
        return Template.color_text(text, front='green')

    @staticmethod
    def yellow(text: str):
        """Return *text* colored yellow."""
        return Template.color_text(text, front='yellow')

    @staticmethod
    def blue(text: str):
        """Return *text* colored blue."""
        return Template.color_text(text, front='blue')

    @staticmethod
    def purple(text: str):
        """Return *text* colored purple."""
        return Template.color_text(text, front='purple')

    @staticmethod
    def cyan(text: str):
        """Return *text* colored cyan."""
        return Template.color_text(text, front='cyan')

    @staticmethod
    def white(text: str):
        """Return *text* colored white."""
        return Template.color_text(text, front='white')
if __name__ == "__main__":
    # Manual demo: exercise the color printers, highlighted variants, and the
    # non-printing ColorText / Template helpers.
    red('Error, does not compute!')
    blue('Error, does not compute!')
    white('Error, does not compute!')
    # say('Error, does not compute!')
    red.on_green('Error, does not compute!')
    white.bold_on_green('Error, does not compute!')
    blue.on_green('Error, does not compute!')
    clear()
    red(Template.stylish_text(text='Error, does not compute!', style='bold'))
    underline(Template.stylish_text(text='Error, does not compute!', style='bold'))
    print(f"{ColorText.cyan('cyan!')}{ColorText.blue('blue!')}")
    print(Template.color_text(text='Erute!', front='cyan'))
Jitesh17/printj | printj/__init__.py | <reponame>Jitesh17/printj
from .printj_colors import *
from .lib import *
# from .printj_colors import *
|
Jitesh17/printj | printj/lib.py | <gh_stars>1-10
#!/usr/bin/python3
"""ANSI escape-code tables and string builders used by printj."""

# Raw escape sequences: red foreground, reset, and the Control Sequence Introducer.
CRED = '\033[31m'
CEND = '\033[0m'
CSI = '\x1B['

# Base ANSI color digits; foreground codes are '3<digit>', backgrounds '4<digit>'.
code = {
    'black': '0',
    'red': '1',
    'green': '2',
    'yellow': '3',
    'blue': '4',
    'purple': '5',
    'cyan': '6',
    'white': '7',
}

# SGR style codes.
stl = {
    'normal': '0',
    'bold': '1',
    'italic': '3',
    'underline': '4',
}

# Derive foreground ('3x') and background ('4x') code tables from `code`.
fg = {}
bg = {}
for color in code:
    fg[color] = '3' + code[color]
    bg[color] = '4' + code[color]


class Template:
    """Builders that wrap text in ANSI escape sequences."""

    @staticmethod
    def highlight_text(text='fill a text', style='normal', front='blue', back='yellow'):
        """Return *text* with style, foreground and background codes applied."""
        return f"{CSI}{stl[style]};{fg[front]};{bg[back]}m{text}{CEND}"

    @staticmethod
    def color_text(text='fill a text', style='normal', front='blue', back='yellow'):
        """Return *text* in foreground color *front* (style/back are unused)."""
        return f"\33[3{code[front]}m{text}{CEND}"

    @staticmethod
    def stylish_text(text='fill a text', style='normal', front='blue', back='yellow'):
        """Return *text* with only the *style* code applied (front/back unused)."""
        return f"\33[{stl[style]}m{text}{CEND}"
|
MeteoSwiss-APN/mch-python-blueprint-cython-example-chain_calc | {{cookiecutter.project_slug}}/src/{{cookiecutter.project_slug}}/cli.py | # -*- coding: utf-8 -*-
"""Console script for {{ cookiecutter.project_slug }}."""
import sys
import click
from . import __version__
from .mutable_number import MutableNumber
def print_version(ctx, param, value):
    """Print the version number and exit."""
    # Click option callback: *param* is unused; *value* is the flag state.
    if value:
        click.echo(__version__)
        ctx.exit(0)


@click.pass_context
def print_number(ctx, *args, **kwargs):
    """Print the current number."""
    # Used as the group's result_callback: after all chained subcommands ran,
    # emit the accumulated value stored in the shared context object.
    number = ctx.obj["number"].get()
    click.echo(f"{number:g}")


@click.group(
    context_settings={"help_option_names": ["-h", "--help"]},
    no_args_is_help=True,
    invoke_without_command=True,
    chain=True,
    result_callback=print_number,
)
@click.argument("number", type=float, nargs=1)
@click.option(
    "--version",
    "-V",
    help="Print version and exit.",
    is_flag=True,
    expose_value=False,
    callback=print_version,
)
@click.option(
    "--verbose",
    "-v",
    count=True,
    help="Increase verbosity (specify multiple times for more).",
)
@click.pass_context
def main(ctx, number, **kwargs):
    """Console script for test_cli_project."""
    # Seed the shared context with the starting number; the chained
    # subcommands (plus/minus/times/by) mutate it in place.
    if ctx.obj is None:
        ctx.obj = {}
    ctx.obj["number"] = MutableNumber(number)
    ctx.obj.update(kwargs)
def print_operation(ctx, operator, value):
    """In verbose mode, echo "<previous> <op> <operand> = <result>"."""
    if ctx.obj["verbose"]:
        number = ctx.obj["number"]
        # get(-2) reads the value before this operation was applied.
        click.echo(f"{number.get(-2):g} {operator} {value:g} = {number.get():g}")


@main.command("plus", help="addition")
@click.argument("addend", type=float, nargs=1)
@click.pass_context
def plus(ctx, addend):
    # Add *addend* to the running number.
    ctx.obj["number"].add(addend)
    print_operation(ctx, "+", addend)


@main.command("minus", help="subtraction")
@click.argument("subtrahend", type=float, nargs=1)
@click.pass_context
def minus(ctx, subtrahend):
    # Subtract *subtrahend* from the running number.
    ctx.obj["number"].subtract(subtrahend)
    print_operation(ctx, "-", subtrahend)


@main.command("times", help="multiplication")
@click.argument("factor", type=float, nargs=1)
@click.pass_context
def times(ctx, factor):
    # Multiply the running number by *factor*.
    ctx.obj["number"].multiply(factor)
    print_operation(ctx, "*", factor)


@main.command("by", help="division")
@click.argument("divisor", type=float, nargs=1)
@click.pass_context
def by(ctx, divisor):
    # Divide the running number by *divisor*.
    ctx.obj["number"].divide(divisor)
    print_operation(ctx, "/", divisor)


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
|
MeteoSwiss-APN/mch-python-blueprint-cython-example-chain_calc | {{cookiecutter.project_slug}}/src/{{cookiecutter.project_slug}}/__init__.py | # -*- coding: utf-8 -*-
"""Top-level package for Chain Calculator."""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1.0"
|
MeteoSwiss-APN/mch-python-blueprint-cython-example-chain_calc | {{cookiecutter.project_slug}}/setup.py | <reponame>MeteoSwiss-APN/mch-python-blueprint-cython-example-chain_calc<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The setup script.
"""
from setuptools import setup
from setuptools import find_packages
# Import Cython after setuptools
from Cython.Build import cythonize # isort:skip
def read_file(path):
    """Return the file's lines, each stripped of surrounding whitespace, joined by newlines.

    An explicit UTF-8 encoding is used so the long_description built from
    README.rst/HISTORY.rst is decoded identically on every platform, instead
    of depending on the locale's default encoding.
    """
    with open(path, "r", encoding="utf-8") as f:
        return "\n".join(line.strip() for line in f)
# Files concatenated (in order) into the package's long description.
description_files = ["README.rst", "HISTORY.rst"]

# Static package metadata; the {{ cookiecutter.* }} placeholders are filled
# in when the template is rendered.
metadata = {
    "name": "{{ cookiecutter.project_slug }}",
    "version": "{{ cookiecutter.version }}",
    "description": "{{ cookiecutter.project_short_description }}",
    "long_description": "\n\n".join([read_file(f) for f in description_files]),
    "author": "{{ cookiecutter.full_name.replace('\"', '\\\"') }}",
    "author_email": "{{ cookiecutter.email }}",
    "url": "https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.project_slug }}",
    "keywords": "{{ cookiecutter.project_slug }}",
    "classifiers": [
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Cython",
    ],
}

# Supported interpreter range and runtime dependencies.
python = ">=3.7"
dependencies = [
    "Click >= 6.0",
]

# Console entry point: `<project_slug> ...` invokes cli.main().
scripts = [
    "{{ cookiecutter.project_slug }}={{ cookiecutter.project_slug }}.cli:main",
]

setup(
    python_requires=python,
    install_requires=dependencies,
    entry_points={"console_scripts": scripts},
    # src-layout package discovery plus Cython extension modules.
    packages=find_packages("src"),
    package_dir={"": "src"},
    ext_modules=cythonize("src/*/*.pyx", annotate=True),
    include_package_data=True,
    **metadata,
)
|
prince21298/SCRAPING | task1.py | import requests
from bs4 import BeautifulSoup
import pprint
def scrap_top_list():
    """Scrape IMDb's top-rated Indian movies chart.

    Returns:
        list[dict]: one dict per movie with keys 'name', 'position', 'year',
        'rateing' (spelling kept for backward compatibility) and 'url'.
    """
    # FIX: the URL previously had a stray leading space.
    url = "https://www.imdb.com/india/top-rated-indian-movies/"
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    main_div = soup.find('div', class_="lister")
    table_body = main_div.find('tbody', class_="lister-list")
    main_data = []
    for position, tr in enumerate(table_body.findAll('tr'), start=1):
        title_colum = tr.find('td', class_="titleColumn").a
        # Year text looks like "(2016)"; strip the parentheses instead of
        # relying on fixed character offsets.
        year_text = tr.find('span', class_="secondaryInfo").get_text()
        # Rating cell text is the rating surrounded by whitespace.
        rating_text = tr.find('td', class_="ratingColumn imdbRating").get_text()
        main_data.append({
            'name': title_colum.get_text(),
            'position': position,
            'year': int(year_text.strip('()')),
            'rateing': float(rating_text.strip()),
            # href is "/title/<id>/..."; keep only the /title/<id>/ prefix.
            'url': "https://www.imdb.com" + title_colum["href"][:17],
        })
    return main_data


pprint.pprint(scrap_top_list())
|
prince21298/SCRAPING | task12ex.py | import requests
from bs4 import BeautifulSoup
import json
import os.path
from os import path
from pprint import pprint
def scrap_top_list():
    """Return IMDb's top-rated Indian movies, caching the result in movie.json.

    Each entry is a dict with 'name', 'position', 'year', 'rateing' and 'url'.
    """
    if os.path.exists("movie.json"):
        # Cache hit: reuse the previously scraped list.
        with open("movie.json", 'r') as file:
            read = file.read()
            main_data = json.loads(read)
            return(main_data)
    else:
        # NOTE(review): the URL has a stray leading space — should be stripped.
        url = " https://www.imdb.com/india/top-rated-indian-movies/"
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')
        main_div = soup.find('div', class_="lister")
        div = main_div.find('tbody', class_="lister-list")
        trs = div.findAll('tr')
        num = 0
        main_data = []
        for tr in trs:
            num = num + 1
            dic = {}
            title_colum = tr.find('td', class_="titleColumn").a
            dic['name'] = title_colum.get_text()
            dic['position'] = num
            year = tr.find('span', class_="secondaryInfo")
            # get_text() is like "(2016)"; the value is also (oddly) stored
            # back onto the Tag as an attribute.
            new_year = year['year'] = year.get_text()
            cut = int(new_year[1:5])
            dic['year'] = cut
            movie_rateing = tr.find('td', class_="ratingColumn imdbRating")
            rateing = movie_rateing.get_text()
            # Fixed-offset slice of the rating cell's text.
            cut_rateing = float(rateing[3:5])
            dic['rateing'] = cut_rateing
            # href is "/title/<id>/..."; keep only the /title/<id>/ prefix.
            dic['url'] = "https://www.imdb.com" + title_colum["href"][:17]
            main_data.append(dic)
        # Cache the scraped list for subsequent runs.
        with open("movie.json", "w") as file:
            read = json.dumps(main_data)
            file.write(read)
            file.close()
        return(main_data)


top_movies = scrap_top_list()
def scrap_movie_cast(movie_cast_url):
    """Scrape the full cast list for each movie, caching in movie_cast.json.

    Args:
        movie_cast_url: list of movie dicts, each with a 'url' key.

    Returns:
        dict mapping "cast of <n> movie" -> list of {'imdb_id', 'name'} dicts.
    """
    if os.path.exists("movie_cast.json"):
        # Cache hit: reuse the previously scraped casts.
        with open("movie_cast.json", 'r') as file:
            return json.loads(file.read())
    c_data = {}
    count = 1
    for movie in movie_cast_url:
        url = movie["url"]
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')
        for see_more in soup.find_all('div', class_="see-more"):
            if "See full cast »" == see_more.text.strip():
                cast = see_more.find('a').get("href")
                url1 = url + cast
                page1 = requests.get(url1)
                # BUG FIX: parse the full-cast page that was just fetched
                # (page1) — previously this re-parsed the movie page (page).
                bs4 = BeautifulSoup(page1.text, 'html.parser')
                table = bs4.find('table', class_="cast_list")
                cast_data = []
                for row in table.find_all('tr'):
                    for cell in row.find_all('td', class_=""):
                        a = cell.find('a')
                        # href is "/name/nm0000000/..."; chars 6:15 are the id.
                        dict1 = {"imdb_id": a.get('href')[6:15],
                                 "name": a.text.strip()}
                        cast_data.append(dict1)
                c_data["cast of " + str(count) + " movie"] = cast_data
                count += 1
    # Cache the scraped casts for subsequent runs.
    with open("movie_cast.json", "w") as file:
        file.write(json.dumps(c_data))
    return c_data


pprint(scrap_movie_cast(top_movies))
prince21298/SCRAPING | task9/task9.py | <filename>task9/task9.py<gh_stars>1-10
import requests,random,time
from bs4 import BeautifulSoup
import json
import os.path
from os import path
from pprint import pprint
def scrap_top_list():
    """Return IMDb's top-rated Indian movies, caching the result in movie.json.

    Each entry is a dict with 'name', 'position', 'year', 'rateing' and 'url'.
    """
    if os.path.exists("movie.json"):
        # Cache hit: reuse the previously scraped list.
        with open("movie.json", 'r') as file:
            read = file.read()
            main_data = json.loads(read)
            return(main_data)
    else:
        # NOTE(review): the URL has a stray leading space — should be stripped.
        url = " https://www.imdb.com/india/top-rated-indian-movies/"
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')
        main_div = soup.find('div', class_="lister")
        div = main_div.find('tbody', class_="lister-list")
        trs = div.findAll('tr')
        num = 0
        main_data = []
        for tr in trs:
            num = num + 1
            dic = {}
            title_colum = tr.find('td', class_="titleColumn").a
            dic['name'] = title_colum.get_text()
            dic['position'] = num
            year = tr.find('span', class_="secondaryInfo")
            # get_text() is like "(2016)"; also stored back onto the Tag.
            new_year = year['year'] = year.get_text()
            cut = int(new_year[1:5])
            dic['year'] = cut
            movie_rateing = tr.find('td', class_="ratingColumn imdbRating")
            rateing = movie_rateing.get_text()
            cut_rateing = float(rateing[3:5])
            dic['rateing'] = cut_rateing
            # href is "/title/<id>/..."; keep only the /title/<id>/ prefix.
            dic['url'] = "https://www.imdb.com" + title_colum["href"][:17]
            main_data.append(dic)
        # Cache the scraped list for subsequent runs.
        with open("movie.json", "w") as file:
            read = json.dumps(main_data)
            file.write(read)
            file.close()
        return(main_data)


top_movies = scrap_top_list()
def scrap_movie_detail(movie_url):
    """Scrape one IMDb movie page into a detail dict.

    Args:
        movie_url: full URL of the movie's IMDb page.

    Returns:
        dict with keys 'name', 'Directors', 'Country', 'Language',
        'poster_image_url', 'Bio', 'rumtime' (minutes; key spelling kept
        for backward compatibility) and 'genre'.
    """
    # Polite random crawl delay (1-3 s) between requests.
    time.sleep(random.randint(1, 3))
    movie_data = {}
    data = requests.get(movie_url)
    soup = BeautifulSoup(data.text, 'html.parser')

    # Title: the h1 text ends with a "(year)" token — drop it.
    title_parts = soup.find('div', class_="title_wrapper").find('h1').get_text().split()
    title_parts.pop()
    movie_data["name"] = " ".join(title_parts)

    # Directors (the "Directors:" credit block).
    for item in soup.find_all('div', class_="credit_summary_item"):
        h4 = item.find("h4")
        if h4 and h4.text == "Directors:":
            movie_data["Directors"] = [a.get_text() for a in item.find_all("a")]

    # Country and language from the title-details article.
    details = soup.find("div", {"class": "article", "id": "titleDetails"})
    for block in details.find_all('div', class_="txt-block"):
        h4 = block.find("h4")
        if not h4:
            continue
        if h4.text == "Country:":
            movie_data["Country"] = block.find('a').text
        if h4.text == "Language:":
            movie_data["Language"] = [a.text for a in block.find_all('a')]

    movie_data["poster_image_url"] = soup.find('div', class_="poster").find('a').img["src"]
    movie_data["Bio"] = soup.find('div', class_="summary_text").text

    title_wrapper = soup.find('div', class_="title_wrapper")
    # Runtime like "2h 13min" -> total minutes.
    # BUG FIX: `h` was previously unbound (NameError) when the runtime had
    # no hour component (e.g. "46min").
    h = 0
    m = 0
    for token in title_wrapper.find('time').text.strip().split():
        if "h" in token:
            h = int(token.strip("h")) * 60
        elif "min" in token:
            m += int(token.strip("min"))
    movie_data["rumtime"] = h + m

    # Genres: every subtext link except the last one (the release date).
    links = title_wrapper.find("div", class_="subtext").find_all('a')
    movie_data["genre"] = [a.text for a in links[:-1]]
    return movie_data
def get_scrap_movie_detail(movie_list):
    """Return detail dicts for every movie, caching each in <imdb_id>.json.

    Args:
        movie_list: list of movie dicts, each with a 'url' key.
    """
    row = []
    for i in movie_list:
        u1 = i["url"]
        # Chars 27:36 of the URL are the imdb id -> per-movie cache filename.
        u2 = u1[27:36] + ".json"
        if os.path.exists(u2):
            # Cache hit for this movie.
            with open(u2, 'r') as file:
                read = file.read()
                ro = json.loads(read)
        else:
            ro = scrap_movie_detail(i["url"])
            with open(u2, "w") as file:
                read = json.dumps(ro)
                file.write(read)
                file.close()
        row.append(ro)
    return(row)


pprint(get_scrap_movie_detail(top_movies))
|
prince21298/SCRAPING | task3.py | import requests
from bs4 import BeautifulSoup
import json
import os.path
from os import path
import pprint
def scrap_top_list():
    """Return IMDb's top-rated Indian movies, caching the result in movie.json.

    Each entry is a dict with 'name', 'position', 'year', 'rateing' and 'url'.
    """
    if os.path.exists("movie.json"):
        # Cache hit: reuse the previously scraped list.
        with open("movie.json", 'r') as file:
            read = file.read()
            main_data = json.loads(read)
            return(main_data)
    else:
        # NOTE(review): the URL has a stray leading space — should be stripped.
        url = " https://www.imdb.com/india/top-rated-indian-movies/"
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')
        main_div = soup.find('div', class_="lister")
        div = main_div.find('tbody', class_="lister-list")
        trs = div.findAll('tr')
        num = 0
        main_data = []
        for tr in trs:
            num = num + 1
            dic = {}
            title_colum = tr.find('td', class_="titleColumn").a
            dic['name'] = title_colum.get_text()
            dic['position'] = num
            year = tr.find('span', class_="secondaryInfo")
            # get_text() is like "(2016)"; also stored back onto the Tag.
            new_year = year['year'] = year.get_text()
            cut = int(new_year[1:5])
            dic['year'] = cut
            movie_rateing = tr.find('td', class_="ratingColumn imdbRating")
            rateing = movie_rateing.get_text()
            cut_rateing = float(rateing[3:5])
            dic['rateing'] = cut_rateing
            # href is "/title/<id>/..."; keep only the /title/<id>/ prefix.
            dic['url'] = "https://www.imdb.com" + title_colum["href"][:17]
            main_data.append(dic)
        # Cache the scraped list for subsequent runs.
        with open("movie.json", "w") as file:
            read = json.dumps(main_data)
            file.write(read)
            file.close()
        return(main_data)
def decade_year(movies=None):
    """Group the top-rated movies by the decade of their release year.

    Args:
        movies: optional pre-fetched movie list (each a dict with a "year"
            key). Defaults to loading/scraping via scrap_top_list(), so
            existing callers are unaffected.

    Returns:
        dict mapping decade start year (e.g. 1990) to the list of movie
        dicts released in that decade.
    """
    top_250 = scrap_top_list() if movies is None else movies
    years = [movie["year"] for movie in top_250]
    first = min(years) - min(years) % 10  # decade containing the oldest movie
    last = max(years) - max(years) % 10   # decade containing the newest movie
    storage = {}
    for decade in range(first, last + 1, 10):
        # BUG FIX: the original used strict inequalities on both sides
        # (year < y and y > year), which dropped any movie released exactly
        # on a decade boundary (e.g. 1990) from every bucket.
        storage[decade] = [m for m in top_250
                           if decade <= m["year"] < decade + 10]
    return storage
pprint.pprint(decade_year())
|
yapdianang/fast_transformer | onmt/modules/copy_generator.py | import math
import torch
import torch.nn as nn
from onmt.utils.misc import aeq
from onmt.utils.loss import LossComputeBase
def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs,
                         batch_dim=1, batch_offset=None):
    """
    Given scores from an expanded dictionary
    corresponding to a batch, sums together copies,
    with a dictionary word when it is ambiguous.
    """
    # Columns >= len(tgt_vocab) are per-example "copy" slots, one per
    # source-vocab token.
    offset = len(tgt_vocab)
    for b in range(scores.size(batch_dim)):
        blank = []  # copy-slot columns whose word also exists in tgt_vocab
        fill = []   # the matching target-vocab columns
        batch_id = batch_offset[b] if batch_offset is not None else b
        index = batch.indices.data[batch_id]
        src_vocab = src_vocabs[index]
        # Start at 1 to skip the source vocab's <unk> entry.
        for i in range(1, len(src_vocab)):
            sw = src_vocab.itos[i]
            ti = tgt_vocab.stoi[sw]
            if ti != 0:
                blank.append(offset + i)
                fill.append(ti)
        if blank:
            blank = torch.Tensor(blank).type_as(batch.indices.data)
            fill = torch.Tensor(fill).type_as(batch.indices.data)
            score = scores[:, b] if batch_dim == 1 else scores[b]
            # In place: fold each copy score into its in-vocab twin column,
            # then overwrite the copy slot with a tiny probability.
            score.index_add_(1, fill, score.index_select(1, blank))
            score.index_fill_(1, blank, 1e-10)
    return scores
class CopyGenerator(nn.Module):
    """An implementation of pointer-generator networks
    :cite:`DBLP:journals/corr/SeeLM17`.

    These networks consider copying words
    directly from the source sequence.

    The copy generator is an extended version of the standard
    generator that computes three values.

    * :math:`p_{softmax}` the standard softmax over `tgt_dict`
    * :math:`p(z)` the probability of copying a word from
      the source
    * :math:`p_{copy}` the probability of copying a particular word.
      taken from the attention distribution directly.

    The model returns a distribution over the extend dictionary,
    computed as

    :math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`

    .. mermaid::

       graph BT
          A[input]
          S[src_map]
          B[softmax]
          BB[switch]
          C[attn]
          D[copy]
          O[output]
          A --> B
          A --> BB
          S --> D
          C --> D
          D --> O
          B --> O
          BB --> O

    Args:
        input_size (int): size of input representation
        output_size (int): size of output vocabulary
        pad_idx (int)
        conv_first (bool): if True, attention was computed over a 3x
            strided/downsampled source and is upsampled back here.
    """

    def __init__(self, input_size, output_size, pad_idx, conv_first):
        super(CopyGenerator, self).__init__()
        self.linear = nn.Linear(input_size, output_size)
        self.linear_copy = nn.Linear(input_size, 1)
        self.pad_idx = pad_idx
        self.conv_first = conv_first
        if conv_first:
            # One transposed conv per (src_len % 3) case so the upsampled
            # attention length matches the original source length exactly.
            self.conv_transpose = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3)
            self.conv_transpose_pad1 = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3, output_padding=1)
            self.conv_transpose_pad2 = nn.ConvTranspose1d(1, 1, kernel_size=3, stride=3, output_padding=2)

    def forward(self, hidden, attn, src_map):
        """
        Compute a distribution over the target dictionary
        extended by the dynamic dictionary implied by copying
        source words.

        Args:
           hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)``
           attn (FloatTensor): attn for each ``(batch x tlen, input_size)``
           src_map (FloatTensor):
               A sparse indicator matrix mapping each source word to
               its index in the "extended" vocab containing.
               ``(src_len, batch, extra_words)``
        """
        if self.conv_first:
            # Upsample attention from src_len//3 back to src_len, picking the
            # transposed conv whose output_padding matches src_len % 3.
            attn = torch.unsqueeze(attn, 1)
            original_seq_len = src_map.shape[0]
            if original_seq_len % 3 == 0:
                attn = self.conv_transpose(attn)
            elif original_seq_len % 3 == 1:
                attn = self.conv_transpose_pad1(attn)
            else:
                attn = self.conv_transpose_pad2(attn)
            attn = torch.squeeze(attn, 1)

        # CHECKS
        batch_by_tlen, _ = hidden.size()
        batch_by_tlen_, slen = attn.size()
        slen_, batch, cvocab = src_map.size()
        aeq(batch_by_tlen, batch_by_tlen_)
        aeq(slen, slen_)

        # Original probabilities.
        logits = self.linear(hidden)
        # Padding can never be generated.
        logits[:, self.pad_idx] = -float('inf')
        prob = torch.softmax(logits, 1)

        # Probability of copying p(z=1) batch.
        p_copy = torch.sigmoid(self.linear_copy(hidden))
        # Probability of not copying: p_{word}(w) * (1 - p(z))
        out_prob = torch.mul(prob, 1 - p_copy)
        mul_attn = torch.mul(attn, p_copy)
        # Scatter attention mass onto the extended-vocab copy slots.
        copy_prob = torch.bmm(
            mul_attn.view(-1, batch, slen).transpose(0, 1),
            src_map.transpose(0, 1)
        ).transpose(0, 1)
        copy_prob = copy_prob.contiguous().view(-1, cvocab)
        # Concatenate generation probs with copy probs over the extra words.
        return torch.cat([out_prob, copy_prob], 1)
class CopyGeneratorLoss(nn.Module):
    """Copy generator criterion."""

    def __init__(self, vocab_size, force_copy, unk_index=0,
                 ignore_index=-100, eps=1e-20):
        super(CopyGeneratorLoss, self).__init__()
        # force_copy: train exclusively through the copy distribution for
        # source-aligned tokens, even if they also exist in the target vocab.
        self.force_copy = force_copy
        self.eps = eps
        self.vocab_size = vocab_size
        self.ignore_index = ignore_index
        self.unk_index = unk_index

    def forward(self, scores, align, target):
        """
        Args:
            scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab size
                whose sum along dim 1 is less than or equal to 1, i.e. cols
                softmaxed.
            align (LongTensor): ``(batch_size x tgt_len)``
            target (LongTensor): ``(batch_size x tgt_len)``
        """
        # probabilities assigned by the model to the gold targets
        vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)

        # probability of tokens copied from source
        # (copy slots live after the first vocab_size columns)
        copy_ix = align.unsqueeze(1) + self.vocab_size
        copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)
        # Set scores for unk to 0 and add eps
        copy_tok_probs[align == self.unk_index] = 0
        copy_tok_probs += self.eps  # to avoid -inf logs

        # find the indices in which you do not use the copy mechanism
        non_copy = align == self.unk_index
        if not self.force_copy:
            non_copy = non_copy | (target != self.unk_index)

        probs = torch.where(
            non_copy, copy_tok_probs + vocab_probs, copy_tok_probs
        )
        # NOTE(review): ad-hoc NaN guard — shifting all probs to be positive
        # changes the loss scale whenever it triggers; confirm intent.
        if math.isnan(probs.log().sum()):
            probs = probs - torch.min(probs) + self.eps

        loss = -probs.log()  # just NLLLoss; can the module be incorporated?
        # Drop padding.
        loss[target == self.ignore_index] = 0
        return loss
class CopyGeneratorLossCompute(LossComputeBase):
    """Copy Generator Loss Computation."""

    def __init__(self, criterion, generator, tgt_vocab, normalize_by_length):
        super(CopyGeneratorLossCompute, self).__init__(criterion, generator)
        self.tgt_vocab = tgt_vocab
        # If True, each sequence's NLL is divided by its (non-pad) length.
        self.normalize_by_length = normalize_by_length

    def _make_shard_state(self, batch, output, range_, attns):
        """See base class for args description."""
        if getattr(batch, "alignment", None) is None:
            raise AssertionError("using -copy_attn you need to pass in "
                                 "-dynamic_dict during preprocess stage.")
        return {
            "output": output,
            "target": batch.tgt[range_[0] + 1: range_[1], :, 0],
            "copy_attn": attns.get("copy"),
            "align": batch.alignment[range_[0] + 1: range_[1]]
        }

    def _compute_loss(self, batch, output, target, copy_attn, align):
        """Compute the loss.

        The args must match :func:`self._make_shard_state()`.

        Args:
            batch: the current batch.
            output: the predict output from the model.
            target: the validate target to compare output with.
            copy_attn: the copy attention value.
            align: the align info.
        """
        target = target.view(-1)
        align = align.view(-1)
        scores = self.generator(
            self._bottle(output), self._bottle(copy_attn), batch.src_map
        )
        loss = self.criterion(scores, align, target)
        # print ("loss: {}".format(loss))

        # this block does not depend on the loss value computed above
        # and is used only for stats
        scores_data = collapse_copy_scores(
            self._unbottle(scores.clone(), batch.batch_size),
            batch, self.tgt_vocab, batch.dataset.src_vocabs)
        scores_data = self._bottle(scores_data)

        # this block does not depend on the loss value computed above
        # and is used only for stats
        # Correct target copy token instead of <unk>
        # tgt[i] = align[i] + len(tgt_vocab)
        # for i such that tgt[i] == 0 and align[i] != 0
        target_data = target.clone()
        unk = self.criterion.unk_index
        correct_mask = (target_data == unk) & (align != unk)
        offset_align = align[correct_mask] + len(self.tgt_vocab)
        target_data[correct_mask] += offset_align

        # Compute sum of perplexities for stats
        stats = self._stats(loss.sum().clone(), scores_data, target_data)

        # this part looks like it belongs in CopyGeneratorLoss
        if self.normalize_by_length:
            # Compute Loss as NLL divided by seq length
            tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()
            # Compute Total Loss per sequence in batch
            loss = loss.view(-1, batch.batch_size).sum(0)
            # Divide by length of each sequence and sum
            loss = torch.div(loss, tgt_lens).sum()
        else:
            loss = loss.sum()

        return loss, stats
|
yapdianang/fast_transformer | onmt/encoders/transformer.py | <reponame>yapdianang/fast_transformer<gh_stars>0
"""
Implementation of "Attention is All You Need"
"""
import torch.nn as nn
from onmt.encoders.encoder import EncoderBase
from onmt.modules import MultiHeadedAttention
from onmt.modules import MultiHeadedStridedAttention
from onmt.modules.position_ffn import PositionwiseFeedForward
class TransformerEncoderLayer(nn.Module):
    """
    A single layer of the transformer encoder.

    Args:
        d_model (int): the dimension of keys/values/queries in
                   MultiHeadedAttention, also the input size of
                   the first-layer of the PositionwiseFeedForward.
        heads (int): the number of head for MultiHeadedAttention.
        d_ff (int): the second-layer of the PositionwiseFeedForward.
        dropout (float): dropout probability(0-1.0).
        strided_attn (bool): use MultiHeadedStridedAttention instead of
            the standard multi-head attention.
        conv_k_v (bool): 3x-downsample keys/values with a strided conv
            before self-attention.
    """

    def __init__(self, d_model, heads, d_ff, dropout,
                 max_relative_positions=0, strided_attn=False, conv_k_v=False):
        super(TransformerEncoderLayer, self).__init__()
        self.strided_attn = strided_attn
        self.conv_k_v = conv_k_v
        if self.strided_attn:
            self.self_attn = MultiHeadedStridedAttention(
                heads, d_model, dropout=dropout,
                max_relative_positions=max_relative_positions)
        else:
            self.self_attn = MultiHeadedAttention(
                heads, d_model, dropout=dropout,
                max_relative_positions=max_relative_positions)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        # NOTE(review): this conv is constructed even when conv_k_v is False,
        # adding unused parameters to the module.
        self.conv1d_k_v = nn.Conv1d(d_model, d_model, kernel_size=3, stride=3)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, mask):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, src_len, model_dim)``
            mask (LongTensor): ``(batch_size, src_len, src_len)``

        Returns:
            (FloatTensor):

            * outputs ``(batch_size, src_len, model_dim)``
        """
        # Pre-norm residual block: LayerNorm -> self-attn -> dropout -> add.
        input_norm = self.layer_norm(inputs)
        q, k, v = input_norm, input_norm, input_norm
        if self.conv_k_v:
            # Shorten keys/values 3x along the sequence axis.
            k = self.conv1d_k_v(k.transpose(1, 2)).transpose(1, 2)
            v = self.conv1d_k_v(v.transpose(1, 2)).transpose(1, 2)
        context, _ = self.self_attn(k, v, q,
                                    mask=mask, type="self")
        out = self.dropout(context) + inputs
        return self.feed_forward(out)
class TransformerEncoder(EncoderBase):
    """The Transformer encoder from "Attention is All You Need"
    :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`

    .. mermaid::

       graph BT
          A[input]
          B[multi-head self-attn]
          C[feed forward]
          O[output]
          A --> B
          B --> C
          C --> O

    Args:
        num_layers (int): number of encoder layers
        d_model (int): size of the model
        heads (int): number of heads
        d_ff (int): size of the inner FF layer
        dropout (float): dropout parameters
        embeddings (onmt.modules.Embeddings):
          embeddings to use, should have positional encodings
        conv_first (bool): 3x-downsample the embedded source before the
            transformer stack (no upsampling here; see CopyGenerator).
        conv_encoder_deconv (bool): 3x-downsample before the stack and
            upsample back afterwards with transposed convs.

    Returns:
        (torch.FloatTensor, torch.FloatTensor):

        * embeddings ``(src_len, batch_size, model_dim)``
        * memory_bank ``(src_len, batch_size, model_dim)``
    """

    def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings,
                 max_relative_positions, conv_first, strided_attn, conv_encoder_deconv, conv_k_v):
        super(TransformerEncoder, self).__init__()

        self.embeddings = embeddings
        self.transformer = nn.ModuleList(
            [TransformerEncoderLayer(
                d_model, heads, d_ff, dropout,
                max_relative_positions=max_relative_positions,
                strided_attn=strided_attn, conv_k_v=conv_k_v)
             for i in range(num_layers)])
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.conv_first = conv_first
        self.conv_encoder_deconv = conv_encoder_deconv
        if conv_k_v or conv_encoder_deconv:
            # Downsampling conv plus matching pooling for the pad mask.
            self.conv1d = nn.Conv1d(d_model, d_model, kernel_size=3, stride=3)
            self.mask_pool = nn.MaxPool1d(kernel_size=3, stride=3)
        if conv_encoder_deconv:
            # One transposed conv per (src_len % 3) case so the upsampled
            # output length matches the original sequence length exactly.
            self.conv_transpose = nn.ConvTranspose1d(d_model, d_model, kernel_size=3, stride=3)
            self.conv_transpose_pad1 = nn.ConvTranspose1d(d_model, d_model, kernel_size=3, stride=3, output_padding=1)
            self.conv_transpose_pad2 = nn.ConvTranspose1d(d_model, d_model, kernel_size=3, stride=3, output_padding=2)
        # assert (not(self.conv_first and self.conv_encoder_deconv))

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.enc_layers,
            opt.enc_rnn_size,
            opt.heads,
            opt.transformer_ff,
            opt.dropout,
            embeddings,
            opt.max_relative_positions,
            opt.conv_first,
            opt.strided_attn,
            opt.conv_encoder_deconv,
            opt.conv_k_v)

    def forward(self, src, lengths=None):
        """See :func:`EncoderBase.forward()`"""
        self._check_args(src, lengths)

        emb = self.embeddings(src)

        out = emb.transpose(0, 1).contiguous()
        words = src[:, :, 0].transpose(0, 1)
        w_batch, w_len = words.size()
        padding_idx = self.embeddings.word_padding_idx
        mask = words.data.eq(padding_idx).unsqueeze(1)  # [B, 1, T]
        # if set conv_first=True, convolve first for memory compressed
        # attention and reduce seq length (mask is pooled to match).
        original_seq_len = out.shape[1]
        if self.conv_first or self.conv_encoder_deconv:
            out = self.conv1d(out.transpose(1, 2)).transpose(1, 2)
            mask = self.mask_pool(mask.float()).byte()
        # Run the forward pass of every layer of the tranformer.
        for layer in self.transformer:
            out = layer(out, mask)
        out = self.layer_norm(out)
        if self.conv_encoder_deconv:
            # Upsample back to original_seq_len, choosing output_padding
            # according to original_seq_len % 3.
            out = out.transpose(1, 2)
            if original_seq_len % 3 == 0:
                out = self.conv_transpose(out)
            elif original_seq_len % 3 == 1:
                out = self.conv_transpose_pad1(out)
            else:
                out = self.conv_transpose_pad2(out)
            out = out.transpose(1, 2)
        return emb, out.transpose(0, 1).contiguous(), lengths
|
yapdianang/fast_transformer | onmt/modules/multi_headed_strided_attn.py | import math
import torch
import torch.nn as nn
from onmt.utils.misc import generate_relative_positions_matrix,\
relative_matmul
class MultiHeadedStridedAttention(nn.Module):
    def __init__(self, head_count, model_dim, dropout=0.1,
                 max_relative_positions=0):
        # Multi-head attention parameters; model_dim is split evenly across heads.
        assert model_dim % head_count == 0
        self.dim_per_head = model_dim // head_count
        self.model_dim = model_dim

        super(MultiHeadedStridedAttention, self).__init__()
        self.head_count = head_count

        # Separate projections for keys, values and queries
        # (head_count * dim_per_head == model_dim).
        self.linear_keys = nn.Linear(model_dim,
                                     head_count * self.dim_per_head)
        self.linear_values = nn.Linear(model_dim,
                                       head_count * self.dim_per_head)
        self.linear_query = nn.Linear(model_dim,
                                      head_count * self.dim_per_head)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.final_linear = nn.Linear(model_dim, model_dim)

        self.max_relative_positions = max_relative_positions
        if max_relative_positions > 0:
            # Shaw et al.-style relative position embeddings over the window
            # [-max_relative_positions, +max_relative_positions].
            vocab_size = max_relative_positions * 2 + 1
            self.relative_positions_embeddings = nn.Embedding(
                vocab_size, self.dim_per_head)
    def perform_attention(self,
                          query, dim_per_head,
                          key, relations_keys, mask,
                          value, relations_values,
                          batch_size, head_count,
                          query_len, key_len,
                          shape, unshape):
        # Scaled dot-product attention over already-projected/shaped tensors.
        # 2) Calculate and scale scores.
        query = query / math.sqrt(dim_per_head)
        # batch x num_heads x query_len x key_len
        query_key = torch.matmul(query, key.transpose(2, 3))

        # NOTE(review): `type` here is the *builtin* (there is no `type`
        # parameter in this method), so `type == "self"` is always False and
        # both relative-position branches below are dead code — confirm
        # whether an attention-type argument was meant to be passed in.
        if self.max_relative_positions > 0 and type == "self":
            scores = query_key + relative_matmul(query, relations_keys, True)
        else:
            scores = query_key
        scores = scores.float()

        if mask is not None:
            mask = mask.unsqueeze(1)  # [B, 1, 1, T_values]
            scores = scores.masked_fill(mask, -1e18)

        # 3) Apply attention dropout and compute context vectors.
        attn = self.softmax(scores).to(query.dtype)
        drop_attn = self.dropout(attn)

        context_original = torch.matmul(drop_attn, value)

        if self.max_relative_positions > 0 and type == "self":
            context = unshape(context_original
                              + relative_matmul(drop_attn,
                                                relations_values,
                                                False))
        else:
            context = unshape(context_original)

        output = self.final_linear(context)

        # Return one attn (first head only) for visualization/copy use.
        top_attn = attn \
            .view(batch_size, head_count,
                  query_len, key_len)[:, 0, :, :] \
            .contiguous()

        return output, top_attn
def forward(self, key, value, query, mask=None,
            layer_cache=None, type=None):
    """Strided multi-head attention: attends over three overlapping
    windows of the sequence (first half, middle half, last half) and
    stitches the results back together.

    Args:
        key, value, query: [batch, seq_len, model_dim] tensors.
        mask: attention mask; NOTE(review): it is sliced unconditionally
            below, so a None mask would raise — confirm callers always
            pass one.
        layer_cache: optional dict for incremental decoding; stores the
            projected keys/values under "self_*" / "memory_*" keys.
        type: "self" or "context"; selects projection/caching strategy.
            NOTE(review): this shadows the builtin ``type``, and
            `perform_attention` reads the builtin in its own scope, so
            its relative-position branches never fire — looks like a
            latent bug; confirm before relying on relative positions.

    Returns:
        (output, top_attn): [batch, seq_len, model_dim] context and a
        [batch, seq_len, seq_len] block-diagonal-ish attention map.
    """
    batch_size = key.size(0)
    dim_per_head = self.dim_per_head
    head_count = self.head_count
    key_len = key.size(1)
    query_len = query.size(1)
    device = key.device

    def shape(x):
        """Projection: [B, L, H*D] -> [B, H, L, D]."""
        return x.view(batch_size, -1, head_count, dim_per_head) \
            .transpose(1, 2)

    def unshape(x):
        """Compute context: [B, H, L, D] -> [B, L, H*D]."""
        return x.transpose(1, 2).contiguous() \
            .view(batch_size, -1, head_count * dim_per_head)

    # 1) Project key, value, and query.
    if layer_cache is not None:
        if type == "self":
            # Self-attention during decoding: project the new step and
            # append to the cached keys/values along the length dim.
            query, key, value = self.linear_query(query),\
                                self.linear_keys(query),\
                                self.linear_values(query)
            key = shape(key)
            value = shape(value)
            if layer_cache["self_keys"] is not None:
                key = torch.cat(
                    (layer_cache["self_keys"].to(device), key),
                    dim=2)
            if layer_cache["self_values"] is not None:
                value = torch.cat(
                    (layer_cache["self_values"].to(device), value),
                    dim=2)
            layer_cache["self_keys"] = key
            layer_cache["self_values"] = value
        elif type == "context":
            # Encoder-decoder attention: memory keys/values are projected
            # once, then reused from the cache on subsequent steps.
            query = self.linear_query(query)
            if layer_cache["memory_keys"] is None:
                key, value = self.linear_keys(key),\
                             self.linear_values(value)
                key = shape(key)
                value = shape(value)
            else:
                key, value = layer_cache["memory_keys"],\
                             layer_cache["memory_values"]
            layer_cache["memory_keys"] = key
            layer_cache["memory_values"] = value
    else:
        # No cache: plain projection of all three inputs.
        key = self.linear_keys(key)
        value = self.linear_values(value)
        query = self.linear_query(query)
        key = shape(key)
        value = shape(value)

    relations_keys = None
    relations_values = None
    if self.max_relative_positions > 0 and type == "self":
        key_len = key.size(2)
        # 1 or key_len x key_len
        relative_positions_matrix = generate_relative_positions_matrix(
            key_len, self.max_relative_positions,
            cache=True if layer_cache is not None else False)
        # 1 or key_len x key_len x dim_per_head
        relations_keys = self.relative_positions_embeddings(
            relative_positions_matrix.to(device))
        # 1 or key_len x key_len x dim_per_head
        relations_values = self.relative_positions_embeddings(
            relative_positions_matrix.to(device))

    query = shape(query)
    key_len = key.size(2)
    query_len = query.size(2)

    # Three overlapping windows over the sequence: [0, L/2),
    # [L/4, 3L/4) and [L/2, L). Each window attends only to itself.
    splice_inds = [[0, query_len//2], [query_len//4, ((query_len//4)*3)], [query_len//2, query_len]]
    outputs = []
    top_attns = []
    for splice in splice_inds:
        q_len = splice[1] - splice[0]
        query_split = query[:, :, splice[0]:splice[1], :]
        key_split = key[:, :, splice[0]:splice[1], :]
        value_split = value[:, :, splice[0]:splice[1], :]
        mask_split = mask[:, :, splice[0]:splice[1]]
        output, top_attn = self.perform_attention(query_split, dim_per_head, key_split, relations_keys, mask_split, value_split, relations_values, batch_size, head_count, q_len, q_len, shape, unshape)
        outputs.append(output)
        top_attns.append(top_attn)
    # Stitch the three window outputs back into one sequence: the middle
    # window is written first, then the first/last windows overwrite
    # their two-thirds shares at the ends.
    # NOTE(review): .cuda() is called unconditionally here, so this path
    # hard-requires a GPU despite the stale TODO in the original.
    output = torch.zeros((query.shape[0], query.shape[2], outputs[0].shape[2])).cuda()
    output[:, splice_inds[1][0]:splice_inds[1][1], :] = outputs[1]
    amt = (outputs[0].shape[1]//3)*2
    output[:, 0:amt, :] = outputs[0][:, 0:amt, :]
    amt = (outputs[2].shape[1]//3)*2
    output[:, -amt:, :] = outputs[2][:, -amt:, :]
    # Same stitching for the head-0 attention map (block placement on
    # the diagonal). GPU required here as well (unconditional .cuda()).
    top_attn = torch.zeros((query.shape[0], query_len, query_len)).cuda()
    top_attn[:, splice_inds[1][0]:splice_inds[1][1], splice_inds[1][0]:splice_inds[1][1]] = top_attns[1]
    amt = (outputs[0].shape[1]//3)*2
    top_attn[:, 0:amt, 0:amt] = top_attns[0][:, 0:amt, 0:amt]
    amt = (outputs[2].shape[1]//3)*2
    top_attn[:, -amt:, -amt:] = top_attns[2][:, -amt:, -amt:]
    return output, top_attn
    # NOTE: Relations_keys and relations_values shapes should be None
    # output, top_attn = self.perform_attention(query, dim_per_head,
    #                                           key, relations_keys, mask,
    #                                           value, relations_values,
    #                                           batch_size, head_count,
    #                                           query_len, key_len,
    #                                           shape, unshape)
    # return output, top_attn
|
Artemiche/polypy | poly.py | import colorsys
import ctypes
from itertools import product
from multiprocessing import Process, Array
from PIL import Image, ImageDraw, ImageFilter
import random
from scipy.spatial import Delaunay
import sys
# Global parameters that are not (yet) automatically computed
POINT_COUNT = 150
EDGE_THRESHOLD = 172
EDGE_RATIO = .98
DARKENING_FACTOR = 35
SPEEDUP_FACTOR_X = 1
SPEEDUP_FACTOR_Y = 1
CONCURRENCY_FACTOR = 3
def main():
    """Entry point: polygonize the image named on argv and save result.jpg."""
    if len(sys.argv) != 2 or not sys.argv[1]:
        print("Please provide the name of the file to process as an argument")
        return 1
    image = Image.open(sys.argv[1])
    # Seed the triangulation with quantized random points, then add
    # points detected along the picture's edges.
    point_list = []
    generate_random(image, point_list)
    generate_edges(image, point_list)
    # Triangulate and sample the colors found inside each triangle.
    tris, tri_colors = triangulate(image, point_list)
    # Paint the triangles over the image and write the result out.
    draw(image, point_list, tris, tri_colors)
    image.save("result.jpg", "jpeg", quality=95)
    return 0
# generate_random appends semi-random points to a list
def generate_random(im, points):
    """Append POINT_COUNT grid-snapped random points to *points* in place.

    Points may fall up to half the propagation margin outside the canvas.
    """
    spread_x, spread_y = get_point_propagation(*im.size)
    step = get_point_distance(*im.size)
    width, height = im.size
    for _ in range(POINT_COUNT):
        # Snap each coordinate to a lattice of `step` pixels, shifted so
        # points can land slightly outside the image.
        px = random.randrange(round((width + spread_x) / step)) * \
            step - (spread_x / 2)
        py = random.randrange(round((height + spread_y) / step)) * \
            step - (spread_y / 2)
        points.append([px, py])
# get_point_propagation returns an arbitrary margin allowing triangle
# points outside of the canvas
def get_point_propagation(width, height):
    """Return (x, y) margins — one quarter of each dimension — within
    which generated points may fall outside the canvas."""
    return (width * 0.25, height * 0.25)
# get_point_distance returns the minimum distance between points (arbitrary
# value)
def get_point_distance(width, height):
    """Return the point spacing: one sixteenth of the smaller dimension."""
    smaller = width if width < height else height
    return smaller / 16
# generate_edges appends points found on detected image edges to a list
def generate_edges(im, points):
    """Append a random sample of edge pixels of *im* to *points* in place.

    The image is sharpened, run through an edge-detection kernel, and
    every sufficiently bright edge pixel is kept with small probability.
    """
    edge_map = im.filter(ImageFilter.SHARPEN).filter(ImageFilter.FIND_EDGES)
    width, height = im.size
    for px, py in product(range(width - 1), range(height - 1)):
        bright = get_grayscale(*edge_map.getpixel((px, py))) > EDGE_THRESHOLD
        if bright and random.random() > EDGE_RATIO:
            points.append([px, py])
# get_grayscale returns the gray level of a pixel based on its RGB colors
def get_grayscale(r, g, b):
    """Return the Rec. 709 luma of an RGB pixel."""
    luma = 0.2126 * r
    luma += 0.7152 * g
    luma += 0.0722 * b
    return luma
# triangulate generates triangles between points and the list of the colors of
# the pixels contained within
def triangulate(im, points):
    """Delaunay-triangulate *points* and sample the image colors inside
    each triangle, fanning per-pixel sampling out over CONCURRENCY_FACTOR
    worker processes.

    Returns (triangles, decoded_colors) where decoded_colors[t] is a list
    of (r, g, b) tuples sampled inside triangle t, or None when no pixel
    landed in it.
    """
    triangles = Delaunay(points)
    # Shared flat buffer, one 64-bit slot per pixel: low 24 bits encode
    # b/g/r, bits 32+ hold the triangle index (0xFFFF means "none").
    colors = Array(ctypes.c_uint64, im.size[0] * im.size[1], lock=True)
    jobs = []
    for i in range(CONCURRENCY_FACTOR):
        p = Process(target=triangulate_worker, args=(im, triangles, colors, i,
                                                     CONCURRENCY_FACTOR))
        jobs.append(p)
        p.start()
    for i in jobs:
        i.join()
    decoded_colors = [None] * len(triangles.simplices)
    # Color decoding
    for i, c in enumerate(colors):
        # Triangle index lives in bits 32-47; << binds tighter than &,
        # so this is c & (0xFFFF << 32), shifted back down.
        t = (c & 0xFFFF << 32) >> 32
        if t == 0xFFFF:
            continue
        if not decoded_colors[t]:
            decoded_colors[t] = []
        decoded_colors[t].append((c & 0xFF,
                                  (c & 0xFF00) >> 8,
                                  (c & 0xFF0000) >> 16))
    return (triangles, decoded_colors)
# triangulate_worker works concurrently by manipulating different rows
def triangulate_worker(im, triangles, colors, worker_index, worker_count):
    """Sample this worker's share of pixels and write encoded
    (triangle id, color) values into the shared *colors* array.

    Workers interleave rows (worker k handles rows k, k+count, ...) so no
    two processes write the same slot.
    """
    for x, y in product(range(SPEEDUP_FACTOR_X,
                              im.size[0] - 1,
                              SPEEDUP_FACTOR_X),
                        # Workers treat different rows
                        range(worker_index * SPEEDUP_FACTOR_Y,
                              im.size[1] - 1,
                              worker_count * SPEEDUP_FACTOR_Y)
                        ):
        # Index of the triangle containing this pixel; -1 when outside
        # the convex hull.
        t = triangles.find_simplex((x, y)).flat[0]
        pixel_index = y*im.size[0] + x
        # Pre-mark the slot as "no triangle" (index 0xFFFF).
        colors[pixel_index] = 0xFFFF << 32
        if not ~t:
            # ~(-1) == 0, i.e. the point belongs to no triangle.
            continue
        # Colors and triangle ID are encoded to integers for fixed-size
        # storage in the Array object through a 64-bit integer
        (r, g, b) = im.getpixel((x, y))
        colors[pixel_index] = ((t << 32) + (b << 16) + (g << 8) + r)
    return
# draw draws triangles on an existing image using the average colors of the
# pixels contained within them and a random darkening
def draw(im, points, triangles, colors):
    """Paint every triangle onto *im*, filled with the average sampled
    color after a random darkening; triangles with no sampled pixels are
    painted black."""
    d = ImageDraw.Draw(im)
    for t, t_colors in enumerate(colors):
        end = (0, 0, 0)
        if t_colors:
            # Channel-wise mean of all (r, g, b) tuples in the triangle.
            avg = [round(sum(y) / len(y)) for y in zip(*t_colors)]
            # Random darkening of the triangles (value channel only).
            # NOTE(review): v can go below 0 for dark triangles — confirm
            # hsv_to_rgb output is acceptable in that case.
            (h, s, v) = colorsys.rgb_to_hsv(avg[0], avg[1], avg[2])
            end = colorsys.hsv_to_rgb(h, s,
                                      v - random.random() * DARKENING_FACTOR)
        d.polygon([tuple(points[y]) for y in triangles.simplices[t]],
                  fill=tuple(map(lambda x: round(x), end)))
if __name__ == "__main__":
sys.exit(main())
|
brianjimenez/ant_thony | ant_thony.py | #!/usr/bin/env python
from __future__ import print_function
from multiprocessing import Process, cpu_count
import argparse
import logging
import os
logging.basicConfig(format='[Ant-Thony] %(levelname)s: %(message)s', level=logging.DEBUG)
class Task(object):
    """A shell command bound to the working directory it must run from."""

    def __init__(self, command, path="."):
        # Command line to execute and the directory to execute it in.
        self.command = command
        self.path = path

    def run(self):
        """Change into the task's directory, then execute its command."""
        os.chdir(self.path)
        os.system(self.command)
class Ant(Process):
    """Worker process that executes its assigned tasks sequentially."""

    def __init__(self, tasks):
        super(Ant, self).__init__()
        # Tasks this ant is responsible for.
        self.tasks = tasks
        logging.info("{} ready with {} tasks".format(self.name, len(self.tasks)))

    def run(self):
        """Run every assigned task, in order, then report completion."""
        for current in self.tasks:
            current.run()
        logging.info("{} going back to the nest".format(self.name))
class Ant_Thony(object):
    """Our buddy Ant-Thony: spreads tasks over Ant worker processes."""

    def __init__(self, tasks, num_cpus=0):
        """Split *tasks* round-robin over worker processes.

        num_cpus: desired worker count; anything that is not a positive
        integer falls back to all available cores.
        """
        try:
            self.num_processes = int(num_cpus)
            if self.num_processes < 1:
                raise ValueError()
        except (ValueError, TypeError):
            logging.warning("Number of cores has not been specified or it is incorrect. Using all available cores.")
            self.num_processes = cpu_count()
        logging.info("Ant-Thony will use {} cores".format(self.num_processes))
        self.tasks = tasks
        self.num_tasks = len(tasks)
        self.workers = []
        # BUG FIX: the original used Python 2's `xrange`, which raises
        # NameError on Python 3; `range` behaves identically here.
        workers_tasks = [tasks[i::self.num_processes]
                         for i in range(self.num_processes)]
        for chunk in workers_tasks:
            self.workers.append(Ant(chunk))

    def release(self):
        """Start all workers and block until every task is done."""
        logging.info("Swarming!")
        for ant in self.workers:
            ant.start()
        for ant in self.workers:
            ant.join()
        logging.info("{} tasks done".format(self.num_tasks))

    def go_home(self):
        """Forcefully terminate all worker processes."""
        for ant in self.workers:
            ant.terminate()
        logging.info("All ants back to the nest")
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='ant_thony')
parser.add_argument("tasks_file_name", help="A file containing a task for each line", metavar="tasks_file_name")
parser.add_argument("--cores", "-cores", "-c", help="CPU cores to use", dest="cores", type=int, default=0)
args = parser.parse_args()
with open(args.tasks_file_name) as handle:
tasks = []
for line in handle:
if line and not line.startswith("#"):
tasks.append(Task(line.rstrip(os.linesep)))
anthony = Ant_Thony(tasks, args.cores)
anthony.release()
anthony.go_home()
|
mofojed/deephaven-plugin-matplotlib | src/deephaven/plugin/matplotlib/__init__.py | from deephaven.plugin import Registration
__version__ = "0.0.1.dev5"
class MatplotlibRegistration(Registration):
    """Registers the matplotlib Figure object type with the plugin system."""
    @classmethod
    def register_into(cls, callback: Registration.Callback) -> None:
        # Imported lazily so figure_type (and matplotlib) is only pulled
        # in when the plugin is actually registered.
        from . import figure_type
        callback.register(figure_type.FigureType)
|
mofojed/deephaven-plugin-matplotlib | src/deephaven/plugin/matplotlib/figure_type.py | <reponame>mofojed/deephaven-plugin-matplotlib<filename>src/deephaven/plugin/matplotlib/figure_type.py
from io import BytesIO
from matplotlib.figure import Figure
from deephaven.plugin.object import Exporter, ObjectType
NAME = "matplotlib.figure.Figure"
class FigureType(ObjectType):
    """Deephaven ObjectType plugin that serializes matplotlib Figures as PNG."""

    @property
    def name(self) -> str:
        """Fully-qualified type name under which figures are registered."""
        return NAME

    def is_type(self, object) -> bool:
        """Return True when *object* is a matplotlib Figure."""
        return isinstance(object, Figure)

    def to_bytes(self, exporter: Exporter, figure: Figure) -> bytes:
        """Render *figure* to an in-memory PNG and return the raw bytes."""
        with BytesIO() as buffer:
            figure.savefig(buffer, format='PNG')
            return buffer.getvalue()
|
LSU-4444-AI/five-in-a-row-alpha-zero | plot.py | import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
from keras.callbacks import History
from policy_value_net_keras import his_path
from keras.utils import plot_model
# Model loss plot
def plot_model_history(model_details):
    """Plot the training loss curve of a Keras History-like object.

    model_details: object whose ``history`` dict contains a 'loss' list
    (one entry per epoch).

    The original function carried ~30 lines of commented-out accuracy/
    val-loss subplot code; it has been removed for readability — the live
    behavior (single loss plot, then show) is unchanged.
    """
    epochs = range(1, len(model_details.history['loss']) + 1)
    plt.plot(epochs, model_details.history['loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['train', 'loss'], loc='best')
    plt.show()
def plot(his_paths):
    """Load each pickled Keras history file and plot its loss curve.

    :param his_paths: iterable of paths to pickled History.history dicts
    :raises ValueError: when one of the paths does not exist
    """
    for path in his_paths:
        # Guard clause: fail loudly on a missing history file.
        if not os.path.exists(path):
            raise ValueError('No model history found')
        with open(path, 'rb') as handle:
            restored = History()
            restored.history = pickle.load(handle)
        plot_model_history(restored)
if __name__ == '__main__':
plot(['./saved/model_6307_11782_history',
'./saved/model_22221_27518_history'])
|
mathieupoirier/rust-vmm-ci | integration_tests/conftest.py | <gh_stars>10-100
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import pytest
PROFILE_CI = "ci"
PROFILE_DEVEL = "devel"
WORKSPACE = "workspace"
CRATE = "crate"
def pytest_addoption(parser):
    """Register the CI-specific command line options with pytest."""
    profile_help = "Profile for running the test: {} or {}".format(
        PROFILE_CI,
        PROFILE_DEVEL
    )
    parser.addoption(
        "--profile",
        default=PROFILE_CI,
        choices=[PROFILE_CI, PROFILE_DEVEL],
        help=profile_help,
    )
    parser.addoption(
        "--no-cleanup",
        action="store_true",
        default=False,
        help="Keep the coverage report in `kcov_output` directory. If this "
             "flag is not provided, both coverage related directories are "
             "removed."
    )
    scope_help = "Defines the scope of running tests: {} or {}".format(
        WORKSPACE,
        CRATE
    )
    parser.addoption(
        "--test-scope",
        default=WORKSPACE,
        choices=[WORKSPACE, CRATE],
        help=scope_help,
    )
@pytest.fixture
def profile(request):
    """Fixture: the value of the --profile command line option."""
    return request.config.getoption("--profile")
@pytest.fixture
def no_cleanup(request):
    """Fixture: the value of the --no-cleanup command line flag."""
    return request.config.getoption("--no-cleanup")
@pytest.fixture
def test_scope(request):
    """Fixture: the value of the --test-scope command line option."""
    return request.config.getoption("--test-scope")
# This is used for defining global variables in pytest.
def pytest_configure():
    """Expose the CI constants as attributes of the `pytest` module so
    tests can read them as e.g. ``pytest.profile_ci``."""
    # These constants are needed in tests, so this is the way that we can
    # export them.
    pytest.profile_ci = PROFILE_CI
    pytest.profile_devel = PROFILE_DEVEL
    pytest.workspace = WORKSPACE
    pytest.crate = CRATE
|
mathieupoirier/rust-vmm-ci | test_run.py | <reponame>mathieupoirier/rust-vmm-ci<filename>test_run.py
#!/usr/bin/env python3
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
import json
import subprocess
import platform
import pathlib
import unittest
from argparse import ArgumentParser, RawTextHelpFormatter
from textwrap import dedent
PARENT_DIR = pathlib.Path(__file__).parent.resolve()
class TestsContainer(unittest.TestCase):
    # Intentionally empty: test methods are attached dynamically via
    # setattr, one per entry of the test description file.
    pass
def make_test_function(command):
    """Return a TestCase-style method that runs *command* in a shell.

    The returned callable raises subprocess.CalledProcessError on a
    non-zero exit status, which unittest reports as a failure.
    """
    def test(self):
        subprocess.run(command, shell=True, check=True)

    return test
def retrieve_test_list(
    config_file=f"{PARENT_DIR}/.buildkite/test_description.json"
):
    """Load and return the JSON test description.

    config_file: path of the JSON file listing the tests to run.
    """
    # The `with` block closes the file on exit; the original additionally
    # called close() inside the block, which was redundant.
    with open(config_file) as json_file:
        return json.load(json_file)
if __name__ == '__main__':
help_text = dedent(
"""
This script allows running all the tests at once on the local machine.
The tests "test_benchmark.py" and "test_commit_format.py" work properly
on the local machine only when the environment variables REMOTE and
BASE_BRANCH are set. Otherwise the default values are "origin" for the
remote name of the upstream repository and "main" for the name of the
base branch, and these tests may not work as expected.
"""
)
parser = ArgumentParser(description=help_text,
formatter_class=RawTextHelpFormatter)
parser.parse_args()
test_config = retrieve_test_list()
for test in test_config['tests']:
command = test['command']
command = command.replace("{target_platform}", platform.machine())
test_func = make_test_function(command)
setattr(TestsContainer, 'test_{}'.format(test['test_name']), test_func)
unittest.main(verbosity=2)
|
mathieupoirier/rust-vmm-ci | integration_tests/test_commit_format.py | <gh_stars>10-100
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Test the commit message format.
This test works properly on the local machine only when the environment
variables REMOTE and BASE_BRANCH are set. Otherwise the default values
are "origin" for the remote name of the upstream repository and "main"
for the name of the base branch, and this test may not work as expected.
"""
import os
import subprocess
from utils import get_cmd_output
COMMIT_TITLE_MAX_LEN = 50
COMMIT_BODY_LINE_MAX_LEN = 72
REMOTE = \
os.environ.get('BUILDKITE_REPO') or \
os.environ.get('REMOTE') or \
"origin"
BASE_BRANCH = \
os.environ.get('BUILDKITE_PULL_REQUEST_BASE_BRANCH') or \
os.environ.get('BASE_BRANCH') or \
"main"
def test_commit_format():
    """
    Checks commit message format for the current PR's commits.
    Checks if commit messages follow the 50/72 git commit rule
    [https://www.midori-global.com/blog/2018/04/02/git-50-72-rule]
    and if commits are signed.
    """
    # Fetch the upstream repository so FETCH_HEAD points at the base tip.
    fetch_base_cmd = "git fetch {} {}".format(REMOTE, BASE_BRANCH)
    try:
        subprocess.run(fetch_base_cmd, shell=True, check=True)
    except subprocess.CalledProcessError:
        raise NameError(
            "The name of the base branch or remote is invalid. "
            "See test documentation for more details."
        ) from None
    # Get hashes of PR's commits in their abbreviated form for
    # a prettier printing.
    shas_cmd = "git log --no-merges --pretty=%h --no-decorate " \
               "FETCH_HEAD..HEAD"
    shas = get_cmd_output(shas_cmd)
    for sha in shas.split():
        # Do not enforce the commit rules when the committer is dependabot.
        author_cmd = "git show -s --format='%ae' " + sha
        author = get_cmd_output(author_cmd)
        if "dependabot" in author:
            continue
        # Raw commit message body, split into lines.
        message_cmd = "git show --pretty=format:%B -s " + sha
        message = get_cmd_output(message_cmd)
        message_lines = message.split("\n")
        assert len(message_lines) >= 3,\
            "The commit '{}' should contain at least 3 lines: title, " \
            "blank line and a sign-off one. Please check: " \
            "https://www.midori-global.com/blog/2018/04/02/git-50-72-rule."\
            .format(sha)
        title = message_lines[0]
        assert message_lines[1] == "",\
            "For commit '{}', title is divided into multiple lines. " \
            "Please keep it one line long and make sure you add a blank " \
            "line between title and description.".format(sha)
        # 50-char title limit.
        assert len(title) <= COMMIT_TITLE_MAX_LEN,\
            "For commit '{}', title exceeds {} chars. " \
            "Please keep it shorter.".format(sha, COMMIT_TITLE_MAX_LEN)
        found_signed_off = False
        # 72-char limit for each body line, up to the sign-off trailer.
        for line in message_lines[2:]:
            if line.startswith("Signed-off-by: "):
                found_signed_off = True
                # If we found `Signed-off-by` line, then it means
                # the commit message ended and we don't want to check
                # line lengths anymore for the current commit.
                break
            assert len(line) <= COMMIT_BODY_LINE_MAX_LEN,\
                "For commit '{}', message line '{}' exceeds {} chars. " \
                "Please keep it shorter or split it in " \
                "multiple lines.".format(sha, line,
                                         COMMIT_BODY_LINE_MAX_LEN)
        assert found_signed_off, "Commit '{}' is not signed. " \
                                 "Please run 'git commit -s --amend' " \
                                 "on it.".format(sha)
|
mathieupoirier/rust-vmm-ci | integration_tests/test_benchmark.py | <gh_stars>10-100
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Compare benchmark results before and after a pull request.
This test works properly on the local machine only when the environment
variables REMOTE and BASE_BRANCH are set. Otherwise the default values
are "origin" for the remote name of the upstream repository and "main"
for the name of the base branch, and this test may not work as expected.
"""
import os
import subprocess
from utils import get_repo_root_path
REMOTE = \
os.environ.get('BUILDKITE_REPO') or \
os.environ.get('REMOTE') or \
"origin"
BASE_BRANCH = \
os.environ.get('BUILDKITE_PULL_REQUEST_BASE_BRANCH') or \
os.environ.get('BASE_BRANCH') or \
"main"
# File used for saving the results of cargo bench
# when running on the PR branch.
PR_BENCH_RESULTS_FILE = "pr_bench_results"
# File used for saving the results of cargo bench
# when running on the upstream branch.
UPSTREAM_BENCH_RESULTS_FILE = "upstream_bench_results"
def test_bench():
    """Runs benchmarks before and after and compares the results."""
    # Benchmarks must run from the repository root so cargo finds the
    # workspace manifest.
    os.chdir(get_repo_root_path())
    # Get numbers for current HEAD.
    return_code, stdout, stderr = _run_cargo_bench(PR_BENCH_RESULTS_FILE)
    # Even if it is the first time this test is run, the benchmark tests should
    # pass. For this purpose, we need to explicitly check the return code.
    assert return_code == 0, "stdout: {}\n stderr: {}".format(stdout, stderr)
    # Get numbers from upstream tip, without the changes from the current PR.
    _git_checkout_upstream_branch()
    return_code, stdout, stderr = _run_cargo_bench(UPSTREAM_BENCH_RESULTS_FILE)
    # Before checking any results, let's just go back to the PR branch.
    # This way we make sure that the cleanup always happens even if the test
    # fails.
    _git_checkout_pr_branch()
    if return_code == 0:
        # In case this benchmark also ran successfully, we can call critcmp and
        # compare the results.
        _run_critcmp()
    else:
        # The benchmark did not run successfully, but it might be that it is
        # because a benchmark does not exist. In this case, we do not want to
        # fail the test.
        if "error: no bench target named `main`" in stderr:
            # This is a bit of a &*%^ way of checking if the benchmark does not
            # exist. Hopefully it will be possible to check it in another way
            # ...soon
            print(
                "There are no benchmarks in main. No comparison can happen."
            )
        else:
            assert return_code == 0, "stdout: {}\n stderr: {}".format(
                stdout, stderr)
def _run_cargo_bench(baseline):
    """Runs `cargo bench` and tags the baseline."""
    command = ("cargo bench --bench main --all-features -- --noplot "
               "--save-baseline {}".format(baseline))
    completed = subprocess.run(
        command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE
    )
    # Return (exit code, stdout, stderr) with streams decoded to text.
    return (completed.returncode,
            completed.stdout.decode('utf-8'),
            completed.stderr.decode('utf-8'))
def _run_critcmp():
    """Compare the two saved bench baselines with critcmp and print both
    its report and any errors it produced."""
    completed = subprocess.run(
        "critcmp {} {}".format(
            UPSTREAM_BENCH_RESULTS_FILE, PR_BENCH_RESULTS_FILE
        ),
        shell=True, check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    print(completed.stdout.decode('utf-8'))
    print('ERRORS')
    print(completed.stderr.decode('utf-8'))
def _git_checkout_upstream_branch():
    """Fetch the upstream base branch and check out its tip (FETCH_HEAD)."""
    subprocess.run("git fetch {} {}".format(REMOTE, BASE_BRANCH),
                   shell=True, check=True)
    subprocess.run("git checkout FETCH_HEAD",
                   shell=True, check=True)
def _git_checkout_pr_branch():
    """Switch back to the previously checked out branch (`git checkout -`),
    suppressing git's output."""
    subprocess.run(
        "git checkout -",
        shell=True,
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
|
Mansouroopi/TDD | account/tests/test_models.py | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the custom user model manager."""

    def test_create_new_user_with_email_successful(self):
        """create_user stores the email and hashes the password."""
        email = '<EMAIL>'
        password = '<PASSWORD>'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        # assertEqual: assertEquals is a deprecated alias and emits a
        # DeprecationWarning on modern Python.
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_email_is_normalized(self):
        """The email is lower-cased on creation."""
        email = '<EMAIL>'
        password = '<PASSWORD>'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email.lower())

    def test_create_new_user_invalid_email(self):
        """Creating a user with no email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test10203040')

    def test_create_superuser_with_email(self):
        """create_superuser also stores the email and hashes the password."""
        email = '<EMAIL>'
        password = '<PASSWORD>'
        user = get_user_model().objects.create_superuser(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email.lower())
        self.assertTrue(user.check_password(password))
|
zkerhcy/dotfiles | i3/conky/volumelevel.py | <reponame>zkerhcy/dotfiles
#!/usr/bin/python
import alsaaudio

# Report the current master volume as a list of per-channel percentages,
# e.g. [74, 74].
m = alsaaudio.Mixer()  # default 'Master' control on the default card
vol = m.getvolume()
# BUG FIX: the original used the Python 2 `print vol` statement, a
# SyntaxError under Python 3; print() works identically on both.
print(vol)

# Shell equivalent kept for reference:
# vol=`amixer get Master | awk -F'[]%[]' '/%/ {if ($7 == "off") { print "Mute" } else { print $2"%" }}' | uniq | head -1`
# echo "$vol"
|
hbergh/jlrpy | setup.py | <filename>setup.py<gh_stars>0
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="jlrpy",
    version="1.1.2",
    author="Edvard",
    author_email="<EMAIL>",
    description="Control your Jaguar I-Pace",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ardevd/jlrpy",
    py_modules=['jlrpy'],
    # BUG FIX: the original listed 'uuid' and 'datetime' here, but both
    # are Python standard library modules — installing the identically
    # named PyPI packages is unnecessary, and the PyPI 'uuid' package is
    # a broken Python 2 snapshot that shadows the stdlib module.
    install_requires=[],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Development Status :: 5 - Production/Stable",
    ],
)
|
plista/news_knowledge_tree | data_processing/googlenews.py | <gh_stars>0
from dotenv import load_dotenv
from pprint import pprint
from newsapi import NewsApiClient
import urllib
import os
from bs4 import BeautifulSoup
from readability import Document
from find_entity import find_entity
import pickle
import pandas as pd
from mysql_caching import set_cache_entities, get_cached_entities
import functools
load_dotenv()
newsapi = NewsApiClient(api_key=os.environ.get("GOOGLE_NEWS_API"))
@functools.lru_cache(maxsize=512)
def html2text(url: str) -> str:
    """Fetch *url*, extract the readable article via readability, and
    return it as plain text (title heading first, then body).

    Results are memoized per URL by lru_cache (512 entries).
    """
    # Spoof a browser User-Agent: some sites reject urllib's default.
    request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0'})
    html = urllib.request.urlopen(request).read()
    doc = Document(html)
    # Prepend the extracted title as an <h2> ahead of the article body.
    cleaned = "<h2>" + doc.short_title() + "</h2><br/>" + doc.summary()
    soup = BeautifulSoup(cleaned, features="html.parser")
    # kill all script and style elements
    for script in soup(["script", "style"]):
        script.extract()  # rip it out
    # get text
    text = soup.get_text()
    # break into lines and remove leading and trailing space on each
    lines = (line.strip() for line in text.splitlines())
    # break multi-headlines into a line each
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    # drop blank lines
    text = "\n".join(chunk for chunk in chunks if chunk)
    return text
def process_headlines(language="en", country="gb"):
    """Fetch top NewsAPI headlines, extract article text and named
    entities (with MySQL caching), pickle the DataFrame to
    'headline_google.pickle', and return it serialized as JSON.

    :param language: two-letter language code for NewsAPI / NER.
    :param country: two-letter country code for NewsAPI.
    """
    top_headline = newsapi.get_top_headlines(
        language=language, country=country, page_size=100
    )
    result = []
    for article in top_headline["articles"]:
        try:
            # Full readable text, memoized per URL by html2text's cache.
            article["text"] = html2text(article["url"])
            # Reuse cached entities when this URL was processed before;
            # otherwise run NER and store the result.
            entities = get_cached_entities(article["url"])
            if entities is None:
                entities = find_entity(article["text"], language=language)
                set_cache_entities(article["url"], entities)
            article["entities"] = entities
            print("It worked for", article["url"], flush=True)
            result.append(article)
        except urllib.error.HTTPError as e:
            # Fetch failed (4xx/5xx): skip this article, keep going.
            print(f"We cannot fetch for", article["url"], e, flush=True)
        except urllib.error.URLError as e:
            # Malformed / unreachable URL: skip this article as well.
            print(f"the url is wrong", article["url"], e, flush=True)
    df = pd.DataFrame(result)
    with open("headline_google.pickle", "wb") as output_file:
        pickle.dump(df, output_file)
    return df.to_json()
if __name__ == "__main__":
process_headlines(language="en", country="gb")
|
plista/news_knowledge_tree | data_processing/mysql_caching.py | import pymysql
from typing import Set
import hashlib
from pymysql.cursors import Cursor
import json
db = pymysql.connect(host="db", user="root", password="password")
cursor = db.cursor()
try:
cursor.execute("CREATE DATABASE opendigitalworld")
except pymysql.err.ProgrammingError:
print("Database already exists")
cursor.execute("USE opendigitalworld")
# try:
# cursor.execute("DROP TABLE opendigitalworld.article;")
# except pymysql.err.InternalError:
# print("no table already")
cursor.execute(
"""CREATE TABLE IF NOT EXISTS `opendigitalworld`.`article` (
`hash` VARCHAR(255),
`url` TEXT ,
`entities` TEXT,
PRIMARY KEY (`hash`)
) CHARSET=utf8;
"""
)
def get_cached_entities(url: str, cursor: Cursor = cursor) -> Set[str]:
    """Return the cached entity set for *url*, or None on a cache miss.

    Rows are keyed by the SHA-256 hex digest of the URL; the entity set is
    stored as a '|'-joined string.
    """
    digest = hashlib.sha256(url.encode("utf-8")).hexdigest()
    # Parameterized query: the original interpolated the value with
    # str.format, an SQL-injection-prone pattern (also fixed: the local
    # variable `hash` shadowed the builtin of the same name).
    cursor.execute(
        "SELECT hash, entities FROM opendigitalworld.article WHERE hash=%s",
        (digest,),
    )
    row = cursor.fetchone()
    if row is not None:
        # Implicitly returns None when there is no cached row.
        return set(row[1].split("|"))
def set_cache_entities(url, entities: Set[str], db=db):
    """Insert the entity set for *url* into the cache table.

    The row key is the SHA-256 hex digest of the URL; entities are stored
    as a '|'-joined string.
    """
    digest = hashlib.sha256(url.encode("utf-8")).hexdigest()
    joined = "|".join(entities)
    # SECURITY FIX: the original built this INSERT with str.format, which
    # allowed SQL injection through the URL or entity strings (a single
    # quote broke the statement). Use a parameterized query, and drop the
    # debug print of the raw SQL.
    sql = ("INSERT INTO `opendigitalworld`.`article` (hash, url, entities) "
           "VALUES (%s, %s, %s);")
    cursor = db.cursor()
    cursor.execute(sql, (digest, url, joined))
    db.commit()
if __name__ == "__main__":
set_cache_entities("qwerty", set(["a", "b"]))
entities = get_cached_entities("qwerty")
print(entities)
|
plista/news_knowledge_tree | backend/utility.py | <reponame>plista/news_knowledge_tree
import os
import pickle
from copy import deepcopy
import random
import json
from pandas import DataFrame
import pandas as pd
from typing import Tuple, Dict
import urllib
random.seed(30)
class NoValidArticle(Exception):
    """Raised when no article can be found with the given constraints."""
    pass
MAX_WIDTH = 7
MAX_LENGTH = 10
def process_dataframe(language, country):
    """Fetch headlines from the data_processing service and return them as
    a DataFrame with an added `lower_entities` column (lower-cased copy of
    each row's entity set)."""
    endpoint = (
        f"http://data_processing:5001/get_headlines"
        f"?language={language}&country={country}"
    )
    raw = urllib.request.urlopen(endpoint).read()
    frame = pd.read_json(json.loads(raw.decode("utf-8")))
    frame["lower_entities"] = frame.entities.apply(
        lambda ents: {name.lower() for name in ents}
    )
    return frame
def init_entity_counters(df: DataFrame) -> Tuple[Dict[str, int], Dict[str, int]]:
    """Build two frequency tables over the articles in *df*.

    :param df: articles with `lower_entities` (set of lower-cased names),
        `entities` (original-cased names) and `text` columns
    :return: (article_counter, text_counter) where article_counter maps a
        lower-cased entity to the number of articles mentioning it, and
        text_counter maps it to the total (case-sensitive) occurrence
        count of the original-cased entity across article texts
    """
    article_counter: Dict[str, int] = {}
    for entity_set in df.lower_entities:
        for name in entity_set:
            article_counter[name] = article_counter.get(name, 0) + 1

    text_counter: Dict[str, int] = {}
    for _, row in df.iterrows():
        body = row.text
        for name in row.entities:
            key = name.lower()
            text_counter[key] = text_counter.get(key, 0) + body.count(name)
    return article_counter, text_counter
def get_cased_entities(df: DataFrame) -> Dict[str, str]:
    """
    Map each lower-cased entity to its "best" original spelling — the
    variant observed with the most upper-case characters.
    :param df: articles with an `entities` column of name sets
    :return: lower-cased name -> best-cased name
    """
    best: Dict[str, str] = {}
    for entity_set in df.entities:
        for name in entity_set:
            key = name.lower()
            current = best.get(key)
            # Keep the spelling with strictly more upper-case letters.
            if current is None or n_uppercase(current) < n_uppercase(name):
                best[key] = name
    return best
def n_uppercase(text):
    """
    Return the number of upper-case characters in *text*.
    :param text: string to analyse
    :return: count of upper-case characters
    """
    count = 0
    for char in text:
        if char.isupper():
            count += 1
    return count
def validate_tree_size(tree, level=MAX_LENGTH, width=MAX_WIDTH):
    """
    Recursively verify that every node down to *level* has exactly
    *width* children.
    :param tree: tree to verify
    :param level: remaining depth to check (recursion counter)
    :param width: required number of children per node
    :return: True when the tree satisfies the size constraint
    """
    if level == 1:
        # Leaves-level nodes may omit the "children" key entirely.
        return len(tree.get("children", [])) == width
    if len(tree["children"]) != width:
        return False
    return all(
        validate_tree_size(child, level - 1) for child in tree["children"]
    )
def find_values(tree, key):
    """
    Collect every value stored under *key* anywhere in *tree*.

    The tree is round-tripped through JSON; an object_hook records the
    matching values, so dicts at any nesting depth are visited.
    :param tree: JSON-serializable tree to scan
    :param key: key whose values are collected
    :return: list of found values
    """
    found = []

    def _collect(node):
        if key in node:
            found.append(node[key])
        return node

    json.loads(json.dumps(tree), object_hook=_collect)  # Return value ignored.
    return found
def validate_tree_leaves(tree):
    """True when no article URL appears more than once in the tree."""
    urls = find_values(tree, "url")
    return len(set(urls)) == len(urls)
def get_recommendations_with_article(language: str, country: str):
    """
    Build a recommendation tree for the specified language and country
    :return: recommendation tree for the specified language and country
    """
    df = process_dataframe(language, country)
    article_entity_counter, text_entity_counter = init_entity_counters(df)
    cased_entities = get_cased_entities(df)
    blacklist_articles = set()
    blacklist_entities = set()
    entities = set(cased_entities.keys())
    # rank the candidate root entities by how often they occur
    possible_first_level_entities = select_most_frequent_entities(
        entities, article_entity_counter, text_entity_counter
    )
    children = []
    for child in possible_first_level_entities:
        if child in blacklist_entities:
            continue
        try:
            # NOTE(review): fresh empty sets are passed instead of the
            # accumulated blacklists above, so subtrees can internally reuse
            # items that other branches already claimed — confirm intended.
            # article_entity_counter is also passed twice (as both
            # entity_counter and article_entity_counter).
            next_step_tree = rec_build_tree(
                child,
                set(),
                set(),
                set(),
                df["lower_entities"],
                article_entity_counter,
                cased_entities,
                article_entity_counter,
                text_entity_counter,
                df,
                MAX_LENGTH - 1,
            )
        except NoValidArticle:
            # no article covers this entity path; try the next candidate
            continue
        # make later branches avoid what this subtree already used
        blacklist_articles.update(find_values(next_step_tree, "id"))
        blacklist_entities.update(
            [s.lower() for s in find_values(next_step_tree, "name")]
        )
        children.append(next_step_tree)
        if len(children) == MAX_WIDTH:
            break
    result = {"children": children, "name": "Source", "collapsed": False}
    return result
def build_leaf(id, df):
    """
    Build the leaf of the tree with the id specified
    :param id: object id to use for the leaf
    :return: dictionary for the leaf
    """
    article = df.loc[id]
    body = article.text
    # start the abstract after the first sentence so it does not repeat the title
    cut = body.find(".")
    abstract = body[cut + 1 : cut + 451].strip()
    return {
        "name": '<b>{}</b><br><a href="{}"> Read the article </a>'.format(
            article.title, article.url
        ),
        "abstract": abstract,
        "id": int(id),
        "collapsed": True,
        "value": 1,
    }
def rec_build_tree(
    step_entity,
    entities,
    blacklist_entities,
    blacklist_articles,
    serie,
    entity_counter,
    cased_entities,
    article_entity_counter,
    text_entity_counter,
    df,
    max_rec=MAX_WIDTH,
):
    """
    recursively build the tree
    :param step_entity: entity to use at this recursion
    :param entities: previous entities used to build this path
    :param blacklist_entities: entities which should not appear in the tree
    :param blacklist_articles: articles which should not appear in the tree
    :param serie: pandas series with object id as index and entities as values
    :param entity_counter: dictionary of entity as key and count as value. It is used to rank the entities.
    :param cased_entities: map lower-cased entity -> best-cased surface form
    :param article_entity_counter: entity -> number of articles mentioning it
    :param text_entity_counter: entity -> occurrences in article texts
    :param df: dataframe of articles (used to build leaves)
    :param max_rec: maximum number of recursion allowed for the tree
    :return: recommendation tree
    :raises NoValidArticle: when no article matches the accumulated entities
    """
    step_tree = {}
    step_tree["name"] = cased_entities[step_entity]
    # extend the entity path without mutating the caller's set
    new_entities = deepcopy(entities)
    new_entities.add(step_entity)
    if max_rec == 0:
        # recursion budget exhausted: attach a random matching article
        id = give_random_article(new_entities, blacklist_articles, serie)
        step_tree["children"] = [build_leaf(id, df)]
    else:
        unique_id = has_unique_result(new_entities, blacklist_articles, serie)
        if unique_id is None:
            # several articles still match: branch on the next entities
            next_entities = extract_selected_entity(
                new_entities,
                blacklist_entities,
                serie,
                article_entity_counter,
                text_entity_counter,
            )
            children = []
            for child in next_entities:
                if child in blacklist_entities:
                    continue
                try:
                    next_step_tree = rec_build_tree(
                        child,
                        new_entities,
                        blacklist_entities,
                        blacklist_articles,
                        serie,
                        entity_counter,
                        cased_entities,
                        article_entity_counter,
                        text_entity_counter,
                        df,
                        max_rec - 1,
                    )
                except NoValidArticle:
                    # dead branch: try the next candidate entity
                    continue
                # prevent reuse of this subtree's articles/entities elsewhere
                blacklist_articles.update(find_values(next_step_tree, "id"))
                blacklist_entities.update(
                    [s.lower() for s in find_values(next_step_tree, "name")]
                )
                children.append(next_step_tree)
                if len(children) == MAX_WIDTH:
                    break
            step_tree["children"] = children
            step_tree["collapsed"] = True
        elif unique_id == -1:
            # nothing matches at all: propagate failure to the caller
            raise NoValidArticle
        else:
            # exactly one article matches: make it a leaf
            step_tree["children"] = [build_leaf(unique_id, df)]
    return step_tree
def give_random_article(entities, blacklist_articles, serie):
    """
    Pick a random non-blacklisted article whose entity set covers *entities*.
    :param entities: entity set the article must contain
    :param blacklist_articles: articles which should not appear in the tree
    :param serie: pandas series with object id as index and entities as values
    :return: the chosen article id
    :raises NoValidArticle: when nothing matches
    """
    mask = (serie.apply(lambda tags: entities.issubset(tags))) & (
        ~serie.index.isin(blacklist_articles)
    )
    candidates = serie.loc[mask]
    if len(candidates) == 0:
        raise NoValidArticle
    return candidates.index[random.randint(0, len(candidates) - 1)]
def has_unique_result(entities, blacklist_articles, serie):
    """
    Check if the set of entities leads to a unique article selection
    :param entities: entity set to verify
    :param blacklist_articles: articles which should not appear in the tree
    :param serie: pandas series with object id as index and entities as values
    :return: the unique article id, -1 when nothing matches, None otherwise
    """
    mask = (serie.apply(lambda tags: entities.issubset(tags))) & (
        ~serie.index.isin(blacklist_articles)
    )
    matches = set(serie.loc[mask].index)
    if not matches:
        return -1
    if len(matches) == 1:
        return matches.pop()
    return None
def select_most_frequent_entities(
    entities, article_entity_counter, text_entity_counter
):
    """
    Decreasingly order the given entities using the article_entity_counter and text_entity_counter
    (article occurrences weigh 10x more than raw text occurrences)
    :param entities: entities to order
    :return: ordered entities
    """
    def weight(entity):
        return article_entity_counter[entity] * 10 + text_entity_counter[entity]

    return sorted(entities, key=weight, reverse=True)
def extract_selected_entity(
    entities, blacklist_entities, serie, article_entity_counter, text_entity_counter
):
    """
    Select the entities for the next level tree
    :param entities: entities to use to select the next article
    :param blacklist_entities: entities which should not appear in the tree
    :param serie: pandas series with object id as index and entities as values
    :return: candidate entities for the next level, most frequent first
    """
    matching = serie.loc[serie.apply(lambda tags: entities.issubset(tags))]
    # all entities co-occurring with the current path, minus path + blacklist
    candidates = set().union(*[tags for tags in matching])
    candidates = candidates.difference(entities.union(blacklist_entities))
    return select_most_frequent_entities(
        candidates, article_entity_counter, text_entity_counter
    )
# Module is import-only; there is nothing to run as a script.
if __name__ == "__main__":
    pass
|
plista/news_knowledge_tree | data_processing/find_entity.py | import functools
import sys
from typing import Set
import nltk
from flair.data import Sentence
from flair.models import SequenceTagger
from langdetect import detect
@functools.lru_cache(maxsize=1)
def get_tagger(language: str) -> SequenceTagger:
    """Load (and cache) the flair NER tagger for *language* ("de" or "en")."""
    model_by_language = {"de": "de-ner", "en": "ner-fast"}
    if language not in model_by_language:
        raise Exception("Invalid language")
    return SequenceTagger.load(model_by_language[language])
def filter_text(text: str) -> str:
    """remove unwanted character from the text which can disturb NER"""
    unwanted = "\\\xa0\"'[]()’“”\xad"
    return text.translate(str.maketrans("", "", unwanted))
def format_entities(entities: Set[str]) -> Set[str]:
    """
    Normalize raw NER entities.

    Strips one trailing punctuation mark, flattens newlines, drops empty
    strings and plural duplicates ("Apples" when "Apple" is also present).
    :param entities: raw entity strings
    :return: cleaned entity set
    """
    result = []
    for entity in entities:
        # BUG FIX: guard the [-1] accesses — an empty (or punctuation-only)
        # entity raised IndexError before the emptiness check was reached.
        if entity and entity[-1] in [".", ",", "?", "!", ":"]:
            entity = entity[0:-1]
        entity = entity.replace("\n", " ")
        if not entity:
            continue
        if entity[-1] == "s" and entity[:-1] in entities:
            continue
        result.append(entity)
    return set(result)
@functools.lru_cache(maxsize=512)
def find_entity(text: str, language: str) -> Set[str]:
    """extract entity using flair"""
    # NOTE(review): `global tagger` looks like a leftover — the tagger is
    # rebound below from get_tagger(), which already caches the model.
    global tagger
    filtered = filter_text(text)
    if not filtered:
        return set()
    # skip texts whose detected language does not match the requested one
    detected_language = detect(filtered)
    if language != detected_language:
        return set()
    # run NER sentence by sentence
    sent_tokens = nltk.sent_tokenize(filtered)
    sentences = [Sentence(i) for i in sent_tokens]
    tagger = get_tagger(language)
    tagger.predict(sentences)
    flair_entities = []
    for sentence in sentences:
        flair_entities.extend(
            [entity.text for entity in sentence.get_spans("ner")]
        )
    result = format_entities(set(flair_entities))
    return result
if __name__ == "__main__":
    # BUG FIX: find_entity(text) was called without its required `language`
    # argument, which raised TypeError. Take the language from argv,
    # defaulting to English.
    text = sys.argv[1]
    language = sys.argv[2] if len(sys.argv) > 2 else "en"
    print(find_entity(text, language))
|
plista/news_knowledge_tree | data_processing/main.py | <gh_stars>0
# Flask app exposing the Google-News headline tree over a small REST API.
from flask_restplus import Resource, Api, fields
from flask import Flask, request, jsonify
from flask_restplus import reqparse
from googlenews import process_headlines
from flask_cors import CORS
app = Flask(__name__)
api = Api(app)
CORS(app)  # allow cross-origin calls from the frontend
# swagger model describing the request payload
interaction_model = api.model(
    "interaction",
    {
        "language": fields.String("language of the article we want to fetch"),
        "country": fields.String("unicode of the country we want to fetch"),
    },
)
@api.route("/get_headlines")
class GetTree(Resource):
    """Endpoint returning the processed headline tree for a language/country."""
    # query-string parameters accepted by GET (shown in swagger)
    endpoint_arguments = reqparse.RequestParser()
    endpoint_arguments.add_argument("language", type=str, required=True)
    endpoint_arguments.add_argument("country", type=str, required=True)

    @api.expect(endpoint_arguments)
    def get(self):
        # Fetch and process headlines for the requested language/country.
        language = request.args.get("language")
        country = request.args.get("country")
        print(f"get headline for {language} in {country}")
        return process_headlines(language, country)
@api.route("/healthcheck")
class GetHealthCheck(Resource):
    """Liveness endpoint: always answers 200 with an ok message."""

    def get(self):
        """Return the static healthcheck payload."""
        return {"msg": "ok"}, 200
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ("Access-Control-Allow-Origin", "*"),
        ("Access-Control-Allow-Headers", "Content-Type,Authorization"),
        ("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS"),
    )
    for name, value in cors_headers:
        response.headers.add(name, value)
    return response
# Access swagger documentation at http://<address>:5001/
if __name__ == "__main__":
    # NOTE(review): debug=True should be disabled outside development.
    app.run(host="0.0.0.0", port=5001, debug=True)
|
moff4/gladius | db/tag_tag_map.py | <reponame>moff4/gladius
from pony import orm
from conf import conf
from .tag import Tag
class TagTagMap(conf.sql.Entity):
    """Co-occurrence edge between two tags."""
    _table_ = ('hashtag', 'tag_tag_map')
    tag1 = orm.Required(Tag, reverse='nears')
    tag2 = orm.Required(Tag, reverse='nears_')
    # shared-post count; the graph dump uses 1.0 / posts_num as edge weight
    posts_num = orm.Required(float)
    orm.PrimaryKey(tag1, tag2)
|
moff4/gladius | tasks/server.py |
from conf import conf
from k2.aeon import Aeon
from sm import (
VecRelSM,
)
task_description = 'run web api'
def start():
    """Spin up the Aeon web server with the relevance API mounted."""
    routes = {
        r'^/relevance': {
            r'^/vecrel$': VecRelSM(),
        },
    }
    server = Aeon(namespace=routes, **conf.aeon)
    server.run()
|
moff4/gladius | main.py | <gh_stars>0
#!/usr/bin/env python3.8
import sys
import asyncio
from k2.logger import BaseLogger as logger
from conf import conf
from tasks import TASKS
# Usage text printed for -h/--help; the Commands section is generated from
# the registered TASKS and their optional task_description attributes.
HELP_MSG = '''
Gladius - recomendation system
$ ./main.py [commands] [flags]
Flags:
-h, --help, -? - see this msg again
-s, --server - start web-server
--create-tables - create sql tables
--no-c - do not use C extension
Commands:
''' + '\n'.join(
    [
        '%s \t- %s' % (
            key,
            (
                handler.task_description
                if hasattr(handler, 'task_description') else
                'no task description'
            ),
        )
        for key, handler in TASKS.items()
    ]
) + '\n'
def setup():
    """Bind pony entities to tables; create them when --create-tables is set."""
    create = '--create-tables' in sys.argv
    conf.sql.generate_mapping(create_tables=create)
def run_task(task_name, f):
    """Run one task entry point, logging (not raising) any exception so a
    failing task does not abort the remaining ones."""
    try:
        f()
    except Exception as exc:
        asyncio.run(logger.exception('"{}" exception: {}', task_name, exc))
def main():
    """Resolve requested tasks from argv and run them in order."""
    selected = [(name, TASKS[name].start) for name in sys.argv if name in TASKS]
    if not selected:
        print('no tasks to run')
        return
    setup()
    for name, entry in selected:
        run_task(name, entry)
if __name__ == '__main__':
    # show help when any help flag is present, otherwise run requested tasks
    print(HELP_MSG) if set(sys.argv) & {'-h', '--help', '-?'} else main()
|
moff4/gladius | logic/rank/ext.py | #!/usr/bin/env python3
import ctypes
import numpy.linalg
import random
# Shared library with the native implementations (built from ext/).
mod = ctypes.CDLL(r'ext/utils.so')
"""
double interpolice(double, double* , double*,int);
"""
# interpolation over sampled points; see the C prototype above
__interpolice = ctypes.CFUNCTYPE(
    ctypes.c_double,
    ctypes.c_double,
    ctypes.POINTER(ctypes.c_double),
    ctypes.POINTER(ctypes.c_double),
    ctypes.c_int,
)(('interpolice', mod))
"""
weight_random_t* weight_random_create(long double*,long double*,long double*);
"""
# allocate a C-side weighted-random generator from three parameter blocks
_weight_random_create = ctypes.CFUNCTYPE(
    ctypes.c_void_p,
    ctypes.POINTER(ctypes.c_longdouble),
    ctypes.POINTER(ctypes.c_longdouble),
    ctypes.POINTER(ctypes.c_longdouble),
)(('weight_random_create', mod))
"""
int weight_random_rand(weight_random_t*,long double);
"""
# draw one sample given a uniform random in [0, 1)
_weight_random_rand = ctypes.CFUNCTYPE(
    ctypes.c_int,
    ctypes.c_void_p,
    ctypes.c_longdouble,
)(('weight_random_rand', mod))
"""
int weight_random_will_be(weight_random_t*, int, int)
"""
# extrapolate a current count to its expected end-of-day value
_weight_random_will_be = ctypes.CFUNCTYPE(
    ctypes.c_int,
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_int,
)(('weight_random_will_be', mod))
"""
void weight_random_destruct(weight_random_t*);
"""
# free the C-side structure (must be called explicitly)
_weight_random_destruct = ctypes.CFUNCTYPE(
    ctypes.c_void_p,
    ctypes.c_void_p,
)(('weight_random_destruct', mod))
def interpolice(ff: list, x: float) -> float:
    """Evaluate the C interpolation routine at *x*.

    ff is a list of (x, f(x)) sample pairs.
    """
    n = len(ff)
    xs = (ctypes.c_double * n)(*(point[0] for point in ff))  # sample x values
    ys = (ctypes.c_double * n)(*(point[1] for point in ff))  # sample f(x) values
    return __interpolice(ctypes.c_double(x), ys, xs, n)
class CWeightRandom:
    """ctypes wrapper around the C weighted-random generator.

    Fits an activity curve through three points and delegates sampling and
    prediction to ext/utils.so.
    """

    def __init__(self, data):
        """
        data = [
            [ x0 , y0 ] , - left zero point of the parabola
            [ x1 , y1 ] , - the peak
            [ x2 , y2 ] - point after the peak at distance ( x1-x0 )
        ]
        endpoint = min of seconds that:
        1. > x0
        2. <= x0 + 86000
        """
        # p = [ x0 , x1 , y1 , y2] (x values shifted so x0 becomes 0)
        self.x = data[0][0]
        p = [data[0][0] - self.x, data[1][0] - self.x, data[1][1], data[2][1]]
        # solve a*x^2 + b*x + c through (x0,0), (x1,y1) and (2*x1-x0,0)
        mm = [
            [p[0]**2, p[0], 1],
            [p[1]**2, p[1], 1],
            [(2 * p[1] - p[0])**2, (2 * p[1] - p[0]), 1]
        ]
        abc = list(numpy.linalg.solve(mm, [0, p[2], 0]))
        a = abc[0]
        b = abc[1]
        c = abc[2]
        # solve d - y*k = y*x (i.e. y = d/(x+k)) through the peak and 3rd point
        mm = [
            [1.0, -p[2]],
            [1.0, -p[3]]
        ]
        dk = numpy.linalg.solve(mm, [p[2] * p[1], p[3] * (2 * p[1] - p[0])])
        d = dk[0]
        k = dk[1]
        # marshal the three parameter blocks into C long-double arrays
        p0 = (ctypes.c_longdouble * 4)()
        for i in range(4):
            p0[i] = ctypes.c_longdouble(p[i])
        az = [a, b, c]
        p1 = (ctypes.c_longdouble * 3)()
        for i in range(len(az)):
            p1[i] = ctypes.c_longdouble(az[i])
        az = [d, k]
        p2 = (ctypes.c_longdouble * 2)()
        for i in range(len(az)):
            p2[i] = ctypes.c_longdouble(az[i])
        self.this = _weight_random_create(p0, p1, p2)

    def random(self, y=1) -> list:
        """
        return random number using spread function
        """
        # single draw is shifted back and wrapped into seconds-of-day;
        # a multi draw (y > 1) returns the raw C results
        return (
            [(_weight_random_rand(self.this, random.random()) + self.x) % 86400]
        ) if y <= 1 else (
            [_weight_random_rand(self.this, random.random()) for _ in range(y)]
        )

    def will_be(self, count_now: int, now: int) -> float:
        """
        predict how many obj will be in 24 hours
        """
        return _weight_random_will_be(
            self.this,
            ctypes.c_int(int(count_now)),
            ctypes.c_int(int(now)),
        )

    def destruct(self):
        """
        destructor: frees the C-side structure (must be called explicitly)
        """
        _weight_random_destruct(self.this)
if __name__ == '__main__':
    # Manual smoke test: sample the generator for several start hours and
    # plot the distribution of the drawn times over the day.
    for i in range(10, 14):
        print(i)
        date = i * 3600
        f = 1.0
        wr = CWeightRandom([
            [date, 0.0],
            [date + (3600 + 600) / f, 3200],
            [date + (2 * 3600) / f, 1850]
        ])
        az = []
        bz = []
        M = 5 * 10**3  # number of samples to draw
        for j in range(M):
            bz.append(j)
            az.append(wr.random())
        import matplotlib.pyplot as plt
        plt.plot(az, bz, 'ro')
        plt.axis([0, 3600 * 24, 0, M])
        plt.show()
        wr.destruct()
|
moff4/gladius | db/path.py | <filename>db/path.py
from pony import orm
from conf import conf
from .tag import Tag
class Path(conf.sql.Entity):
    """One node of a stored tag path: `tag` sits at position `pos` on the
    shortest path from `src` to `dst`."""
    _table_ = ('hashtag', 'path')
    src = orm.Required(Tag, reverse='path_src')
    dst = orm.Required(Tag, reverse='path_dst')
    tag = orm.Required(Tag, reverse='path_in')
    pos = orm.Required(int)  # 0 - src, max - dst
    orm.PrimaryKey(src, dst, tag)
|
moff4/gladius | logic/rank/__init__.py | <reponame>moff4/gladius
from .rank import ranks
__all__ = [
'ranks',
]
|
moff4/gladius | tasks/__init__.py | from tasks import shell
from tasks import server
from tasks import ranks
from tasks import clustering
from tasks import pather
from tasks import graph
from tasks import node2vec
# Registry of runnable tasks: CLI command name -> module exposing start().
TASKS = {
    'shell': shell,
    'server': server,
    'ranks': ranks,
    'clustering': clustering,
    'pather': pather,
    'graph': graph,
    'node2vec': node2vec,
}
|
moff4/gladius | tasks/pather.py | <reponame>moff4/gladius
import asyncio
import random
from pony import orm
import networkx as nx
from k2.logger import new_channel
from conf import conf
from logic import GRAPH
from db import Tag, Path
from utils import parse_args
from utils.parallel import parallel_run
ARGS = {
'limit': None,
}
logger = new_channel('pather')
task_description = 'find random pathes to fill Pather Table; args: %s' % ', '.join(list(ARGS))
def procc_worker(q_in, q_out):
    """Worker process: emit the dijkstra path (or an error string) for each
    queued (src, dst) pair; a None item terminates the worker."""
    graph = GRAPH.graph
    with orm.db_session:
        for src, dst in iter(q_in.get, None):
            try:
                q_out.put(nx.dijkstra_path(graph, src, dst))
            except Exception as e:
                q_out.put('build path: %s' % e)
    q_in.close()
    q_out.close()
    q_in.join_thread()
    q_out.join_thread()
async def generate_pathes(limit):
    """Sample *limit* random (src, dst) tag pairs and persist their graph
    paths into the Path table via parallel worker processes."""

    async def read(res):
        # handler for parallel_run: res is a path (list of tags) or an error
        def write(data, try_: int = 0):
            # persist one path; retry up to 3 times on transient DB errors
            try:
                with orm.db_session:
                    for idx, tag in enumerate(data):
                        Path(src=data[0], dst=data[-1], tag=tag, pos=idx)
                    return None
            except Exception as e:
                if try_ < 3:
                    # BUG FIX: retry with the `data` parameter, not the
                    # enclosing `res` closure variable.
                    return write(data, try_ + 1)
                return str(e)

        if res:
            if isinstance(res, list):
                await logger.info('build path for "{}" -> "{}"', res[0], res[-1])
                if res := write(res):
                    await logger.error(res)
            else:
                await logger.error(res)

    with orm.db_session:
        tags = list(t.tag for t in orm.select(t for t in Tag if t.checked))
    await parallel_run(
        itr=[
            (
                random.choice(tags),
                random.choice(tags),
            )
            for _ in range(limit)
        ],
        worker=procc_worker,
        handler=read,
        proc_num=conf.graph.proc_num,
    )
def start():
    """CLI entry: parse --limit and run the async path generation."""
    args = parse_args(ARGS)
    limit = int(args['limit']) if args['limit'] else None
    asyncio.run(generate_pathes(limit=limit))
|
moff4/gladius | logic/__init__.py |
from .rank import ranks
from .clustering import Clustering
from .graph import Graph, GRAPH
from .cursach import Cursach
__all__ = [
'ranks',
'Clustering',
'Graph',
'GRAPH',
'Cursach',
]
|
moff4/gladius | db/tag.py | from datetime import datetime
from pony import orm
from conf import conf
class Tag(conf.sql.Entity):
    """A hashtag with its computed metrics and relations."""
    _table_ = ('hashtag', 'tag')
    tag = orm.PrimaryKey(str)
    created = orm.Optional(datetime)
    checked = orm.Optional(int)  # used as a "processed" flag by the tasks
    rank_pop = orm.Optional(float, nullable=True)    # popularity rank (tasks/ranks.py)
    rank_qual = orm.Optional(float, nullable=True)   # quality rank (tasks/ranks.py)
    clustering = orm.Optional(float, nullable=True)  # coeff from tasks/clustering.py
    frequency = orm.Optional(float, nullable=True)
    posts = orm.Set('PostTag')
    nears = orm.Set('TagTagMap')   # edges where this tag is tag1
    nears_ = orm.Set('TagTagMap')  # edges where this tag is tag2
    path_src = orm.Set('Path')
    path_dst = orm.Set('Path')
    path_in = orm.Set('Path')
|
moff4/gladius | tasks/clustering.py | import asyncio
from pony import orm
from k2.logger import new_channel
from conf import conf
from db import Tag
from logic import Clustering
from utils import parse_args
from utils.parallel import parallel_run
ARGS = {
'dry': False,
'limit': None,
'tag': None,
'reverse': None,
}
logger = new_channel('clustering')
task_description = 'count clustering coeff for tag; args: %s' % ', '.join(list(ARGS))
def procc_worker(q_in, q_out):
    """Worker process: compute the clustering coefficient for each queued
    tag; a None item terminates the worker."""
    cl = Clustering()
    with orm.db_session:
        for tag in iter(q_in.get, None):
            q_out.put((tag, cl.clustering_coeff(tag)))
    q_in.close()
    q_out.close()
    q_in.join_thread()
    q_out.join_thread()
async def count_clustering(tag: str, limit: int, dry_mode: bool, reverse: bool):
    """Compute clustering coefficients and store them on Tag rows.

    :param tag: restrict to one tag, or falsy for all unprocessed tags
    :param limit: cap on how many tags to process
    :param dry_mode: compute and log, but do not write to the DB
    :param reverse: process tags in descending name order
    """

    async def read(res):
        # parallel_run handler: res is the (tag, coeff) tuple a worker emitted
        nonlocal done
        t, c = res
        done += 1
        await logger.info('clustering_coeff for "%s" is %s' % (t, c))
        if not dry_mode:
            Tag.get(lambda y: y.tag == t).clustering = c
            if not (done % 100):
                orm.commit()  # flush periodically to bound transaction size

    if tag:
        qs = orm.select(t for t in Tag if t.tag == tag)
    else:
        qs = orm.select(t for t in Tag if t.checked and t.clustering is None)
    if reverse:
        qs = qs.order_by(lambda t: orm.desc(t.tag))
    if limit:
        qs = qs.limit(limit)
    with orm.db_session:
        tags = {t.tag: None for t in qs}
    done = 0
    with orm.db_session:
        # BUG FIX: parallel_run(itr, handler, worker, proc_num) was called
        # without the worker callable; pass procc_worker explicitly and
        # adapt the handler to the single-argument calling convention.
        await parallel_run(
            itr=list(tags),
            handler=read,
            worker=procc_worker,
            proc_num=conf.rank.proc_num,
        )
def start():
    """CLI entry: parse flags and run the clustering computation."""
    args = parse_args(ARGS)
    limit = int(args['limit']) if args['limit'] else None
    asyncio.run(
        count_clustering(
            limit=limit,
            dry_mode=args['dry'],
            tag=args['tag'],
            reverse=args['reverse'],
        )
    )
|
moff4/gladius | logic/narrowness.py |
from typing import List
from logic.graph import GRAPH
def narrowness(tags: List[str]) -> float:
    """Average pairwise shortest-path distance between the given tags.

    Returns 1.0 for an empty list.
    NOTE(review): a single-element list makes `weights` empty and raises
    ZeroDivisionError — confirm callers never pass one tag.
    """
    if not tags:
        return 1.0
    # NOTE(review): multi_weight is an async coroutine but is not awaited
    # here, so this cache pre-warming never actually runs; each weight()
    # call below computes (and caches) its value on demand instead.
    GRAPH.multi_weight(
        (src, dst)
        for src in tags
        for dst in tags
    )
    res = (
        sum(
            weights := [
                GRAPH.weight(src, dst)
                for src in tags
                for dst in tags
                if src != dst
            ]
        ) / len(weights)
    )
    GRAPH.save()
    return res
|
moff4/gladius | logic/graph.py | import json
from typing import Iterable, Tuple, List
import networkx as nx
from k2.logger import new_channel
from conf import conf
from utils.parallel import parallel_run
def worker(q_in, q_out):
    """Worker process: emit (src, dst, distance) for each queued pair, or an
    error string on failure; a None item terminates the worker."""
    graph = GRAPH.graph
    for src, dst in iter(q_in.get, None):
        try:
            q_out.put((src, dst, nx.dijkstra_path_length(graph, src, dst)))
        except Exception as e:
            q_out.put('build path: %s' % e)
    q_in.close()
    q_out.close()
    q_in.join_thread()
    q_out.join_thread()
class Graph:
    """Lazy wrapper around the tag graph with a persistent distance cache."""

    def __init__(self):
        self._graph = None   # networkx graph, loaded on first access
        self._loaded = False
        self.logger = new_channel('graph')
        self._cache_changed = False  # only rewrite the dump file when dirty
        if conf.api.cache_dump_enable:
            try:
                # NOTE(review): eval() on keys read from the cache file runs
                # arbitrary expressions — the file must be trusted.
                self.cache = {
                    eval(key): value
                    for key, value in json.load(open(conf.api.cache_dump_file)).items()
                }
            except (IOError, json.JSONDecodeError):
                self.cache = dict()
        else:
            self.cache = dict()

    def __del__(self):
        self.save()

    def save(self):
        """Persist the distance cache to disk (no-op when clean/disabled)."""
        if conf.api.cache_dump_enable and self._cache_changed:
            try:
                data = json.dumps(
                    {
                        str(key): value
                        for key, value in self.cache.items()
                    }
                )
                with open(conf.api.cache_dump_file, 'w') as f:
                    f.write(data)
            except (IOError, json.JSONDecodeError):
                # best effort: losing the cache only costs recomputation
                ...

    def load(self):
        # read the weighted edge list produced by the graph dump task (once)
        if not self._loaded:
            self._graph = nx.read_weighted_edgelist(conf.graph.file)
            self._loaded = True

    async def handler(self, res):
        # parallel_run callback: res is an error string or (src, dst, weight)
        if isinstance(res, str):
            await self.logger.error(res)
        else:
            src, dst, w = res
            self.cache[self.key(src, dst)] = w
            self._cache_changed = True

    @staticmethod
    def key(src, dst):
        # order-independent cache key for an undirected pair
        return (src, dst) if src > dst else (dst, src)

    @property
    def graph(self):
        self.load()
        return self._graph

    async def multi_weight(self, itr: Iterable[Tuple[str, str]]) -> None:
        """Precompute (in parallel) the distances for all uncached pairs."""
        if len(
            itr := list(
                {
                    key
                    for src, dst in itr
                    if src != dst
                    if (key := self.key(src, dst)) not in self.cache
                }
            )
        ) > 20:
            # only worth spawning worker processes for a big enough batch
            await self.logger.debug('gonna count weights for {} pairs', len(itr))
            await parallel_run(
                itr=itr,
                handler=self.handler,
                worker=worker,
                proc_num=conf.graph.proc_num,
            )
            await self.logger.debug('counted weights, cache - {}', len(self.cache))

    def weight(self, src: str, dst: str) -> float:
        """Shortest-path length between two tags, cached."""
        if src == dst:
            return 0.0
        if (key := self.key(src, dst)) not in self.cache:
            self.cache[key] = nx.dijkstra_path_length(self.graph, *key)
        return self.cache[key]

    def path(self, src: str, dst: str) -> List[str]:
        """Shortest path (list of tags) between two tags."""
        if src == dst:
            return [src]
        return nx.dijkstra_path(self.graph, src, dst)

    def __contains__(self, item):
        return item in self.graph
GRAPH = Graph()
|
moff4/gladius | db/rought.py |
from pony import orm
from conf import conf
class Rought(conf.sql.Entity):
    """Precomputed pairwise tag weight table."""
    _table_ = ('hashtag', 'rought')
    tag1 = orm.Required(str)
    tag2 = orm.Required(str)
    weight = orm.Required(float)
    orm.PrimaryKey(tag1, tag2)

    @staticmethod
    def create_multi(roughts):
        # Bulk upsert rows given as (tag1, tag2, weight) tuples. Only the
        # %s placeholder scaffolding is string-built; all values go through
        # parameter binding, so this is safe from injection.
        if roughts:
            sql = '''
            INSERT INTO `rought` (`tag1`, `tag2`, `weight`) values %s
            ON DUPLICATE KEY UPDATE
            `tag1` = values(`tag1`),
            `tag2` = values(`tag2`),
            `weight` = values(`weight`)
            ''' % (
                ','.join(['(%s)' % ','.join(['%s'] * len(row)) for row in roughts])
            )
            con = conf.sql.get_connection()
            cur = con.cursor()
            cur.execute(sql, [arg for row in roughts for arg in row])
            cur.close()
            con.commit()
|
moff4/gladius | tasks/graph.py | import asyncio
from pony import orm
from k2.logger import new_channel
from conf import conf
from db import TagTagMap
logger = new_channel('graph-dump')
task_description = 'dump graph to file (for clustering & pather)'
async def dump():
    """Dump the tag co-occurrence graph as a weighted edge-list file.

    The edge weight is 1 / posts_num, so more co-occurrences mean a
    shorter graph distance.
    """
    done = 0
    try:
        with orm.db_session:
            await logger.info('gonna dump graph into file "{}"', conf.graph.file)
            with open(conf.graph.file, 'w') as f:
                for row in orm.select(tag for tag in TagTagMap):
                    f.write('{} {} {}\n'.format(row.tag1.tag, row.tag2.tag, 1.0 / row.posts_num))
                    done += 1
                    if not (done % 1000):
                        await logger.debug('dumped {} rows', done)
    except Exception as ex:
        await logger.error('got exception: {}', ex)
    else:
        await logger.info('success!')
def start():
    # Task entry point: run the async graph dump to completion.
    asyncio.run(dump())
|
moff4/gladius | utils/__init__.py | <gh_stars>0
import sys
def parse_args(default_args):
    """Parse --key[=value] flags from argv on top of the given defaults.

    A bare --key yields True; --key=value yields the string value.
    :param default_args: dict of flag name -> default value
    :return: new dict with parsed values (defaults left for absent flags)
    """
    args = dict(default_args)
    for token in sys.argv[1:]:
        for key in args:
            prefix = '--{}'.format(key)
            # BUG FIX: the original used a bare startswith() prefix match and
            # mutated the token mid-loop, so e.g. --dryrun would set `dry`
            # and later keys were checked against a truncated token.
            if token == prefix:
                args[key] = True
            elif token.startswith(prefix + '='):
                args[key] = token[len(prefix) + 1:]
    return args
|
moff4/gladius | db/__init__.py |
from .tag import Tag
from .post import Post
from .rought import Rought
from .post_tag import PostTag
from .tag_tag_map import TagTagMap
from .path import Path
__all__ = [
'Tag',
'Post',
'Rought',
'PostTag',
'TagTagMap',
'Path',
]
|
moff4/gladius | db/post_tag.py | from pony import orm
from datetime import datetime
from conf import conf
from .post import Post
from .tag import Tag
class PostTag(conf.sql.Entity):
    """Link table between posts and tags."""
    _table_ = ('hashtag', 'post_tag')
    post = orm.Required(Post, reverse='tags')
    tag = orm.Required(Tag, reverse='posts')
    created = orm.Optional(datetime)

    @staticmethod
    def rank_weights(tag):
        # Return per-metric weights for a tag: post-count divided by the
        # summed views / likes / reposts (0.0 when a denominator is zero).
        # Aggregates are fetched in one SQL round trip; $(tag) is a pony
        # query parameter, not string interpolation.
        res = [
            float(i) if i is not None else 0.0
            for i in conf.sql.select(
                '''SELECT
                count(p.post_id),
                sum(p.views),
                sum(p.likes),
                sum(p.reposts)
                FROM (
                select * from hashtag.post_tag
                where tag = $(tag)
                ) pt left join hashtag.post p
                on pt.post = p.post_id;
                '''
            )[0]
        ]
        return [
            (res[0] / res[1]) if res[1] else 0.0,
            (res[0] / res[2]) if res[2] else 0.0,
            (res[0] / res[3]) if res[3] else 0.0,
        ]
|
moff4/gladius | logic/clustering.py |
from collections import defaultdict
from conf import conf
SQL_LOAD_NEAREST = '''
SELECT distinct pt1.tag, pt2.tag
FROM (
select *
from hashtag.post_tag
where tag in (%s)
) pt1 inner join hashtag.post_tag pt2 on pt1.post = pt2.post
and pt1.tag != pt2.tag
'''
LOAD_LIMIT = 50
class Clustering:
    """Clustering-coefficient computation over the tag co-occurrence graph.

    Neighbour sets are fetched lazily from SQL; tags expected to be needed
    soon are queued and fetched in batches.
    """

    def __init__(self):
        # tag -> set of neighboors
        self.__near = defaultdict(set)
        self._loaded = set()  # tags whose neighbours are already fetched
        self._queue = set()   # tags likely needed soon (prefetch candidates)

    def nears(self, tag: str) -> set:
        """Return the neighbour set of *tag*, fetching from SQL on a miss."""
        if tag in self._loaded:
            return self.__near[tag]
        # batch the requested tag with up to ~LOAD_LIMIT queued tags
        tags = [tag]
        for i, t in enumerate(self._queue - self._loaded):
            tags.append(t)
            if i > LOAD_LIMIT:
                break
        conn = conf.sql.get_connection()
        cur = conn.cursor()
        cur.execute(SQL_LOAD_NEAREST % ','.join(['%s'] * len(tags)), tags)
        row = cur.fetchone()
        while row is not None:
            self.__near[(t := row[0])].add(row[1])
            if t in self._queue:
                self._queue.remove(row[0])
            row = cur.fetchone()
        self._loaded.update(tags)
        return self.__near[tag]

    def clustering_coeff(self, tag: str) -> float:
        """Local clustering coefficient of *tag* (1.0 when degree <= 1)."""
        near = self.nears(tag)
        self._queue.update(near)  # neighbours will be needed just below
        k = len(near)
        if k <= 1:
            c = 1.0
        else:
            # undirected edges between neighbours, deduped via an ordered key
            fr_eges = {
                '@'.join([i, j] if i > j else [j, i])
                for i in near
                for j in (near & self.nears(i))
            }
            c = 2 * len(fr_eges) / (k * (k - 1))
        return c
|
moff4/gladius | tasks/ranks.py | import math
import asyncio
from pony import orm
from multiprocessing.pool import Pool
from k2.logger import new_channel
from conf import conf
from db import (
Tag,
PostTag,
)
from logic import ranks
from utils import parse_args
logger = new_channel('ranks')
ARGS = {
'dry': False,
'tags': None,
'limit': None,
'reverse': None,
}
task_description = 'count ranks for tags; args: %s' % ', '.join(list(ARGS))
def parallel_post_ranks(args):
    """Pool adapter: unpack one argument tuple for post_ranks."""
    post, weight_views, weight_likes, weight_reposts = args
    return post_ranks(post, weight_views, weight_likes, weight_reposts)
def post_ranks(post, weight_views, weight_likes, weight_reposts):
    """Compute (rank_pop, rank_qual) for one post snapshot dict."""
    return ranks(
        post_date=post['post_date'],
        views=post['views'],
        likes=post['likes'],
        reposts=post['reposts'],
        _time=post['timestamp'],
        weight_views=weight_views,
        weight_likes=weight_likes,
        weight_reposts=weight_reposts,
    )
async def tag_ranks(tag: Tag, dry_mode: bool=False) -> (float, float):
    """Aggregate per-post ranks into (rank_pop, rank_qual) for one tag.

    Already-ranked tags return their stored values; otherwise post ranks
    are computed in a process pool and averaged on a log scale.
    :param tag: Tag entity to rank
    :param dry_mode: when True, do not write results back to the entity
    """
    if tag.rank_pop is not None and tag.rank_qual is not None:
        return tag.rank_pop, tag.rank_qual
    p, q = 0.0, 0.0
    weight_views, weight_likes, weight_reposts = PostTag.rank_weights(tag.tag)
    # snapshot the post metrics so worker processes need no DB access
    pts = [
        {
            'post_date': pt.post.post_date,
            'views': pt.post.views,
            'likes': pt.post.likes,
            'reposts': pt.post.reposts,
            'timestamp': pt.post.timestamp,
        }
        for pt in tag.posts.filter(lambda p: p.post.from_group > 0)
    ]
    c = len(pts)
    proc_num = conf.rank.proc_num
    with Pool(proc_num) as pool:
        b = int(c / proc_num + 1)  # chunk size per process
        for _pop, _qual in pool.map(
            parallel_post_ranks,
            [
                (
                    pt,
                    weight_views,
                    weight_likes,
                    weight_reposts,
                )
                for i in range(proc_num)
                for pt in pts[b * i: b * (i + 1)]
            ]
        ):
            # BUG FIX: ranks() returns (rank_pop, rank_qual); the original
            # added rank_pop into BOTH totals and never used rank_qual.
            p += _pop
            q += _qual
    if c:
        p = math.log(1.0 + float(p) / float(c))
        q = math.log(1.0 + float(q) / float(c))
    else:
        p = 0.0
        q = 0.0
    if not dry_mode:
        tag.rank_pop = p
        tag.rank_qual = q
    await logger.debug(
        '"{}": p{:.4f} q{:.4f}',
        tag.tag,
        p,
        q,
    )
    return p, q
async def count_ranks(qs, dry_mode):
    """Rank each tag in *qs*; return how many tags were processed.

    Exceptions are logged and processing stops; partial progress is kept.
    """
    x = 0
    try:
        for tag in qs:
            await tag_ranks(tag, dry_mode)
            x += 1
            if not dry_mode and not (x % 100):
                orm.commit()  # flush periodically to bound transaction size
    except Exception:
        await logger.exception('T.ranks counted:')
    # BUG FIX: the original returned from a `finally` block, which also
    # swallowed BaseExceptions such as KeyboardInterrupt.
    return x
async def count_all(qs, dry_mode):
    """Process the whole queryset in batches of 100 until exhausted."""
    done = 0
    count = qs.count()
    while done < count:
        batch = qs.limit(100)
        # `or 1` guarantees forward progress even when a batch yields zero
        done += await count_ranks(qs=batch, dry_mode=dry_mode) or 1
@orm.db_session
def start():
    """CLI entry: select tags still missing ranks and compute them."""
    args = parse_args(ARGS)
    # only tags that were checked but not yet ranked
    qs = orm.select(
        t
        for t in Tag
        if t.checked is not None
        if t.rank_pop is None or t.rank_qual is None
    )
    if args['tags']:
        qs = qs.filter(lambda t: t.tag in args['tags'].split(','))
    if args['reverse']:
        qs = qs.order_by(lambda t: orm.desc(t.tag))
    if args['limit']:
        qs = qs.limit(int(args['limit']))
        coro = count_ranks(qs=qs, dry_mode=args['dry'])
    elif args['tags'] is None:
        # no limit and no explicit tag list: batch through everything
        coro = count_all(qs=qs, dry_mode=args['dry'])
    else:
        coro = count_ranks(qs=qs, dry_mode=args['dry'])
    try:
        asyncio.run(coro)
    except KeyboardInterrupt:
        # allow clean manual interruption; partial progress is committed
        ...
|
moff4/gladius | sm/vecrel.py | <reponame>moff4/gladius
from k2.aeon.sitemodules import (
AuthSM,
RestSM,
)
from conf import conf
from logic import Cursach
# Registry of relevance algorithms exposed by the API:
# name -> {'func': coroutine to call, 'params': accepted extra parameters}.
ALG_MAP = {
    'mindminer': {
        'func': (cursach := Cursach()).relevanse,
        'params': {},
    },
    'rankedminer': {
        'func': cursach.ranked_relevanse,
        'params': {},
    }
}
class VecRelSM(AuthSM, RestSM):
    """REST site-module exposing the relevance algorithms.

    GET lists the available algorithms; POST scores a batch of cases.
    """
    # validation schema for the POST payload
    POST_schema = {
        'type': dict,
        'value': {
            'alg': {
                'type': 'const',
                'value': set(ALG_MAP)  # must be a registered algorithm name
            },
            'cases': {
                'type': dict,
                'anykey': {
                    'type': dict,
                    'value': {
                        'user': {'type': list, 'value': str},
                        'target': {'type': list, 'value': str},
                        'params': {'type': dict, 'default': dict},
                    },
                },
            },
        },
    }

    @staticmethod
    def authorizator(request):
        # shared-secret auth via the x-api-secret header
        return request.headers.get('x-api-secret') == conf.api.secret

    @staticmethod
    def get(request):
        # List available algorithms and their accepted parameters.
        return {
            'result': [
                {
                    'name': alg_name,
                    'params': alg_data['params'],
                }
                for alg_name, alg_data in ALG_MAP.items()
            ]
        }

    async def post(self, request):
        # Run the requested algorithm over the submitted cases.
        return {
            'result': await ALG_MAP[request.data['alg']]['func'](request.data['cases'])
        }
|
moff4/gladius | utils/parallel.py | import time
from multiprocessing import Process, Queue
from typing import List, Callable
async def parallel_run(itr: List, handler: Callable, worker: Callable, proc_num: int):
    """Fan *itr* items out to `proc_num` worker processes, feeding every
    result to the async *handler*.

    Workers read items from q_in and must emit exactly one result on q_out
    per item; a None item tells a worker to shut down.
    NOTE(review): *itr* is mutated in place (pop(0) + appended sentinels) —
    callers must not reuse the list they pass in.
    """
    async def read(q):
        # drain everything currently queued, handing each result to handler
        x = 0
        while not q.empty():
            res = q.get()
            x += 1
            await handler(res)
        return x

    def check_procs(az):
        # keep live processes; join the ones that have exited
        bz = []
        for p in az:
            if p.is_alive():
                bz.append(p)
            else:
                p.join()
        return bz

    len_ = len(itr)
    itr += [None] * proc_num  # one shutdown sentinel per worker
    proc_num = min(proc_num, len_)
    q_in, q_out = Queue(), Queue()
    for i in range(proc_num):
        q_in.put(itr.pop(0))
    procs = [
        (p := Process(target=worker, args=(q_in, q_out))).start() or p
        for _ in range(proc_num)
    ]
    done = 0
    while done < len_ and procs:
        done += await read(q_out)
        # top up the input queue in small batches to bound its size
        i = 0
        while itr and i < 20:
            q_in.put(itr.pop(0))
            i += 1
        time.sleep(0.001)  # busy-wait throttle; briefly blocks the event loop
        procs = check_procs(procs)
    for p in procs:
        p.join()
    await read(q_out)  # collect any results that arrived after the loop
|
moff4/gladius | logic/rank/rank.py | #!/user/bin/env python3
import math
import time
from conf import conf
from .wr import (
interpolice,
WeightRandom,
WR_params,
)
# Sampling-loop counts per metric value: small counts get many simulation
# samples, larger counts need fewer (mark_rank falls back to 1 above 99).
LOOPS_MAP = {
    **{i: 50 for i in range(10)},
    **{i: 10 for i in range(10, 50)},
    **{i: 5 for i in range(50, 100)},
}
def f(_time) -> float:
    """Daily activity factor for a unix timestamp, based on the local hour.

    NOTE(review): `interpolice` here comes from .wr and is called with a
    single argument (hour as a float) — unlike the two-argument
    interpolice in logic/rank/ext.py; confirm .wr provides this signature.
    """
    tt = time.localtime(_time)
    return interpolice((float(tt.tm_hour) + (float(tt.tm_min) / 60.0))) + 1
def mark_rank(j, w, post_date, wr) -> float:
    """Weight *w* normalised by j simulated daily-activity samples."""
    loops = LOOPS_MAP.get(j, 1)
    denominator = sum(
        1.0 + f(post_date + sum(wr.random(loops)) / float(loops))
        for _ in range(j)
    )
    return w / denominator
def ranks(
    post_date: int,
    likes: int,
    reposts: int,
    views: int,
    _time: int,
    weight_views: float,
    weight_likes: float,
    weight_reposts: float,
) -> tuple:
    """
    count rank for post
    :rtype tuple: (rank_pop as float, rank_qual as float)
    """
    def will_be(x):
        # extrapolate a metric to its expected 24h value (keep x on failure)
        if (x > 0) and (
            y := int(
                wr.will_be(
                    count_now=x,
                    now=_time,
                )
            )
        ) > 0:
            return y
        return x

    wr = WeightRandom(WR_params(post_date, f(post_date)))
    views = will_be(views)
    # quality: likes and reposts weighted by simulated activity at post time
    rank_qual = sum(
        mark_rank(j, w, post_date, wr)
        for j, w in [
            (will_be(likes), weight_likes),
            (will_be(reposts), weight_reposts),
        ]
        if j
    )
    rank_pop = rank_qual
    if views:
        # normalise quality by views (scaled by the configured precision)
        rank_qual /= views / conf.rank.quality_precision
        rank_qual = math.log(rank_qual + 1.0)
    else:
        rank_qual = 0.0
    rank_pop = math.log(rank_pop + 1.0)
    if views:
        # popularity additionally rewards raw view counts
        rank_pop += mark_rank(views, weight_views, post_date, wr)
    wr.destruct()  # free the underlying C structure
    return rank_pop, rank_qual
|
moff4/gladius | sm/__init__.py | <gh_stars>0
from .vecrel import VecRelSM
__all__ = [
'VecRelSM',
]
|
moff4/gladius | logic/cursach.py | <filename>logic/cursach.py
from typing import Any, List, Dict
from pony import orm
from k2.logger import new_channel
from db import Tag
from .graph import GRAPH
class Cursach:
    """Relevance scoring between a user's tag set and a target tag set,
    based on shortest-path distances in the tag graph."""

    def __init__(self):
        self.logger = new_channel('cursach')

    async def prepare(self, cases: Dict[str, Any]):
        # pre-warm the graph distance cache for every (user, target) pair
        await GRAPH.multi_weight(
            (src, dst)
            for _, data in cases.items()
            for src in data['user']
            if src in GRAPH
            for dst in data['target']
            if dst in GRAPH
        )

    async def relevanse_one(
        self,
        user: List[str],
        target: List[str],
        params: Dict[str, Any],
        rank_map: Dict[str, float] = None
    ):
        """Average rank-scaled distance between user and target tags.

        Tags missing from the graph are skipped; returns 0.0 when no pair
        could be scored. NOTE(review): *params* is currently unused.
        """
        rank_map = rank_map or {}
        r = [
            GRAPH.weight(src, dst) * rank_map.get(src, 1.0) * rank_map.get(dst, 1.0)
            for src in user
            if src in GRAPH
            for dst in target
            if dst in GRAPH
        ]
        return (sum(r) / len(r)) if r else 0.0

    async def relevanse(self, cases: Dict[str, Any]):
        """Score every case with uniform tag ranks."""
        await self.prepare(cases)
        res = {
            key: await self.relevanse_one(user=data['user'], target=data['target'], params=data['params'])
            for key, data in cases.items()
        }
        GRAPH.save()
        return res

    async def ranked_relevanse(self, cases: Dict[str, Any]):
        """Score every case, scaling distances by each tag's rank_pop."""
        await self.prepare(cases)
        tags = {
            tag
            for _, data in cases.items()
            for pool_key in ['user', 'target']
            for tag in data[pool_key]
        }
        with orm.db_session:
            rank_map = {
                tag.tag: tag.rank_pop
                for tag in Tag.select(lambda t: t.tag in tags)
            }
        res = {
            key: await self.relevanse_one(
                user=data['user'],
                target=data['target'],
                params=data['params'],
                rank_map=rank_map,
            )
            for key, data in cases.items()
        }
        GRAPH.save()
        return res
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.