from unittest import TestCase
from unittest.mock import MagicMock
from sciit import IssueSnapshot
from tests.external_resources import safe_create_repo_dir, remove_existing_repo
class TestIssueSnapshot(TestCase):
def setUp(self):
self.data = {
'issue_id': '1',
'title': 'new issue here',
'file_path': '.gitignore',
'contents': '# Adding a new thing\nAuthor: someone on the team'}
self.data1 = {
'issue_id': '2',
'title': 'new issue here2',
'file_path': '.gitignore',
'contents': '# something different'}
self.data3 = {
'issue_id': '2',
'title': 'The title of your issue',
'description': 'A description of you issue as you\nwant it to be ``markdown`` supported',
'assignees': 'nystrome, kevin, daniels',
'due_date': '12 oct 2018',
'labels': 'in-development',
'weight': '4',
'priority': 'high',
'file_path': 'README.md'}
safe_create_repo_dir('dummy_repo')
self.commit = MagicMock()
self.issue_snapshot_1 = IssueSnapshot(self.commit, self.data.copy(), ['master'])
self.issue_snapshot_2 = IssueSnapshot(self.commit, self.data1.copy(), ['master'])
def test_create_issue_snapshot(self):
issue_snapshot = IssueSnapshot(self.commit, self.data.copy(), ['master'])
self.assertIn(self.data['file_path'], issue_snapshot.data['file_path'])
self.assertIn(self.data['contents'], issue_snapshot.data['contents'])
def test_create_existing_issue_snapshot_returns_existing_issue(self):
issue = IssueSnapshot(self.commit, self.data, ['master'])
self.assertEqual(self.issue_snapshot_1, issue)
def test_second_issue_created_gt_first(self):
self.assertGreater(self.issue_snapshot_2, self.issue_snapshot_1)
def test_first_issue_created_lt_second(self):
self.assertLess(self.issue_snapshot_1, self.issue_snapshot_2)
def test_issue_string_printed_properly(self):
self.assertTrue('@' in str(self.issue_snapshot_1))
def test_create_issue_snapshot_full_metadata(self):
issue_snapshot = IssueSnapshot(self.commit, self.data3.copy(), ['master'])
self.assertTrue(hasattr(issue_snapshot, 'issue_id'))
self.assertTrue(hasattr(issue_snapshot, 'title'))
self.assertTrue(hasattr(issue_snapshot, 'description'))
self.assertTrue(hasattr(issue_snapshot, 'assignees'))
self.assertTrue(hasattr(issue_snapshot, 'due_date'))
self.assertTrue(hasattr(issue_snapshot, 'labels'))
self.assertTrue(hasattr(issue_snapshot, 'weight'))
self.assertTrue(hasattr(issue_snapshot, 'priority'))
self.assertTrue(hasattr(issue_snapshot, 'file_path'))
def test_get_issue_snapshot_full_metadata(self):
issue_snapshot = IssueSnapshot(self.commit, self.data3.copy(), ['master'])
self.assertTrue(hasattr(issue_snapshot, 'issue_id'))
self.assertTrue(hasattr(issue_snapshot, 'title'))
self.assertTrue(hasattr(issue_snapshot, 'description'))
self.assertTrue(hasattr(issue_snapshot, 'assignees'))
self.assertTrue(hasattr(issue_snapshot, 'due_date'))
self.assertTrue(hasattr(issue_snapshot, 'labels'))
self.assertTrue(hasattr(issue_snapshot, 'weight'))
self.assertTrue(hasattr(issue_snapshot, 'priority'))
self.assertTrue(hasattr(issue_snapshot, 'file_path'))
def tearDown(self):
remove_existing_repo('dummy_repo')
|
# Advent of Code 2015 - Day 4 Part 1
# 17 Nov 2021 Brian Green
#
# Problem:
# Find the lowest positive number (no leading zeroes: 1, 2, 3, ...) that, appended to the
# secret key 'iwrupvqb', produces an MD5 hash whose hex digest starts with five zeroes.
#
import hashlib
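# Brute force: hash the secret key followed by the candidate number (two update()
# calls hash the concatenation, i.e. md5(b'iwrupvqb' + str(base).encode())) and
# stop at the first hex digest beginning with '00000'.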
prefix = '11111'
base = -1
while prefix != '00000':
base += 1
xhash = hashlib.md5()
xhash.update(b'iwrupvqb')
xhash.update(str(base).encode())
prefix = xhash.hexdigest()[:5]
print(f"{base} {prefix}")
print(base)
|
import os
import sys
from datetime import datetime
import csv
# Layer code, like parsing_lib, is added to the path by AWS.
# To test locally (e.g. via pytest), we have to modify sys.path.
# pylint: disable=import-error
try:
import parsing_lib
except ImportError:
sys.path.append(
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'common/python'))
import parsing_lib
private_public_map = {'Público': 'Public', 'Privado': 'Private'}
def convert_date(date_str: str):
"""
Convert raw date field into a value interpretable by the dataserver.
The date is listed in YYYY-mm-dd format
"""
date = datetime.strptime(date_str, "%Y-%m-%d")
return date.strftime("%m/%d/%Y")
def convert_gender(raw_gender):
if raw_gender == "M":
return "Male"
if raw_gender == "F":
return "Female"
def convert_location(entry):
'''
    The only information we have is the province where the case was diagnosed/hospitalised.
Geocoding function can't parse CABA so replacing with Buenos Aires.
'''
if entry['carga_provincia_nombre']:
if entry['carga_provincia_nombre'] == 'CABA':
return {
"query": "Buenos Aires, Argentina"}
else:
return {
"query": f"{entry['carga_provincia_nombre']}, Argentina"}
else:
return {"query": "Argentina"}
def convert_age(entry):
'''
    Return a float specifying age in years. If the age field is empty, return None.
'''
if entry['edad']:
if entry['edad_años_meses'] == 'Años':
return float(entry['edad'])
elif entry['edad_años_meses'] == 'Meses':
return float(entry['edad']) / 12
return None
def get_confirmed_event(entry):
if entry['fecha_diagnostico']:
confirmation_date = convert_date(entry['fecha_diagnostico'])
note = 'Using Date of Diagnosis as the date of confirmation.'
elif entry['fecha_inicio_sintomas']:
confirmation_date = convert_date(entry['fecha_inicio_sintomas'])
note = 'Using Date of Symptom Onset as the date of confirmation, because Date of Diagnosis is missing.'
elif entry['fecha_apertura']:
confirmation_date = convert_date(entry['fecha_apertura'])
note = 'Using Date of Case Opening as the date of confirmation, because both Date of Diagnosis and Date of Symptom Onset are missing.'
if 'Caso confirmado por laboratorio' in entry['clasificacion']:
confirmed_value = 'Laboratory Test'
elif 'Caso confirmado por criterio clínico-epidemiológico' in entry['clasificacion']:
confirmed_value = 'Clinical Diagnosis'
else:
confirmed_value = 'Method Unknown'
confirmed_event = {
"name": "confirmed",
"value": confirmed_value,
"dateRange":
{
"start": confirmation_date,
"end": confirmation_date
}}
return confirmed_event, note
def convert_residential_location(entry):
'''
This gets the residential address of the patient. Note this is not used to locate the case.
'''
query_terms = [term for term in [
entry.get("residencia_provincia_nombre", ""),
entry.get("residencia_departamento_nombre", ""),
entry.get("residencia_pais_nombre", "")]
if term]
return ", ".join(query_terms)
def parse_cases(raw_data_file, source_id, source_url):
"""
Parses G.h-format case data from raw API data.
    We currently only incorporate cases classified ('clasificacion_resumen') as 'Confirmed'. However,
    970k out of 1.5M cases are listed as 'Discarded', even though many have data values resembling confirmed
    Covid-19 patients, e.g. date_of_diagnosis, ICU_admission, mechanical breathing assistance. Future versions may
    want to modify this behaviour.
    For cases classified as Confirmed but lacking a Date of Diagnosis, we use Date of Symptom Onset where present,
    and Date of Case Opening where neither Date of Diagnosis nor Date of Symptom Onset is present.
    For case location, we use 'Province of case loading' (carga_provincia_nombre). This is where
    the laboratory tests were carried out, so it may not always correspond to the exact location of the case, but it
    is the best proxy we have. The other location data refers to the residential address of the patient.
"""
with open(raw_data_file, "r") as f:
reader = csv.DictReader(f)
for entry in reader:
notes = []
if entry["clasificacion_resumen"] == "Confirmado":
case = {
"caseReference": {
"sourceId": source_id,
"sourceEntryId": entry["id_evento_caso"],
"sourceUrl": source_url
},
"location": convert_location(entry),
"demographics": {
"ageRange": {
"start": convert_age(entry),
"end": convert_age(entry)
},
"gender": convert_gender(entry["sexo"])
}
}
confirmed_event, confirmation_note = get_confirmed_event(entry)
case["events"] = [confirmed_event]
notes.append(confirmation_note)
if entry["fecha_inicio_sintomas"]:
case["symptoms"] = {
"status": "Symptomatic",
}
case["events"].append({
"name": "onsetSymptoms",
"dateRange": {
"start": convert_date(entry['fecha_inicio_sintomas']),
"end": convert_date(entry['fecha_inicio_sintomas']),
}
})
if entry["fecha_fallecimiento"]:
case["events"].append({
"name": "outcome",
"value": "Death",
"dateRange": {
"start": convert_date(entry['fecha_fallecimiento']),
"end": convert_date(entry['fecha_fallecimiento']),
}
})
elif entry["fecha_internacion"]:
case["events"].append({
"name": "outcome",
"value": "hospitalAdmission",
"dateRange": {
"start": convert_date(entry['fecha_internacion']),
"end": convert_date(entry['fecha_internacion']),
}
})
elif entry["fecha_cui_intensivo"]:
case["events"].append({
"name": "outcome",
"value": "icuAdmission",
"dateRange": {
"start": convert_date(entry['fecha_cui_intensivo']),
"end": convert_date(entry['fecha_cui_intensivo']),
}
})
if 'no activo' in entry['clasificacion'].lower():
case["events"].append({
"name": "outcome",
"value": "Recovered"})
if 'No activo (por tiempo de evolución)' in entry['clasificacion']:
notes.append(
"Patient recovery was confirmed by a number of days elapsing with no symptoms.")
elif "No Activo por criterio de laboratorio" in entry['clasificacion']:
notes.append(
"Patient recovery was confirmed by a negative laboratory test.")
notes.append(
f"Case was registered as being from {convert_residential_location(entry)}.")
notes.append(
f"Case last updated on {convert_date(entry['ultima_actualizacion'])}.")
if entry['origen_financiamiento'] in ['Público', 'Privado']:
notes.append(
f"Case was dealt with through {private_public_map[entry['origen_financiamiento']]} health system.")
if entry['asistencia_respiratoria_mecanica'] == 'SI':
notes.append("Patient received mechanical ventilation.")
if entry['clasificacion']:
notes.append(f"Diagnostic notes: {entry['clasificacion']}")
case["notes"] = "\n".join(notes)
yield case
def lambda_handler(event, context):
return parsing_lib.run_lambda(event, context, parse_cases)
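# A minimal local-testing sketch; the real entry point is lambda_handler via
# parsing_lib, as noted at the top of this file. The CSV file name and source
# metadata below are placeholders, not values from this module:
#
#     for case in parse_cases("Covid19Casos.csv", "SOURCE_ID", "https://example.org/source"):
#         print(case["caseReference"]["sourceEntryId"], case["events"][0]["value"])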
|
import os
def parent_directory():
    # Change into the parent of the current working directory
    # (note: this mutates the process's working directory as a side effect)
os.chdir('..')
# Return the absolute path of the parent directory
return os.getcwd()
print(parent_directory())
|
from .workers import celery
from datetime import datetime
from celery.schedules import crontab
from flask_sse import sse
from flask import url_for
from main import app as app
from .database import db
from .models import User,Deck
import os
import time
import requests
@celery.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(10.0, print_current_time.s(), name = 'at every 10 seconds')
sender.add_periodic_task(120.0, cleanup_exports.s(), name = 'remove all the export files')
sender.add_periodic_task(10.0, daily_remainder.s(), name = 'Send message to discord to revise daily')
#sender.add_periodic_task(crontab(hour=21, minute=15), daily_remainder.s(), name = 'Send daily reminders in discord to revise')
@celery.task()
def say_hello(name):
print("INSIDE THE TASK")
return "Hello {}!".format(name)
@celery.task()
def print_current_time():
print("START")
now = datetime.now()
print("now in task = ",now)
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("date and time = ", dt_string)
print("COMPLETE")
return dt_string
@celery.task(bind=True)
def export_deck(self,user_id,deck_id):
now = datetime.now()
print("STARTING DECK EXPORT JOB AT",now)
print("Job id",self.request.id)
deck = Deck.query.filter(Deck.deck_id == deck_id).scalar()
print(deck.deck_name)
f = open(f'static/files/{self.request.id}.txt','w',encoding='utf-8')
for card in deck.cards:
f.write(card.front+','+card.back+'\n')
f.close()
time.sleep(4)
print("DECK EXPORT JOB COMPLETE")
message = "Exported the deck {}".format(deck_id)
export_url = 'http://localhost:5000/static/files/{}.txt'.format(self.request.id)
sse.publish({"job_id":self.request.id,"message":message,"url":export_url}, type='Export', channel=str(user_id))
return "EXPORT COMPLETED"
@celery.task(bind=True)
def cleanup_exports(self):
try:
files = os.listdir('static/files')
for file in files:
os.remove(f'static/files/{file}')
except:
pass
@celery.task(bind=True)
def daily_remainder(self):
users = User.query.all()
for user in users:
url = user.webhook_url
payload = {
"username": "FlashCardApp Bot",
"avatar_url": "https://gravatar.com/avatar/6a4bebbfceea7807eb0758fb32510a64?s=400&d=robohash&r=x",
"embeds": [
{
"title": "Revise Today!",
"description": "Revising daily keeps your mind sharp. Don't forget to revise today.",
"color": 15258703
}
]
}
try:
response = requests.request("POST",url,json=payload)
print(response.text)
except:
pass
@celery.task(bind=True)
def webhook_test(self,user_id):
user = User.query.filter(User.id == user_id).scalar()
url = user.webhook_url
payload = {
"username": "FlashCardApp Bot",
"avatar_url": "https://gravatar.com/avatar/6a4bebbfceea7807eb0758fb32510a64?s=400&d=robohash&r=x",
"embeds": [
{
"title": "Title",
"description": "Text message. You can use Markdown here. *Italic* **bold** __underline__ ~~strikeout~~ [hyperlink](https://google.com) `code`",
"color": 15258703
}
]
}
try:
response = requests.request("POST",url,json=payload)
print(response.text)
except:
pass
|
import sys
from utils.utils import load_config
from utils.log import log
from base.trainer import Trainer
from models.wgan_gp import WGAN_GP
from models.wgan import WGAN
from models.dcgan import DCGAN
from app import app
def start_train_session(Model, cfg_path):
log.info("Setting up Trainer...")
trainer = Trainer(Model, cfg_path)
log.info("Starting Training...")
trainer.train()
@app.command()
def wgan_gp(cfg_path: str = "./config/config.yml"):
log.info("WGAN-GP selected for training...")
start_train_session(WGAN_GP, cfg_path)
@app.command()
def wgan(cfg_path: str = "./config/config.yml"):
log.info("WGAN selected for training...")
start_train_session(WGAN, cfg_path)
@app.command()
def dcgan(cfg_path: str = "./config/config.yml"):
log.info("DCGAN selected for training...")
start_train_session(DCGAN, cfg_path)
if __name__ == "__main__":
app()
|
from datetime import datetime
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON
from passlib.apps import custom_app_context as pwd_context
from mongoengine import *
from entity_matching_tool import db, app
class Job(db.Model):
__tablename__ = 'jobs'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
source1 = db.Column(db.String())
source2 = db.Column(db.String())
selectedFields = db.Column(JSON)
outputFileName = db.Column(db.String(), unique=True)
    creator = db.Column(db.Integer, db.ForeignKey('users.id'))  # ForeignKey links Job and User
creationDate = db.Column(db.DateTime)
metric = db.Column(db.String())
__table_args__ = (UniqueConstraint('creator', 'source1', 'source2', name='unique_creator_with_sources'),)
def __init__(self, name, source1, source2, selected_fields, output_file_name, metric,
creator, creation_date=None):
self.name = name
self.source1 = source1
self.source2 = source2
self.selectedFields = selected_fields
self.outputFileName = output_file_name
self.metric = metric
self.creator = creator
if creation_date:
self.creationDate = creation_date
else:
self.creationDate = datetime.utcnow()
def __repr__(self):
return '<Job: "{}">'.format(self.name)
def to_dict(self):
job_dict = dict(self.__dict__)
job_dict.pop('_sa_instance_state', None)
job_dict['creationDate'] = job_dict['creationDate'].isoformat()
return job_dict
def save(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
class MongoEntity(Document):
Id = IntField()
jobId = IntField()
isFirstSource = BooleanField()
name = StringField()
otherFields = DictField()
isMatched = BooleanField(default=False)
def to_dict(self):
entity_dict = dict(Id=self.Id, jobId=self.jobId, isFirstSource=self.isFirstSource, name=self.name,
otherFields=self.otherFields, isMatched=self.isMatched)
return entity_dict
def set_as_matched(self):
self.isMatched = True
self.save()
def __repr__(self):
return '<Entity: "{}">'.format(self.name)
class MongoMatchedEntities(Document):
Id = IntField()
entity1_id = IntField()
entity2_id = IntField()
jobId = IntField()
def to_dict(self):
entity_dict = dict(Id=self.Id, jobId=self.jobId, entity1_id=self.entity1_id, entity2_id=self.entity2_id)
return entity_dict
def __repr__(self):
return '<Matched Entities: {}, {}>'.format(self.entity1_id, self.entity2_id)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
userName = db.Column(db.String(80), unique=True, index=True)
passwordHash = db.Column(db.String(128))
def __init__(self, user_name):
self.userName = user_name
def __repr__(self):
return '<User: {}>'.format(self.userName)
def save(self):
db.session.add(self)
db.session.commit()
def hash_password(self, password):
self.passwordHash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.passwordHash)
def generate_auth_token(self, expiration=None):
s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return None
except BadSignature:
return None
user = User.query.get(data['id'])
return user
db.create_all()
|
import sys
from flask import Flask
from httpobs.conf import DEVELOPMENT_MODE, API_PORT, API_PROPAGATE_EXCEPTIONS
from httpobs.website import add_response_headers
from httpobs.website.api import api
from httpobs.website.monitoring import monitoring_api
def __exit_with(msg: str) -> None:
print(msg)
sys.exit(1)
# Register the application with flask
app = Flask('http-observatory')
app.config['PROPAGATE_EXCEPTIONS'] = API_PROPAGATE_EXCEPTIONS
app.register_blueprint(api)
app.register_blueprint(monitoring_api)
@app.route('/')
@add_response_headers()
def main() -> str:
return 'Welcome to the HTTP Observatory!'
if __name__ == '__main__':
app.run(debug=DEVELOPMENT_MODE,
port=API_PORT)
|
from malaya_speech.model.frame import Frame
from malaya_speech.utils.padding import (
sequence_1d,
)
from malaya_speech.utils.astype import float_to_int
from malaya_speech.utils.featurization import universal_mel
from malaya_speech.model.abstract import Abstract
from malaya_speech.utils.constant import MEL_MEAN, MEL_STD
from typing import Callable
class TTS:
def gradio(self, vocoder: Callable, **kwargs):
"""
Text-to-Speech on Gradio interface.
Parameters
----------
        vocoder: Callable
vocoder object that has `predict` method, prefer from malaya_speech itself.
**kwargs: keyword arguments for `predict` and `iface.launch`.
"""
try:
import gradio as gr
except BaseException:
raise ModuleNotFoundError(
'gradio not installed. Please install it by `pip install gradio` and try again.'
)
def pred(string):
r = self.predict(string=string, **kwargs)
if 'universal' in str(vocoder):
o = r['universal-output']
else:
o = r['mel-output']
y_ = vocoder(o)
data = float_to_int(y_)
return (22050, data)
title = 'Text-to-Speech'
        description = 'The first run will take some time; after that it should be really fast.'
iface = gr.Interface(pred, gr.inputs.Textbox(lines=3, label='Input Text'),
'audio', title=title, description=description)
return iface.launch(**kwargs)
class Vocoder(Abstract, TTS):
def __init__(self, input_nodes, output_nodes, sess, model, name):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._sess = sess
self.__model__ = model
self.__name__ = name
def predict(self, inputs):
"""
Change Mel to Waveform.
Parameters
----------
inputs: List[np.array]
List[np.array] or List[malaya_speech.model.frame.Frame].
Returns
-------
result: List
"""
inputs = [
input.array if isinstance(input, Frame) else input
for input in inputs
]
padded, lens = sequence_1d(inputs, return_len=True)
r = self._execute(
inputs=[padded],
input_labels=['Placeholder'],
output_labels=['logits'],
)
return r['logits'][:, :, 0]
def __call__(self, input):
return self.predict([input])[0]
class Tacotron(Abstract, TTS):
def __init__(
self, input_nodes, output_nodes, normalizer, stats, sess, model, name
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._normalizer = normalizer
self._stats = stats
self._sess = sess
self.__model__ = model
self.__name__ = name
def predict(self, string, **kwargs):
"""
Change string to Mel.
Parameters
----------
string: str
Returns
-------
result: Dict[string, decoder-output, mel-output, universal-output, alignment]
"""
t, ids = self._normalizer.normalize(string, **kwargs)
r = self._execute(
inputs=[[ids], [len(ids)]],
input_labels=['Placeholder', 'Placeholder_1'],
output_labels=[
'decoder_output',
'post_mel_outputs',
'alignment_histories',
],
)
v = r['post_mel_outputs'][0] * self._stats[1] + self._stats[0]
v = (v - MEL_MEAN) / MEL_STD
return {
'string': t,
'ids': ids,
'decoder-output': r['decoder_output'][0],
'mel-output': r['post_mel_outputs'][0],
'universal-output': v,
'alignment': r['alignment_histories'][0],
}
def __call__(self, input):
return self.predict(input)
class Fastspeech(Abstract, TTS):
def __init__(
self, input_nodes, output_nodes, normalizer, stats, sess, model, name
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._normalizer = normalizer
self._stats = stats
self._sess = sess
self.__model__ = model
self.__name__ = name
def predict(
self,
string,
speed_ratio: float = 1.0,
f0_ratio: float = 1.0,
energy_ratio: float = 1.0,
**kwargs,
):
"""
Change string to Mel.
Parameters
----------
string: str
        speed_ratio: float, optional (default=1.0)
            Increasing this value increases the duration of the generated voice.
        f0_ratio: float, optional (default=1.0)
            Increasing this value increases the frequency; a lower frequency gives a deeper voice.
        energy_ratio: float, optional (default=1.0)
            Increasing this value increases the loudness.
Returns
-------
result: Dict[string, decoder-output, mel-output, universal-output]
"""
t, ids = self._normalizer.normalize(string, **kwargs)
r = self._execute(
inputs=[[ids], [speed_ratio], [f0_ratio], [energy_ratio]],
input_labels=[
'Placeholder',
'speed_ratios',
'f0_ratios',
'energy_ratios',
],
output_labels=['decoder_output', 'post_mel_outputs'],
)
v = r['post_mel_outputs'][0] * self._stats[1] + self._stats[0]
v = (v - MEL_MEAN) / MEL_STD
return {
'string': t,
'ids': ids,
'decoder-output': r['decoder_output'][0],
'mel-output': r['post_mel_outputs'][0],
'universal-output': v,
}
def __call__(self, input, **kwargs):
return self.predict(input, **kwargs)
class FastVC(Abstract):
def __init__(
self,
input_nodes,
output_nodes,
speaker_vector,
magnitude,
sess,
model,
name,
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._speaker_vector = speaker_vector
self._magnitude = magnitude
self._sess = sess
self.__model__ = model
self.__name__ = name
def predict(self, original_audio, target_audio):
"""
Change original voice audio to follow targeted voice.
Parameters
----------
original_audio: np.array or malaya_speech.model.frame.Frame
target_audio: np.array or malaya_speech.model.frame.Frame
Returns
-------
result: Dict[decoder-output, mel-output]
"""
        original_audio = (
            original_audio.array if isinstance(original_audio, Frame) else original_audio
        )
        target_audio = (
            target_audio.array if isinstance(target_audio, Frame) else target_audio
        )
original_mel = universal_mel(original_audio)
target_mel = universal_mel(target_audio)
original_v = self._magnitude(self._speaker_vector([original_audio])[0])
target_v = self._magnitude(self._speaker_vector([target_audio])[0])
r = self._execute(
inputs=[
[original_mel],
[original_v],
[target_v],
[len(original_mel)],
],
input_labels=[
'mel',
'ori_vector',
'target_vector',
'mel_lengths',
],
output_labels=['mel_before', 'mel_after'],
)
return {
'decoder-output': r['mel_before'][0],
'mel-output': r['mel_after'][0],
}
def __call__(self, original_audio, target_audio):
return self.predict(original_audio, target_audio)
class Fastpitch(Abstract, TTS):
def __init__(
self, input_nodes, output_nodes, normalizer, stats, sess, model, name
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._normalizer = normalizer
self._stats = stats
self._sess = sess
self.__model__ = model
self.__name__ = name
def predict(
self,
string,
speed_ratio: float = 1.0,
pitch_ratio: float = 1.0,
pitch_addition: float = 0.0,
**kwargs,
):
"""
Change string to Mel.
Parameters
----------
string: str
speed_ratio: float, optional (default=1.0)
            Increasing this value increases the duration of the generated voice.
pitch_ratio: float, optional (default=1.0)
pitch = pitch * pitch_ratio, amplify existing pitch contour.
pitch_addition: float, optional (default=0.0)
pitch = pitch + pitch_addition, change pitch contour.
Returns
-------
result: Dict[string, decoder-output, mel-output, pitch-output, universal-output]
"""
t, ids = self._normalizer.normalize(string, **kwargs)
r = self._execute(
inputs=[[ids], [speed_ratio], [pitch_ratio], [pitch_addition]],
input_labels=[
'Placeholder',
'speed_ratios',
'pitch_ratios',
'pitch_addition',
],
output_labels=['decoder_output', 'post_mel_outputs', 'pitch_outputs'],
)
v = r['post_mel_outputs'][0] * self._stats[1] + self._stats[0]
v = (v - MEL_MEAN) / MEL_STD
return {
'string': t,
'ids': ids,
'decoder-output': r['decoder_output'][0],
'mel-output': r['post_mel_outputs'][0],
'pitch-output': r['pitch_outputs'][0],
'universal-output': v,
}
def __call__(self, input, **kwargs):
return self.predict(input, **kwargs)
class GlowTTS(Abstract, TTS):
def __init__(
self, input_nodes, output_nodes, normalizer, stats, sess, model, name, **kwargs
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._normalizer = normalizer
self._stats = stats
self._sess = sess
self.__model__ = model
self.__name__ = name
def predict(
self,
string,
temperature: float = 0.3333,
length_ratio: float = 1.0,
**kwargs,
):
"""
Change string to Mel.
Parameters
----------
string: str
temperature: float, optional (default=0.3333)
Decoder model trying to decode with encoder(text) + random.normal() * temperature.
length_ratio: float, optional (default=1.0)
            Increasing this value increases the duration of the generated voice.
Returns
-------
result: Dict[string, ids, mel-output, alignment, universal-output]
"""
t, ids = self._normalizer.normalize(string, **kwargs)
r = self._execute(
inputs=[[ids], [len(ids)], [temperature], [length_ratio]],
input_labels=[
'input_ids',
'lens',
'temperature',
'length_ratio',
],
output_labels=['mel_output', 'alignment_histories'],
)
v = r['mel_output'][0] * self._stats[1] + self._stats[0]
v = (v - MEL_MEAN) / MEL_STD
return {
'string': t,
'ids': ids,
'mel-output': r['mel_output'][0],
'alignment': r['alignment_histories'][0].T,
'universal-output': v,
}
def __call__(self, input, **kwargs):
return self.predict(input, **kwargs)
class GlowTTS_MultiSpeaker(Abstract):
def __init__(
self, input_nodes, output_nodes, normalizer, speaker_vector, stats, sess, model, name
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._normalizer = normalizer
self._speaker_vector = speaker_vector
self._sess = sess
self.__model__ = model
self.__name__ = name
def _predict(self, string, left_audio, right_audio,
temperature: float = 0.3333,
length_ratio: float = 1.0, **kwargs):
t, ids = self._normalizer.normalize(string, **kwargs)
left_v = self._speaker_vector([left_audio])
right_v = self._speaker_vector([right_audio])
r = self._execute(
inputs=[[ids], [len(ids)], [temperature], [length_ratio], left_v, right_v],
input_labels=[
'input_ids',
'lens',
'temperature',
'length_ratio',
'speakers',
'speakers_right',
],
output_labels=['mel_output', 'alignment_histories'],
)
return {
'string': t,
'ids': ids,
'alignment': r['alignment_histories'][0].T,
'universal-output': r['mel_output'][0][:-8],
}
def predict(
self,
string,
audio,
temperature: float = 0.3333,
length_ratio: float = 1.0,
**kwargs,
):
"""
Change string to Mel.
Parameters
----------
string: str
audio: np.array
            np.array or malaya_speech.model.frame.Frame, must be in 16k format.
We only trained on `female`, `male`, `husein` and `haqkiem` speakers.
temperature: float, optional (default=0.3333)
Decoder model trying to decode with encoder(text) + random.normal() * temperature.
length_ratio: float, optional (default=1.0)
            Increasing this value increases the duration of the generated voice.
Returns
-------
result: Dict[string, ids, alignment, universal-output]
"""
return self._predict(string=string,
left_audio=audio, right_audio=audio,
temperature=temperature, length_ratio=length_ratio, **kwargs)
def voice_conversion(self, string, original_audio, target_audio,
temperature: float = 0.3333,
length_ratio: float = 1.0,
**kwargs,):
"""
Change string to Mel.
Parameters
----------
string: str
        original_audio: np.array
            original speaker used to encode the speaking style, must be in 16k format.
        target_audio: np.array
            target speaker that will follow the speaking style from `original_audio`, must be in 16k format.
temperature: float, optional (default=0.3333)
Decoder model trying to decode with encoder(text) + random.normal() * temperature.
length_ratio: float, optional (default=1.0)
            Increasing this value increases the duration of the generated voice.
Returns
-------
result: Dict[string, ids, alignment, universal-output]
"""
return self._predict(string=string,
left_audio=original_audio, right_audio=target_audio,
temperature=temperature, length_ratio=length_ratio, **kwargs)
def __call__(self, input, **kwargs):
return self.predict(input, **kwargs)
|
from django.apps import AppConfig
class PrimerValConfig(AppConfig):
name = 'primer'
|
#!/usr/bin/python
#-*-coding:utf-8-*-
import os
from scrapy import log
from scrapy.http import Request
from scrapy.contrib.pipeline.images import ImagesPipeline
from woaidu_crawler.utils.select_result import list_first_item
class WoaiduCoverImage(ImagesPipeline):
"""
this is for download the book covor image and then complete the
book_covor_image_path field to the picture's path in the file system.
"""
def __init__(self, store_uri, download_func=None):
self.images_store = store_uri
        super(WoaiduCoverImage, self).__init__(store_uri, download_func=download_func)
def get_media_requests(self, item, info):
if item.get('book_covor_image_url'):
yield Request(item['book_covor_image_url'])
def item_completed(self, results, item, info):
if self.LOG_FAILED_RESULTS:
            msg = '%s found errors processing %s' % (self.__class__.__name__, item)
for ok, value in results:
if not ok:
log.err(value, msg, spider=info.spider)
image_paths = [x['path'] for ok, x in results if ok]
image_path = list_first_item(image_paths)
item['book_covor_image_path'] = os.path.join(os.path.abspath(self.images_store),image_path) if image_path else ""
return item
|
#!/usr/bin/env python3
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="animepirate-pantuts",
version="0.1.4",
author="Nick Bien",
author_email="pantuts@gmail.com",
description="Dumb anime videos downloader.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pantuts/animepirate",
packages=setuptools.find_packages(),
entry_points={
'console_scripts': [
'pirateanime = animepirate.main:main',
],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: MIT License"
],
python_requires='>=3.6',
)
|
import urllib.request
import json
import os
from geopy.geocoders import Nominatim
from timezonefinder import TimezoneFinder
import datetime as dt
from time import sleep as wait
import pytz
with urllib.request.urlopen("https://geolocation-db.com/json") as url:
data = json.loads(url.read().decode())
geolocator = Nominatim(user_agent="geoapiExercises")
lad = data['country_name']
location = geolocator.geocode(lad)
obj = TimezoneFinder()
tzr = obj.timezone_at(lng=location.longitude, lat=location.latitude)
source_date = dt.datetime.now()
currentTimeZone = pytz.timezone(tzr)
currentDateWithTimeZone = currentTimeZone.localize(source_date)
while True:
print("-----WELCOME TO CLIENT ONLINE CLOCK-----")
print(f"Country : {lad}")
print(f'Time Zone Is Set To : {tzr}')
now = dt.datetime.now(tz=currentTimeZone)
full_clock = now.strftime("%I:%M:%S %p")
day = now.strftime("%A %d-%m-%Y")
print(f"{day}")
print(f"{full_clock}")
wait(0.1)
os.system('cls')
|
from girder.api import access
from girder.api.rest import Resource, filtermodel, RestException
from girder.api.describe import Description, autoDescribeRoute
from girder.constants import SortDir, AccessType
from ..models.user_module import UserModule as UserModuleModel
from girder import logprint
class UserModule(Resource):
def __init__(self):
super(UserModule, self).__init__()
self.resourceName = 'user_module'
self.route('GET', (), self.list_userModules)
self.route('GET', (':id',), self.get_userModule)
self.route('POST', (), self.create_userModule)
self.route('PUT', (':id',), self.update_userModule)
self.route('DELETE', (':id',), self.delete_userModule)
self.model = UserModuleModel()
@access.user
@autoDescribeRoute(
Description("Get all userModules")
)
def list_userModules(self):
        user_modules = []
        for userModule in self.model.list():
            user_modules.append(userModule)
        logprint(user_modules)
        return user_modules
@access.user
@autoDescribeRoute(
Description('Get a userModule')
.modelParam('id', 'ID of the userModule', model=UserModuleModel)
.errorResponse('ID was invalid.')
)
def get_userModule(self, userModule, params):
return userModule
@access.user
@autoDescribeRoute(
Description('Create a new userModule.')
.responseClass('userModule')
.param('name', 'A variable', required=True)
.param('url', 'A variable', required=True)
.errorResponse('You are not authorized to create reports.', 403)
)
def create_userModule(self, name, url):
user = self.getCurrentUser()
return self.model.create(
name, url = url, creator=user, save=True
)
@access.user
@autoDescribeRoute(
Description('Update a userModule')
.modelParam('id', 'ID of the userModule', model=UserModuleModel)
.param('name', 'A variable', required=True)
.param('url', 'A variable', required=True)
)
def update_userModule(self, userModule, name, url):
user = self.getCurrentUser()
if name is not None:
userModule['name'] = name
if url is not None:
userModule['url'] = url
userModule = self.model.update(userModule)
return userModule
@access.user
@autoDescribeRoute(
Description('Delete an existing userModule.')
.modelParam('id', 'The report ID', model=UserModuleModel)
.errorResponse('ID was invalid.')
.errorResponse('Admin access was denied for the report.', 403)
)
def delete_userModule(self, userModule, params):
logprint(userModule)
self.model.remove(userModule)
return userModule['_id']
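# Example requests against a running Girder server (assuming the default /api/v1
# prefix and an authenticated Girder-Token header):
#
#     GET    /api/v1/user_module                        -> list_userModules
#     POST   /api/v1/user_module?name=...&url=...       -> create_userModule
#     PUT    /api/v1/user_module/<id>?name=...&url=...  -> update_userModule
#     DELETE /api/v1/user_module/<id>                   -> delete_userModule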
|
#!/usr/bin/env python3
import glob
import os
import numpy as np
import argparse
import pickle
import logging
from sklearn import model_selection
import model
from util import io_util, logging_util
from data_class import opunit_data
from info import data_info
from training_util import data_transforming_util, result_writing_util
from type import Target
np.set_printoptions(precision=4)
np.set_printoptions(edgeitems=10)
np.set_printoptions(suppress=True)
class MiniTrainer:
"""
Trainer for the mini models
"""
def __init__(self, input_path, model_metrics_path, ml_models, test_ratio):
self.input_path = input_path
self.model_metrics_path = model_metrics_path
self.ml_models = ml_models
self.test_ratio = test_ratio
def train(self):
"""Train the mini-models
:return: the map of the trained models
"""
data_list = []
# First get the data for all mini runners
for filename in glob.glob(os.path.join(self.input_path, '*.csv')):
print(filename)
data_list += opunit_data.get_mini_runner_data(filename)
model_map = {}
# train the models for all the operating units
for data in data_list:
x_train, x_test, y_train, y_test = model_selection.train_test_split(data.x, data.y,
test_size=self.test_ratio,
random_state=0)
            # Write the first header row to the result file
metrics_path = "{}/{}.csv".format(self.model_metrics_path, data.opunit.name.lower())
prediction_path = "{}/{}_prediction.csv".format(self.model_metrics_path, data.opunit.name.lower())
result_writing_util.create_metrics_and_prediction_files(metrics_path, prediction_path)
methods = self.ml_models
# Only use linear regression for the arithmetic operating units
if data.opunit in data_info.ARITHMETIC_OPUNITS:
methods = ["lr"]
# Also test the prediction with the target transformer (if specified for the operating unit)
transformers = [None]
modeling_transformer = data_transforming_util.OPUNIT_MODELING_TRANSFORMER_MAP[data.opunit]
if modeling_transformer is not None:
transformers.append(modeling_transformer)
min_percentage_error = 1
pred_results = None
elapsed_us_index = data_info.TARGET_CSV_INDEX[Target.ELAPSED_US]
for transformer in transformers:
for method in methods:
# Train the model
logging.info("{} {}".format(data.opunit.name, method))
regressor = model.Model(method, modeling_transformer=transformer)
regressor.train(x_train, y_train)
# Evaluate on both the training and test set
results = []
evaluate_data = [(x_train, y_train), (x_test, y_test)]
train_test_label = ["Train", "Test"]
for i, d in enumerate(evaluate_data):
evaluate_x = d[0]
evaluate_y = d[1]
y_pred = regressor.predict(evaluate_x)
logging.debug("x shape: {}".format(evaluate_x.shape))
logging.debug("y shape: {}".format(y_pred.shape))
percentage_error = np.average(np.abs(evaluate_y - y_pred) / (evaluate_y + 1), axis=0)
results += list(percentage_error) + [""]
logging.info('{} Percentage Error: {}'.format(train_test_label[i], percentage_error))
# Record the model with the lowest elapsed time prediction (since that might be the most
# important prediction)
if (i == 1 and percentage_error[elapsed_us_index] < min_percentage_error and transformer ==
transformers[-1]):
min_percentage_error = percentage_error[elapsed_us_index]
model_map[data.opunit] = regressor
pred_results = (evaluate_x, y_pred, evaluate_y)
# Dump the prediction results
transform = " "
if transformer is not None:
transform = " transform"
io_util.write_csv_result(metrics_path, method + transform, results)
logging.info("")
io_util.write_csv_result(metrics_path, "", [])
# Record the best prediction results on the test data
result_writing_util.record_predictions(pred_results, prediction_path)
return model_map
# ==============================================
# main
# ==============================================
if __name__ == '__main__':
aparser = argparse.ArgumentParser(description='Mini Trainer')
aparser.add_argument('--input_path', default='mini_runner_input', help='Input file path for the mini runners')
aparser.add_argument('--model_results_path', default='mini_runner_model_results',
help='Prediction results of the mini models')
aparser.add_argument('--save_path', default='trained_model', help='Path to save the mini models')
aparser.add_argument('--ml_models', nargs='*', type=str, default=["lr", "rf", "nn"],
help='ML models for the mini trainer to evaluate')
aparser.add_argument('--test_ratio', type=float, default=0.2, help='Test data split ratio')
aparser.add_argument('--log', default='info', help='The logging level')
args = aparser.parse_args()
logging_util.init_logging(args.log)
trainer = MiniTrainer(args.input_path, args.model_results_path, args.ml_models, args.test_ratio)
trained_model_map = trainer.train()
with open(args.save_path + '/mini_model_map.pickle', 'wb') as file:
pickle.dump(trained_model_map, file)
|
# write some code using unittest to test our
# add_state_names_columns assignment
import unittest
from pandas import DataFrame
from lambdata.assignment import add_state_names_columns
class TestAssignment(unittest.TestCase):
def test_add_state_names(self):
df = DataFrame({'abbrev': ['CA', 'CO', 'CT', 'DC', 'TX']})
#print(df.head())
# ensure that our test is setup properly
self.assertEqual(len(df.columns), 1)
self.assertEqual(list(df.columns), ['abbrev'])
self.assertEqual(df.iloc[0]['abbrev'], 'CA')
# What code can we write, referencing df to know if
# our function did what it was supposed to do
# (adding a column to corresponding state names)
mapped_df = add_state_names_columns(df)
self.assertEqual(len(mapped_df.columns), 2)
self.assertEqual(list(mapped_df.columns), ['abbrev', 'name'])
self.assertEqual(mapped_df.iloc[0]['abbrev'], 'CA')
self.assertEqual(mapped_df.iloc[0]['name'], 'Cali')
if __name__ == '__main__':
unittest.main() # Invoking all our class test methods
|
import os
import platform
import shutil
import subprocess
import sys
from setuptools import setup, Distribution
import setuptools.command.build_ext as _build_ext
from setuptools.command.install import install
class Install(install):
def run(self):
install.run(self)
python_executable = sys.executable
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
setup(name='weldnumpy',
version='0.0.1',
packages=['weldnumpy'],
cmdclass={"install": Install},
distclass=BinaryDistribution,
url='https://github.com/weld-project/weld',
author='Weld Developers',
author_email='weld-group@lists.stanford.edu',
install_requires=['pyweld'])
|
import argparse
class DevOpsOptions:
def __init__(self):
parser = argparse.ArgumentParser(description='')
parser.add_argument('--env', action="store", dest='env', default='local')
parser.add_argument('--command', action="store", dest='command', default='start')
parser.add_argument('--arch', action="store", dest='arch', default='x86_64')
results = parser.parse_args()
self.command = results.command
self.env = results.env
self.arch = results.arch
self.valid = True
|
from urllib.request import urlopen
from jwcrypto.jwk import JWKSet
jwkeys = JWKSet()
jwk_sets = [
'https://www.googleapis.com/oauth2/v3/certs'
]
def load_keys():
for keyurl in jwk_sets:
with urlopen(keyurl) as key:
jwkeys.import_keyset(key.read().decode())
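# Sketch of intended use; the verification call is an assumption based on the
# jwcrypto API rather than something this module defines:
#
#     load_keys()  # populate jwkeys once at startup
#     # from jwcrypto import jwt
#     # verified = jwt.JWT(jwt=id_token, key=jwkeys)  # raises on an invalid signature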
|
import unittest
from car_pooling import Solution
class Test(unittest.TestCase):
def test_1(self):
solution = Solution()
self.assertEqual(solution.carPooling([[2, 1, 5], [3, 3, 7]], 4), False)
def test_2(self):
solution = Solution()
self.assertEqual(solution.carPooling([[2, 1, 5], [3, 3, 7]], 5), True)
def test_3(self):
solution = Solution()
self.assertEqual(solution.carPooling([[2, 1, 5], [3, 5, 7]], 3), True)
def test_4(self):
solution = Solution()
self.assertEqual(
solution.carPooling([[7, 5, 6], [6, 7, 8], [10, 1, 6]], 16), False
)
def test_5(self):
solution = Solution()
self.assertEqual(
solution.carPooling(
[[8, 2, 3], [4, 1, 3], [1, 3, 6], [8, 4, 6], [4, 4, 8]], 12
),
False,
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.zestreleaser.customupload.upload
import mock
import tempfile
import unittest
class UploadTest(unittest.TestCase):
context = {
'tagdir': '/tmp/tha.example-0.1dev',
'tag_already_exists': False,
'version': '0.1dev',
'workingdir': '/tmp/tha.example-svn',
'name': 'tha.example',
}
@mock.patch('gocept.zestreleaser.customupload.upload.choose_destination')
@mock.patch('zest.releaser.utils.ask')
def test_no_destination_should_be_noop(self, ask, choose):
choose.return_value = None
gocept.zestreleaser.customupload.upload.upload(self.context)
self.assertFalse(ask.called)
@mock.patch('gocept.zestreleaser.customupload.upload.choose_destination')
@mock.patch('zest.releaser.utils.ask')
@mock.patch('os.system')
def test_no_confirmation_should_exit(self, system, ask, choose):
choose.return_value = 'server'
ask.return_value = False
gocept.zestreleaser.customupload.upload.upload(self.context)
self.assertTrue(ask.called)
self.assertFalse(system.called)
@mock.patch('gocept.zestreleaser.customupload.upload.choose_destination')
@mock.patch('zest.releaser.utils.ask')
@mock.patch('os.system')
@mock.patch('glob.glob')
def test_call_scp(self, glob, system, ask, choose):
choose.return_value = 'server:'
ask.return_value = True
glob.return_value = [
'/tmp/tha.example-0.1dev/dist/tha.example-0.1dev.tar.gz']
gocept.zestreleaser.customupload.upload.upload(self.context)
system.assert_called_with(
'scp -P 22 /tmp/tha.example-0.1dev/dist/tha.example-0.1dev.tar.gz '
'server:')
class ProtocolSeparatorTest(unittest.TestCase):
def get_call(self, destination):
return gocept.zestreleaser.customupload.upload.get_calls(
['/path/to/source1', '/path/to/source2'], destination)
def test_no_protocol_should_use_scp(self):
self.assertEqual(
[['scp', '-P 22', '/path/to/source1', '/path/to/source2',
'localhost:/apath']],
self.get_call('localhost:/apath'))
def test_scp_should_use_scp(self):
self.assertEqual(
[['scp', '-P 22', '/path/to/source1', '/path/to/source2',
'localhost:apath']],
self.get_call('scp://localhost/apath'))
def test_scp_should_allow_absolute_path(self):
self.assertEqual(
[['scp', '-P 22', '/path/to/source1', '/path/to/source2',
'localhost:/apath']],
self.get_call('scp://localhost//apath'))
def test_scp_with_different_port(self):
self.assertEqual(
[['scp', '-P 7569', '/path/to/source1', '/path/to/source2',
'localhost:/apath']],
self.get_call('scp://localhost:7569//apath'))
def test_http_should_use_curl_and_put(self):
self.assertEqual(
[['curl', '-X', 'PUT', '--data-binary', '@/path/to/source1',
'http://localhost/apath/source1'],
['curl', '-X', 'PUT', '--data-binary', '@/path/to/source2',
'http://localhost/apath/source2']],
self.get_call('http://localhost/apath'))
def test_https_should_use_curl_and_put(self):
self.assertEqual(
[['curl', '-X', 'PUT', '--data-binary', '@/path/to/source1',
'https://localhost/apath/source1'],
['curl', '-X', 'PUT', '--data-binary', '@/path/to/source2',
'https://localhost/apath/source2']],
self.get_call('https://localhost/apath'))
def test_http_should_honour_trailing_slash(self):
self.assertEqual(
[['curl', '-X', 'PUT', '--data-binary', '@/path/to/source1',
'http://localhost/apath/source1'],
['curl', '-X', 'PUT', '--data-binary', '@/path/to/source2',
'http://localhost/apath/source2']],
self.get_call('http://localhost/apath/'))
def test_https_should_add_additional_options_to_curl(self):
self.assertEqual(
[['curl', '--insecure', '-X', 'PUT', '--data-binary',
'@/path/to/source1', 'http://localhost/apath/source1'],
['curl', '--insecure', '-X', 'PUT', '--data-binary',
'@/path/to/source2', 'http://localhost/apath/source2']],
self.get_call('--insecure http://localhost/apath/'))
def test_sftp(self):
self.assertEqual(
[['echo', '"put /path/to/source1"', '|', 'sftp', '-P 22',
'-b', '-', 'user@localhost://apath'],
['echo', '"put /path/to/source2"', '|', 'sftp', '-P 22',
'-b', '-', 'user@localhost://apath']],
self.get_call('sftp://user@localhost//apath'))
def test_sftp_with_different_port(self):
self.assertEqual(
[['echo', '"put /path/to/source1"', '|', 'sftp', '-P 7596',
'-b', '-', 'user@localhost://apath'],
['echo', '"put /path/to/source2"', '|', 'sftp', '-P 7596',
'-b', '-', 'user@localhost://apath']],
self.get_call('sftp://user@localhost:7596//apath'))
class ConfigTest(unittest.TestCase):
@mock.patch('os.path.expanduser')
def test_returns_configparser(self, expanduser):
tmpfile = tempfile.NamedTemporaryFile()
expanduser.return_value = tmpfile.name
tmpfile.write(u"""
[gocept.zestreleaser.customupload]
my.package = my.dest
other.package = other.dest
""".encode('ascii'))
tmpfile.flush()
config = gocept.zestreleaser.customupload.upload.read_configuration(
'mock')
self.assertEqual('my.dest', config.get(
'gocept.zestreleaser.customupload', 'my.package'))
def test_file_not_present_silently_ignores_it(self):
config = gocept.zestreleaser.customupload.upload.read_configuration(
'doesnotexist')
self.assertEqual([], config.sections())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/12/30 5:11 PM
# @Title   : 1. Two Sum
# @Link    : https://leetcode-cn.com/problems/two-sum/
QUESTION = """
Given an array of integers nums and a target value target, find the two integers in the array
whose sum equals target and return their indices.
You may assume that each input has exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
since nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
"""
THINKING = """
Since each input is assumed to have exactly one answer and elements may not be reused, a hash map is the natural choice.
The hash map stores {value: index}, i.e. the index in the array of each value seen so far.
While iterating over the array from the start, to find two numbers summing to target we check whether
(target - current number) already exists in the hash map.
If it does, then target - current number is a number we have already seen, and the answer is
[index of the previously seen number, index of the current number].
"""
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
value_to_index = {}
for index, value in enumerate(nums):
exist_index = value_to_index.get(target - value)
if exist_index is not None:
return [exist_index, index]
value_to_index[value] = index
if __name__ == '__main__':
s = Solution()
nums = [2, 7, 11, 15]
target = 9
print(s.twoSum(nums, target))
|
from inception.image.matte.bordermatte import *
if __name__ == '__main__':
from inception.image.image import Image
#image = Image.from_filepath("../../../../test/images/buff_small.png")#traditional-buffets-and-sideboards.jpg")
image = Image.from_filepath("../../../../test/images/traditional-buffets-and-sideboards.jpg")
image = Image.from_filepath("../../../../test/images/cookiejar.jpg")
#image = Image.from_filepath("../../../../test/images/buff_small.png")
import time
t1 = time.time()
print("Starting")
result = alphamatte(image.data)
t2 = time.time()
print("Done (in %2.2f seconds)!" % (t2-t1))
filename = image.filename
image.to_rgba()
image[..., 3] = result
"""
image = image[...,0]
image = Image(image)
image.to_rgb()
image[..., 0] = result
image[..., 1] = result
image[..., 2] = result
"""
from PIL import Image as PILImage
green = Image.from_any(PILImage.new('RGBA', (image.shape[1], image.shape[0]), color=(0,255,0,255)))
from inception.image.operation.merge import MergeOperation
mop = MergeOperation([green, image]).run()
mop.save(filename.rsplit('.',1)[0] + '_greencomp.png')
Image.from_any(result).save(filename.rsplit('.',1)[0] + '_matte.png')
|
"""Unit test package for kraken."""
|
import time
from typing import Dict, Union
import requests
def listening_expression(
misty_ip: str,
colour: Dict = {"red": "0", "green": "125", "blue": "255"},
sound: Dict = {"FileName": "s_SystemWakeWord.wav"},
duration: Union[float, int] = 1.5,
) -> None:
requests.post("http://%s/api/led" % misty_ip, json=colour)
requests.post("http://%s/api/audio/play" % misty_ip, json=sound)
time.sleep(duration)
requests.post(
"http://%s/api/led" % misty_ip, json={"red": "0", "green": "0", "blue": "0"}
)
if __name__ == "__main__":
listening_expression("192.168.0.103")
|
from .model_field import Field
from ..settings import project_db
from ..utils import bson_parse
class Institution:
def __init__(self, _id):
self.name = ""
self._id = _id
self.fields = []
def add_field(self, name):
field_item = project_db["fields"].insert_one({"name": name})
field_id = field_item.inserted_id
field_instance = Field(field_id)
field_instance.name = name
        self.fields.append(field_id)
def load(self):
item = project_db["institutions"].find_one({"_id": self._id})
return bson_parse(item)
def get_json(self):
item = self.load()
item["fields"] = [Field(field).load() for field in self.fields]
return item
def commit(self):
project_db["institutions"].replace_one(
{"_id": self._id},
self.__dict__,
upsert=True)
|
from django.db import models
from django.core.exceptions import ValidationError
from django.db.models.fields.related import ForeignObject
from google_address.models import Address
try:
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor
except ImportError:
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor
# Python 3 fixes.
import sys
if sys.version > '3':
long = int
basestring = (str, bytes)
unicode = str
class AddressDescriptor(ForwardManyToOneDescriptor):
def __set__(self, inst, value):
super(AddressDescriptor, self).__set__(inst, self.to_python(value))
def to_python(self, value):
if value is None:
return None
# Is it already an address object?
if isinstance(value, Address):
return value
        # If we have an integer, assume it is a model primary key.
elif isinstance(value, (int, long)):
try:
return Address.objects.get(pk=value)
except Address.DoesNotExist:
raise ValidationError('Invalid address id.')
# A string is considered a raw value.
elif isinstance(value, basestring):
obj = Address(raw=value)
obj.save()
return obj
# A dictionary of named address components.
elif isinstance(value, dict):
return self.address_from_dict()
# Not in any of the formats I recognise.
raise ValidationError('Invalid address value.')
def address_from_dict(self):
raise NotImplementedError()
class AddressField(models.ForeignKey):
description = 'An address'
def __init__(self, *args, **kwargs):
kwargs['to'] = 'google_address.address'
        super(AddressField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, *args, **kwargs):
super(ForeignObject, self).contribute_to_class(cls, name, *args, **kwargs)
setattr(cls, self.name, AddressDescriptor(self))
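# Minimal usage sketch (hypothetical model; the on_delete/null/blank arguments are
# assumptions for a modern Django project, not taken from this module):
#
#     class Venue(models.Model):
#         address = AddressField(on_delete=models.SET_NULL, null=True, blank=True)
#
#     venue.address = "1600 Amphitheatre Parkway, Mountain View"  # raw string -> geocoded Address row
#     venue.address = 42                                          # primary key of an existing Address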
|
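# After sorting, the input A must equal [1, 1, 3, 3, 5, 5, ...] when N is even
# (or [0, 2, 2, 4, 4, ...] when N is odd); if it does, there are two choices for
# each of the N // 2 duplicated values, so the answer is 2**(N // 2) modulo
# 10**9 + 7, and otherwise it is 0.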
N = int(input())
A = list(map(int, input().split()))
m = 1000000007
A.sort()
if N % 2 == 0:
B = list(range(1, N, 2)) * 2
else:
B = [0] + list(range(2, N, 2)) * 2
B.sort()
if A != B:
print(0)
else:
result = 1
for i in range(N // 2):
result *= 2
result %= m
print(result)
|
"""
Автор: Орел Максим
Группа: КБ-161
Вариант: 11
Дата создания: 2/05/2018
Python Version: 3.6
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
# useful to understand http://mathprofi.ru/metod_naimenshih_kvadratov.html
x = [4.08, 4.42, 2.52, -0.08, 2.14, 3.36, 7.35, 5.00]
y = [18.31, 21.85, 16.93, -8.23, 10.90, 17.18, 36.45, 24.11]
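# Each fit below is an ordinary least-squares fit via the normal equations. For
# example, for y = a*x + b we minimise sum((y_i - a*x_i - b)**2); setting the
# partial derivatives with respect to a and b to zero gives the linear system
#     a*sum(x_i**2) + b*sum(x_i) = sum(x_i*y_i)
#     a*sum(x_i)    + b*n        = sum(y_i)
# which each function assembles into `left`/`right` and solves with np.linalg.solve.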
def build_points(x_array, y_array):
for i in range(0, len(x_array)):
plt.scatter(x_array[i], y_array[i])
def linear_function(x_array, y_array):
# solving next system
# a*x^2 + b*x - x*y = 0
# a*x + b - y = 0
sums = [0, 0, 0, 0]
for i in range(len(x_array)):
sums[0] += x_array[i] * x_array[i]
sums[1] += x_array[i]
sums[2] += x_array[i] * y_array[i]
sums[3] += y_array[i]
left = np.array([[sums[0], sums[1]], [sums[1], len(x_array)]])
right = np.array([sums[2], sums[3]])
a, b = np.linalg.solve(left, right)
deviation = 0
for i in range(len(x_array)):
deviation += (y_array[i] - a * x_array[i] - b) ** 2
    print('Deviation for y = a*x + b:', deviation)
points = np.linspace(min(x_array) - 0.228, max(x_array) + 0.228)
plt.plot(points, a * points + b, label='y = a*x + b')
def hyperbolic_function(x_array, y_array):
# solving next system
# a/x^2 + b/x - y/x = 0
# a/x + b - y = 0
sums = [0, 0, 0, 0]
for i in range(len(x_array)):
sums[0] += 1 / (x_array[i] * x_array[i])
sums[1] += 1 / x_array[i]
sums[2] += y_array[i] / x_array[i]
sums[3] += y_array[i]
left = np.array([[sums[0], sums[1]], [sums[1], len(x_array)]])
right = np.array([sums[2], sums[3]])
a, b = np.linalg.solve(left, right)
deviation = 0
for i in range(len(x_array)):
deviation += (y_array[i] - a / x_array[i] - b) ** 2
    print('Deviation for y = a/x + b:', deviation)
points = np.linspace(min(x_array) - 0.228, max(x_array) + 0.228)
plt.plot(points, a / points + b, label='y = a/x + b')
def logarithmic_function(x_array, y_array):
# solving next system
# a*ln(x)^2 + b*ln(x) - y*ln(x) = 0
# a*ln(x) + b - y = 0
    sums = [0, 0, 0, 0]
    n = 0  # count only the points actually used, since ln(x) needs x > 0
    for i in range(len(x_array)):
        if x_array[i] <= 0:
            continue
        n += 1
        sums[0] += np.log(x_array[i]) * np.log(x_array[i])
        sums[1] += np.log(x_array[i])
        sums[2] += y_array[i] * np.log(x_array[i])
        sums[3] += y_array[i]
    left = np.array([[sums[0], sums[1]], [sums[1], n]])
right = np.array([sums[2], sums[3]])
a, b = np.linalg.solve(left, right)
deviation = 0
for i in range(len(x_array)):
if x_array[i] < 0:
continue
deviation += (y_array[i] - a * np.log(x_array[i]) - b) ** 2
    print('Deviation for y = a*ln(x) + b:', deviation)
points = np.linspace(0.1, max(x_array) + 0.228)
plt.plot(points, a * np.log(points) + b, label='y = a*ln(x) + b')
def polynomial(x_array, y_array):
# solving next system
# a*x^4 + b*x^3 + c*x^2 - x^2*y = 0
# a*x^3 + b*x^2 + c*x - x*y = 0
# a*x^2 + b*x + c - y = 0
sums = [0, 0, 0, 0, 0, 0, 0]
for i in range(len(x_array)):
sums[0] += x_array[i] ** 4
sums[1] += x_array[i] ** 3
sums[2] += x_array[i] ** 2
sums[3] += x_array[i]
sums[4] += x_array[i] ** 2 * y_array[i]
sums[5] += x_array[i] * y_array[i]
sums[6] += y_array[i]
left = np.array([[sums[0], sums[1], sums[2]], [sums[1], sums[2], sums[3]], [sums[2], sums[3], len(x_array)]])
right = np.array([sums[4], sums[5], sums[6]])
a, b, c = np.linalg.solve(left, right)
deviation = 0
for i in range(len(x_array)):
deviation += (y_array[i] - a * x_array[i] ** 2 - b * x_array[i] - c) ** 2
    print('Deviation for y = a*x*x + b*x + c:', deviation)
points = np.linspace(min(x_array) - 0.228, max(x_array) + 0.228)
plt.plot(points, a * points ** 2 + b * points + c, label='y = a*x*x + b*x + c')
def exponential_function(x_array, y_array):
# solving next system
# a*x^2 + B*x - x*ln(y) = 0
# a*x + B - ln(y) = 0
    sums = [0, 0, 0, 0]
    n = 0  # count only the points actually used, since ln(y) needs y > 0
    for i in range(len(x_array)):
        if y_array[i] <= 0:
            continue
        n += 1
        sums[0] += x_array[i] * x_array[i]
        sums[1] += x_array[i]
        sums[2] += x_array[i] * np.log(y_array[i])
        sums[3] += np.log(y_array[i])
    left = np.array([[sums[0], sums[1]], [sums[1], n]])
right = np.array([sums[2], sums[3]])
a, B = np.linalg.solve(left, right)
# b = e^B
deviation = 0
for i in range(len(x_array)):
deviation += (y_array[i] - np.exp(B) * np.exp(x_array[i] * a)) ** 2
    print('Deviation for y = b*e^(a*x):', deviation)
points = np.linspace(0.1, max(x_array) + 0.228)
plt.plot(points, np.exp(B) * np.exp(points * a), label='y = b*e^(a*x)')
if __name__ == "__main__":
try:
        # Line up tonight's stars (the data points) and start the show
build_points(x, y)
        # The ball is opened by a simple, straight-line fellow - Leo
# 1) y = a*x + b
linear_function(x, y)
        # Meet the shirtless man who is not above using chemistry to bulk up
# 2) y = a/x + b
hyperbolic_function(x, y)
        # Next, the young Scot John Napier shows what he can do
# 3) y = a*ln(x) + b
logarithmic_function(x, y)
        # And finally, the highlight of the show - Miss Polina
# 4) y = a*x*x + b*x + c
polynomial(x, y)
        # Closing the show is Euler's young, unmarried son - Euler
# 5) y = b*e^(a*x)
exponential_function(x, y)
        # Aunt Tanya let Leonard try out his brushes and pencils here
plt.grid()
plt.legend()
plt.show()
except Exception as e:
print(e)
|
"""
a model of T cell homeostasis for two competing clonotypes
This model definition is based on S. Macnamara and K. Burrage's formulation of
Stirk et al's model:
E. R. Stirk, C. Molina-París and H. A. van den Berg.
Stochastic niche structure and diversity maintenance in the T cell
repertoire. Journal of Theoretical Biology, 255:237-249, 2008.
"""
import math
import numpy
from cmepy import model
def create_model():
"""
create species count state space version of the competing clonotypes model
"""
shape = (50, 50)
# we first define the mappings from the state space to species counts
# this is pretty easy since we choose species count state space
species_count_a = lambda *x : x[0]
species_count_b = lambda *x : x[1]
# we now define the reaction propensities using the species counts
def reaction_a_birth(*x):
"""
propensity of birth reaction for species a
"""
s_a = species_count_a(*x)
s_b = species_count_b(*x)
return numpy.where(s_a + s_b > 0,
60.0*s_a*(numpy.divide(0.5, s_a + s_b) +
numpy.divide(0.5, (s_a + 10*100))),
0.0)
def reaction_a_decay(*x):
return 1.0*species_count_a(*x)
def reaction_b_birth(*x):
"""
propensity of birth reaction for species b
"""
s_a = species_count_a(*x)
s_b = species_count_b(*x)
return numpy.where(s_a + s_b > 0,
60.0*s_b*(numpy.divide(0.5, s_a + s_b) +
numpy.divide(0.5, (s_b + 10*100))),
0.0)
def reaction_b_decay(*x):
return 1.0*species_count_b(*x)
return model.create(
name = 'T Cell clonoTypes',
reactions = (
'*->A',
'A->*',
'*->B',
'B->*',
),
propensities = (
reaction_a_birth,
reaction_a_decay,
reaction_b_birth,
reaction_b_decay,
),
transitions = (
(1, 0),
(-1, 0),
(0, 1),
(0, -1),
),
species = (
'A',
'B',
),
species_counts = (
species_count_a,
species_count_b,
),
shape = shape,
initial_state = (10, 10)
)
def create_time_dependencies():
"""
create time dependencies for the competing clonotypes model
"""
# 0-th and 2-nd reactions are scaled by the following
# time dependent factor
return {frozenset([0, 2]) : lambda t : math.exp(-0.1*t)}
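# Note (added): the dictionary above scales the propensities of reactions 0 and 2
# (the two birth reactions '*->A' and '*->B') by the common factor exp(-0.1*t),
# so both birth rates decay over the course of the solve.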
def main():
"""
Solves the competing clonotypes model and plots results
"""
import pylab
from cmepy import solver, recorder
m = create_model()
s = solver.create(
model = m,
sink = True,
time_dependencies = create_time_dependencies()
)
r = recorder.create(
(m.species, m.species_counts)
)
t_final = 15.0
steps_per_time = 1
time_steps = numpy.linspace(0.0, t_final, int(steps_per_time*t_final) + 1)
for t in time_steps:
s.step(t)
p, p_sink = s.y
        print('t : %.2f, truncation error : %.2g' % (t, p_sink))
r.write(t, p)
# display a series of contour plots of P(A, B; t) for varying t
measurement = r[('A', 'B')]
epsilon = 1.0e-5
for t, marginal in zip(measurement.times, measurement.distributions):
pylab.figure()
pylab.contourf(
marginal.compress(epsilon).to_dense(m.shape)
)
pylab.title('P(A, B; t = %.f)' % t)
pylab.ylabel('species A count')
pylab.xlabel('species B count')
pylab.show()
|
# Generated by Django 2.0.6 on 2018-07-24 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forms', '0005_form_fields'),
]
operations = [
migrations.AddField(
model_name='form',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='form_images'),
),
]
|
"""
File name : 2443.py
Author : 정지운
Date written : July 28, 2017
Purpose : prints a triangle of stars.
"""
# Input
num = int(input())
# Output
for i in range(num):
print(' ' * i, end = '')
print('*' * (2 * (num - i) - 1))
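# Example output (added for reference): with num = 3 the loop prints
# *****
#  ***
#   *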
|
""" Analysis Module for Pyneal Real-time Scan
These tools will set up and apply the specified analysis steps to incoming
volume data during a real-time scan
"""
import os
import sys
import logging
import importlib
import numpy as np
import nibabel as nib
class Analyzer:
""" Analysis Class
This is the main Analysis module that gets instantiated by Pyneal, and will
handle executing the specific analyses throughout the scan. The specific
analysis functions that get used will be based on the analyses specified
in the settings dictionary that gets passed in.
"""
def __init__(self, settings):
""" Initialize the class
Parameters
----------
settings : dict
dictionary that contains all of the pyneal settings for the current
session. This dictionary is loaded/configured by the GUI once
Pyneal is first launched
"""
# set up logger
self.logger = logging.getLogger('PynealLog')
# create reference to settings dict
self.settings = settings
        ### Format the mask. If the settings specify that the mask should
# be weighted, create separate vars for the weights and mask. Convert
# mask to boolean array
mask_img = nib.load(settings['maskFile'])
if settings['maskIsWeighted'] is True:
self.weightMask = True
self.weights = mask_img.get_fdata().copy()
self.mask = mask_img.get_fdata() > 0
else:
self.weightMask = False
self.mask = mask_img.get_fdata() > 0
### Set the appropriate analysis function based on the settings
if settings['analysisChoice'] == 'Average':
self.analysisFunc = self.averageFromMask
elif settings['analysisChoice'] == 'Median':
self.analysisFunc = self.medianFromMask
else:
# must be a custom analysis script
# get the path to the custom analysis file and import it
customAnalysisDir, customAnalysisName = os.path.split(settings['analysisChoice'])
sys.path.append(customAnalysisDir)
customAnalysisModule = importlib.import_module(customAnalysisName.split('.')[0])
# create instance of customAnalysis class, pass in mask reference
customAnalysis = customAnalysisModule.CustomAnalysis(settings['maskFile'],
settings['maskIsWeighted'],
settings['numTimepts'])
            # define the analysis func for the custom analysis (should be the 'compute'
            # method of the customAnalysis template)
self.analysisFunc = customAnalysis.compute
def runAnalysis(self, vol, volIdx):
""" Analyze the supplied volume
This is a generic function that Pyneal can call in order to execute the
unique analysis routines setup for this session. The specific analysis
routines are contained in the `analysisFunc` function, and will be
set up by the `__init__` method of this class.
Every analysisFunc will have access to the volume data and the `volIdx`
(0-based index). Not every `analysisFunc` will use the `volIdx` for
        anything (e.g. averageFromMask), but it is included anyway so that any
custom analysis scripts that need it have access to it
Parameters
----------
vol : numpy-array
3D array of voxel data for the current volume
volIdx : int
0-based index indicating where, in time (4th dimension), the volume
belongs
Returns
-------
output : dict
dictionary containing key:value pair(s) for analysis results
specific to the current volume
"""
self.logger.debug('started volIdx {}'.format(volIdx))
# submit vol and volIdx to the specified analysis function
output = self.analysisFunc(vol, volIdx)
self.logger.info('analyzed volIdx {}'.format(volIdx))
return output
def averageFromMask(self, vol, volIdx):
""" Compute the average voxel activation within the mask.
Note: np.average has weights option, np.mean doesn't
Parameters
----------
vol : numpy-array
3D array of voxel data for the current volume
volIdx : int
0-based index indicating where, in time (4th dimension), the volume
belongs
Returns
-------
dict
{'weightedAverage': ####} or {'average': ####}
"""
if self.weightMask:
result = np.average(vol[self.mask], weights=self.weights[self.mask])
return {'weightedAverage': np.round(result, decimals=2)}
else:
result = np.mean(vol[self.mask])
return {'average': np.round(result, decimals=2)}
def medianFromMask(self, vol, volIdx):
""" Compute the median voxel activation within the mask
Parameters
----------
vol : numpy-array
3D array of voxel data for the current volume
volIdx : int
0-based index indicating where, in time (4th dimension), the volume
belongs
Returns
-------
dict
{'weightedMedian': ####} or {'median': ####}
See Also
--------
Weighted median algorithm from: https://pypi.python.org/pypi/weightedstats/0.2
"""
        if self.weightMask:
            data = vol[self.mask]
            weights = self.weights[self.mask]
            sorted_data, sorted_weights = map(np.array, zip(*sorted(zip(data, weights))))
            midpoint = 0.5 * sum(sorted_weights)
            if any(weights > midpoint):
                # a single weight dominates, so its datum is the weighted median
                result = (data[weights == np.max(weights)])[0]
            else:
                cumulative_weight = np.cumsum(sorted_weights)
                below_midpoint_index = np.where(cumulative_weight <= midpoint)[0][-1]
                if cumulative_weight[below_midpoint_index] == midpoint:
                    result = np.mean(sorted_data[below_midpoint_index:below_midpoint_index + 2])
                else:
                    result = sorted_data[below_midpoint_index + 1]
            return {'weightedMedian': np.round(result, decimals=2)}
else:
# take the median of the voxels in the mask
result = np.median(vol[self.mask])
return {'median': np.round(result, decimals=2)}
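# Minimal usage sketch (added; the settings values and file name below are
# assumptions, not Pyneal defaults):
#
#   settings = {'maskFile': 'mask.nii.gz',
#               'maskIsWeighted': False,
#               'numTimepts': 200,
#               'analysisChoice': 'Average'}
#   analyzer = Analyzer(settings)
#   result = analyzer.runAnalysis(vol, volIdx=0)   # vol: 3D numpy array matching the mask grid
#   # -> e.g. {'average': 1234.56}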
|
from setuptools import setup, find_packages
with open('requirements.txt') as f:
requirements = [l for l in f.read().splitlines() if l]
setup(
name="senet",
author="Kan HUANG",
# install_requires=requirements,
python_requires=">=3.6",
version="0.0.1",
packages=find_packages()
)
|
from random import sample
from exercises import *
from time import sleep
from datetime import date
today = str(date.today())
log = open("/home/billy/Desktop/Workout " + today + ".txt", "w")
#Create the generator class which will actually create the routines according
#to the desired number of days per week.
class Generator(object):
def get_goal(self):
#Get input from user for goals and store it in a variable called
#"goal".
goal = input("Is your goal to gain strength, endurance, or hypertrophy?\n>>> ")
goal = goal.lower()
#This section changes the information and format printed out
#according to the goal.
while True:
if "strength" in goal:
self.sets = "5-8"
self.target_reps = "1-6"
self.actual_reps = "__/__/__/__/__/__/__/__"
self.template = '| {:^38} | {:^7} | {:^6} | {:^12} | {:^24} |'
break
elif "endurance" in goal:
self.sets = "1-3"
self.target_reps = "15-20"
self.actual_reps = "__/__/__"
self.template = '| {:^50} | {:^7} | {:^6} | {:^12} | {:^12} |'
break
elif "hypertrophy" in goal:
self.sets = "4"
self.target_reps = "8-12"
self.actual_reps = "__/__/__/__"
self.template = '| {:^50} | {:^7} | {:^6} | {:^12} | {:^12} |'
break
else:
print ("Please enter strength, endurance, or hypertrophy.")
goal = input("Is your goal to gain strength, endurance, or hypertrophy?\n>>> ")
goal = goal.lower()
return self.sets, self.target_reps, self.actual_reps, self.template
def get_experience(self):
#Ask the user how much experience they have working out and
#store that number in a variable called "experience".
self.experience = input("How long have you been working out for?\n1. 0-6 months\n2. 6 months - 2 years\n3. 2+ years\n>>> ")
#Loop through the input request until the user gives a number.
#The loop continues until a number is given.
while True:
try:
self.experience = int(self.experience)
except ValueError:
print ("Oops, please enter either 1, 2, or 3.")
self.experience = input("How long have you been working out for?\n1. 0-6 months\n2. 6 months - 2 years\n3. 2+ years\n>>> ")
else:
break
return self.experience
def check_experience(self, experience):
#This function verifies that the number given was within the range of
#options. It can probably be edited out for the mobile versions.
while self.experience > 3 or self.experience < 1:
print("Please choose between choice 1, 2, or 3.")
self.experience = Generator.get_experience(self)
else:
pass
return self.experience
def get_frequency(self):
#Ask the user how many days per week they want to work out and store
#that number in a variable called "days".
self.days = input("How many days would you like to workout this week?\n>>> ")
#Loop through the input request until the user gives a number.
#The loop continues until a number is given.
while True:
try:
self.days = int(self.days)
except ValueError:
print ("Oops, try entering a number, like 3.")
self.days = input("How many days would you like to workout this week?\n>>> ")
else:
break
return self.days
def check_frequency(self, days):
#This function verifies that the number of days to workout is
#between 1 and 6.
while self.days >= 7 or self.days < 1:
if self.days == 7:
print("You need to take at least one rest day per week.")
self.days = Generator.get_frequency(self)
elif self.days < 1:
print("You need to work out at least one day per week.")
self.days = Generator.get_frequency(self)
elif self.days > 7:
print("There are only 7 days per week!")
self.days = Generator.get_frequency(self)
else:
pass
return self.days
    #This function takes the user's preferences for each given category
#of exercises and if the user says they do like using a given piece of
# equipment/style of working out, then the list of those exercises
#for each muscle group are added to the main list for each muscle group.
#If the user says they don't like a certain type of exercise,
#then the list of those exercises is just ignored. Only exercises
#found in the main exercises list are used when generating the workout.
def user_preference(self, equipment0, equipment1, equipment2, equipment3, equipment4, equipment5, equipment6, equipment7, equipment8, name):
preference = input("Do you like using and/or have access to {!s}?\n>>> ".format(name))
while True:
if "y" in preference:
for exercise in equipment0:
Chest.exercises.append(exercise)
for exercise in equipment1:
Back.exercises.append(exercise)
for exercise in equipment2:
Legs.exercises.append(exercise)
for exercise in equipment3:
Lower_Legs.exercises.append(exercise)
for exercise in equipment4:
Biceps.exercises.append(exercise)
for exercise in equipment5:
Triceps.exercises.append(exercise)
for exercise in equipment6:
Shoulders.exercises.append(exercise)
for exercise in equipment7:
Forearms.exercises.append(exercise)
for exercise in equipment8:
Abs.exercises.append(exercise)
break
elif "n" in preference:
break
else:
print ("Sorry, please try inputting yes or no.")
preference = input("Do you like using and/or have access to {!s}?\n>>> ".format(name))
return preference
def get_preferences(self):
#Here the function is called for each type of exercise to build
#the main exercise list which is the only list considered in
#generating the workout.
Generator.user_preference(self, Chest.selectorized, Back.selectorized, Legs.selectorized, Lower_Legs.selectorized, Biceps.selectorized, Triceps.selectorized, Shoulders.selectorized, Forearms.selectorized, Abs.selectorized, "selectorized equipment")
        #In order to remove some repetition, and since dumbbells and
#barbells are part of the free weights category, the program will
#only ask the user if they want to use dumbbells and barbells if
#they have already said that they like free weights. Otherwise,
#those two options are skipped.
fwpref = Generator.user_preference(self, Chest.free_weights, Back.free_weights, Legs.free_weights, Lower_Legs.free_weights, Biceps.free_weights, Triceps.free_weights, Shoulders.free_weights, Forearms.free_weights, Abs.free_weights, "free weights")
if "y" in fwpref:
Generator.user_preference(self, Chest.dumbbell, Back.dumbbell, Legs.dumbbell, Lower_Legs.dumbbell, Biceps.dumbbell, Triceps.dumbbell, Shoulders.dumbbell, Forearms.dumbbell, Abs.dumbbell, "dumbbell")
Generator.user_preference(self, Chest.barbell, Back.barbell, Legs.barbell, Lower_Legs.barbell, Biceps.barbell, Triceps.barbell, Shoulders.barbell, Forearms.barbell, Abs.barbell, "barbell")
else:
pass
Generator.user_preference(self, Chest.calisthenics, Back.calisthenics, Legs.calisthenics, Lower_Legs.calisthenics, Biceps.calisthenics, Triceps.calisthenics, Shoulders.calisthenics, Forearms.calisthenics, Abs.calisthenics, "calisthenic exercises")
Generator.user_preference(self, Chest.cable, Back.cable, Legs.cable, Lower_Legs.cable, Biceps.cable, Triceps.cable, Shoulders.cable, Forearms.cable, Abs.cable, "cable equipment")
def workout_title(self, days, experience):
#This function prints out the title of the workout, according to
#how many days the user will workout and their experience.
if experience == 1:
print("-" * 103, file = log)
print('| {:^99} |'.format("Beginner - " + str(days) + " Day Split"), file = log)
elif experience == 2:
print("-" * 103, file = log)
print('| {:^99} |'.format("Intermediate - " + str(days) + " Day Split"), file = log)
elif experience == 3:
print("-" * 103, file = log)
print('| {:^99} |'.format("Advanced - " + str(days) + " Day Split"), file = log)
#The format for the header, taking the name of the workout day as an
#argument.
def header(workout):
print("|", "-" * 99, "|", file = log)
print('| {:^99} |'.format(workout), file = log)
print("|", "-" * 99, "|", file = log)
def section(name):
        #This function prints out the format for the workout, according to
#which section of the workout is being printed out.
if name == "Warm Ups":
print('| {:<99} |'.format(name), file = log)
print("|", "-" * 99, "|", file = log)
print('| {:^99} |'.format("Refer to the " + name + " section of the app for the muscles you are training."), file = log)
print("|", "-" * 99, "|", file = log)
elif name == "Cool Down":
print("|", "-" * 99, "|", file = log)
print('| {:<99} |'.format(name), file = log)
print("|", "-" * 99, "|", file = log)
print('| {:^99} |'.format("Refer to the " + name + " section of the app for the muscles you are training."), file = log)
else:
print('| {:<99} |'.format(name), file = log)
print("|", "-" * 99, "|", file = log)
#This formats the titles of the columns.
def column_titles(self):
print (self.template.format("Exercise", "Weight", "Sets", "Target Reps", "Actual Reps"), file = log)
print("|", "-" * 99, "|", file = log)
#This closes up the table at the bottom and adds a little note.
def footer():
print("|", "-" * 99, "|", file = log)
print('| {:^99} |'.format("Complete this routine for 2-3 weeks and then come generate a new one!"), file = log)
print("-" * 103, file = log)
#This method prints out all of the exercises for each given muscle group.
def print_exercises(self, muscle_group):
for item in muscle_group:
print (self.template.format(item, '_____', self.sets, self.target_reps, self.actual_reps), file = log)
#The following functions print out the exercises for the given muscle
#groups.
def generate_cardio(self, quantity):
Generator.header("Cardio Day")
Generator.section("Warm Ups")
Generator.section("Workout")
Generator.column_titles(self)
cardio_exercises = sample(Cardio.exercises, quantity)
Generator.print_exercises(self, cardio_exercises)
Generator.section("Cool Down")
def generate_full_body(self, large_muscle, small_muscle):
Generator.header("Full Body Day")
#The sample method grabs a number of random exercises from the
#given list and stores them in a variable according to the exercise.
Generator.section("Warm Ups")
#This section prints out the exercises in a list according to the
#template above.
Generator.section("Workout")
Generator.column_titles(self)
chest_exercises = sample(Chest.exercises, large_muscle)
back_exercises = sample(Back.exercises, large_muscle)
legs_exercises = sample(Legs.exercises, large_muscle)
lower_legs_exercises = sample(Lower_Legs.exercises, small_muscle)
biceps_exercises = sample(Biceps.exercises, small_muscle)
triceps_exercises = sample(Triceps.exercises, small_muscle)
shoulders_exercises = sample(Shoulders.exercises, small_muscle)
forearms_exercises = sample(Forearms.exercises, small_muscle)
abs_exercises = sample(Abs.exercises, small_muscle)
Generator.print_exercises(self, chest_exercises)
Generator.print_exercises(self, back_exercises)
Generator.print_exercises(self, legs_exercises)
Generator.print_exercises(self, lower_legs_exercises)
Generator.print_exercises(self, biceps_exercises)
Generator.print_exercises(self, triceps_exercises)
Generator.print_exercises(self, shoulders_exercises)
Generator.print_exercises(self, forearms_exercises)
Generator.print_exercises(self, abs_exercises)
Generator.section("Cool Down")
def generate_upper_body(self, large_muscle, small_muscle):
Generator.header("Upper Body Day")
Generator.section("Warm Ups")
Generator.section("Workout")
Generator.column_titles(self)
chest_exercises = sample(Chest.exercises, large_muscle)
back_exercises = sample(Back.exercises, large_muscle)
biceps_exercises = sample(Biceps.exercises, small_muscle)
triceps_exercises = sample(Triceps.exercises, small_muscle)
shoulders_exercises = sample(Shoulders.exercises, small_muscle)
forearms_exercises = sample(Forearms.exercises, small_muscle)
Generator.print_exercises(self, chest_exercises)
Generator.print_exercises(self, back_exercises)
Generator.print_exercises(self, biceps_exercises)
Generator.print_exercises(self, triceps_exercises)
Generator.print_exercises(self, shoulders_exercises)
Generator.print_exercises(self, forearms_exercises)
Generator.section("Cool Down")
def generate_lower_body(self, large_muscle, small_muscle):
Generator.header("Lower Body Day")
legs_exercises = sample(Legs.exercises, large_muscle)
lower_legs_exercises = sample(Lower_Legs.exercises, small_muscle)
abs_exercises = sample(Abs.exercises, small_muscle)
Generator.section("Warm Ups")
Generator.section("Workout")
Generator.column_titles(self)
Generator.print_exercises(self, legs_exercises)
Generator.print_exercises(self, lower_legs_exercises)
Generator.print_exercises(self, abs_exercises)
Generator.section("Cool Down")
def generate_chest(self, days, large_muscle, small_muscle):
Generator.header("Chest Day")
chest_exercises = sample(Chest.exercises, large_muscle)
triceps_exercises = sample(Triceps.exercises, small_muscle)
Generator.section("Warm Ups")
Generator.section("Workout")
Generator.column_titles(self)
Generator.print_exercises(self, chest_exercises)
Generator.print_exercises(self, triceps_exercises)
if days == 3:
shoulders_exercises = sample(Shoulders.exercises, small_muscle)
Generator.print_exercises(self, shoulders_exercises)
else:
pass
Generator.section("Cool Down")
def generate_back(self, days, large_muscle, small_muscle):
Generator.header("Back Day")
back_exercises = sample(Back.exercises, large_muscle)
biceps_exercises = sample(Biceps.exercises, small_muscle)
Generator.section("Warm Ups")
Generator.section("Workout")
Generator.column_titles(self)
Generator.print_exercises(self, back_exercises)
Generator.print_exercises(self, biceps_exercises)
if days == 3:
forearms_exercises = sample(Forearms.exercises, small_muscle)
Generator.print_exercises(self, forearms_exercises)
else:
pass
Generator.section("Cool Down")
def generate_legs(self, days, large_muscle, small_muscle):
Generator.header("Leg Day")
legs_exercises = sample(Legs.exercises, large_muscle)
lower_legs_exercises = sample(Lower_Legs.exercises, small_muscle)
Generator.section("Warm Ups")
Generator.section("Workout")
Generator.column_titles(self)
Generator.print_exercises(self, legs_exercises)
Generator.print_exercises(self, lower_legs_exercises)
if days == 3:
abs_exercises = sample(Abs.exercises, small_muscle)
Generator.print_exercises(self, abs_exercises)
else:
pass
Generator.section("Cool Down")
def generate_arms(self, small_muscle):
Generator.header("Arm Day")
shoulders_exercises = sample(Shoulders.exercises, small_muscle)
forearms_exercises = sample(Forearms.exercises, small_muscle)
abs_exercises = sample(Abs.exercises, small_muscle)
Generator.section("Warm Ups")
Generator.section("Workout")
Generator.column_titles(self)
Generator.print_exercises(self, shoulders_exercises)
Generator.print_exercises(self, forearms_exercises)
Generator.print_exercises(self, abs_exercises)
Generator.section("Cool Down")
def create_workout(self, experience, days):
#This function puts all the exercises together according to the format
#given.
Generator.workout_title(self, days, experience)
if experience == 1:
#Beginners will always have a cardio day if it is an even-numbered
#day and a weights day if it is an odd-numbered day. This function
#checks which day it is and provides the workout accordingly.
for day in range(days):
if day % 2 == 0:
Generator.generate_cardio(self, 1)
else:
Generator.generate_full_body(self, 1, 1)
Generator.footer()
elif experience == 2:
#Intermediate lifters should have cardio on every third day and
#weights days on every even pair of days. If only one day is
#requested, then it will be a cardio day.
workout = days // 2
cardio = (days % 2) * workout
if days == 1:
Generator.generate_cardio(self, 3)
elif days < 5:
for day in range(workout):
Generator.generate_upper_body(self, 1, 1)
Generator.generate_lower_body(self, 2, 1)
for day in range(cardio):
Generator.generate_cardio(self, 3)
else:
for day in range(0, 2):
Generator.generate_upper_body(self, 1, 1)
Generator.generate_lower_body(self, 2, 1)
for day in range(0, days - 4):
Generator.generate_cardio(self, 3)
Generator.footer()
elif experience == 3:
#Advanced lifters have more specific rules according to how many
#days per week they can/want to work out. If the user only wants
#to work out 1 day of the week, a full body workout will be
#generated.
if days == 1:
Generator.generate_full_body(self, 1, 1)
#A 2 day split should consist of an upper body and a lower body day.
elif days == 2:
Generator.generate_upper_body(self, 2, 1)
Generator.generate_lower_body(self, 2, 1)
#A 3 day split will have a chest day, back day, and leg day.
elif days == 3:
Generator.generate_chest(self, days, 3, 2)
Generator.generate_back(self, days, 3, 2)
Generator.generate_legs(self, days, 3, 2)
#A 4 day split should have a Chest Day, Back Day, Leg Day,
#and Shoulder/Forearm/Ab Day. Any additional days should just be
#cardio days.
elif days >= 4:
Generator.generate_chest(self, days, 3, 2)
Generator.generate_back(self, days, 3, 2)
Generator.generate_legs(self, days, 3, 2)
Generator.generate_arms(self, 2)
for day in range(0, days - 4):
Generator.generate_cardio(self, 3)
Generator.footer()
class Engine(object):
#This function runs all of the functions required to make the program run.
def start(Generator):
Generator.get_goal()
Generator.get_preferences()
experience = Generator.get_experience()
experience = Generator.check_experience(experience)
days = Generator.get_frequency()
days = Generator.check_frequency(days)
Generator.create_workout(experience, days)
log.close()
gen1 = Generator()
Engine.start(gen1)
|
import FWCore.ParameterSet.Config as cms
electronHcalTowerIsolationScone = cms.EDProducer("EgammaTowerIsolationProducer",
absolut = cms.bool(True),
intRadius = cms.double(0.15), # to be orthogonal with the H/E ID cut
extRadius = cms.double(0.3),
towerProducer = cms.InputTag("towerMaker"),
etMin = cms.double(0.0),
Depth = cms.int32(-1),
emObjectProducer = cms.InputTag("gedGsfElectrons")
)
electronHcalDepth1TowerIsolationScone = cms.EDProducer("EgammaTowerIsolationProducer",
absolut = cms.bool(True),
intRadius = cms.double(0.15), # to be orthogonal with the H/E ID cut
extRadius = cms.double(0.3),
towerProducer = cms.InputTag("towerMaker"),
etMin = cms.double(0.0),
Depth = cms.int32(1),
emObjectProducer = cms.InputTag("gedGsfElectrons")
)
electronHcalDepth2TowerIsolationScone = cms.EDProducer("EgammaTowerIsolationProducer",
absolut = cms.bool(True),
intRadius = cms.double(0.15), # to be orthogonal with the H/E ID cut
extRadius = cms.double(0.3),
towerProducer = cms.InputTag("towerMaker"),
etMin = cms.double(0.0),
Depth = cms.int32(2),
emObjectProducer = cms.InputTag("gedGsfElectrons")
)
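# Note (added): the three producers above are identical except for the Depth
# parameter: -1 presumably sums over all HCAL depths, while 1 and 2 restrict the
# isolation sum to depth-1 and depth-2 towers respectively.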
|
import logging
import os
import sys
from db.setup import SessionWrapper
from logger import logging_config
from orm.personality_tables import *
from save_liwc_scores.orm.scores_tables import *
from twit_personality.training.datasetUtils import parseFastText
from twit_personality.training.embeddings import transformTextForTesting
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
import numpy as np
#References
#[1] T. Yarkoni, "Personality in 100,000 words: A large-scale analysis of personality and word use among bloggers," Journal of research in
#personality, vol. 44, no. 3, pp.363-373, 2010
def get_openness(score):
value = ( - 0.21 * score.pronoun - 0.16 * score.i - 0.1 * score.we - 0.12 * score.you - 0.13 * score.negate - 0.11 * score.assent
+ 0.2 * score.article - 0.12 * score.affect - 0.15 * score.posemo
- 0.12 * score.discrep - 0.08 * score.hear - 0.14 * score.social - 0.17 * score.family - 0.22 * score.time
- 0.11 * score.space - 0.22 * score.motion - 0.17 * score.leisure
- 0.2 * score.home + 0.15 * score.death - 0.15 * score.ingest)
if tool == 'liwc07':
#categories indicated by Yarkoni[1] missing in liwc 2007 dictionary: first person, positive feelings, sensory processes, other references,
# sports, physical states, sleep, grooming
value = value + 0.17 * score.preps - 0.09 * score.cogmech - 0.16 * score.past - 0.16 * score.present - 0.09 * score.humans - 0.11 * score.incl
elif tool == 'liwc15':
#categories indicated by Yarkoni[1] missing in liwc 2015 dictionary: first person, positive feelings, sensory processes, other references,
# humans, inclusive, sports, physical states, sleep, grooming
value = value + 0.17 * score.prep - 0.09 * score.cogproc - 0.16 * score.focuspast - 0.16 * score.focuspresent
return value
def get_conscientiousness(score):
value = ( - 0.17 * score.negate - 0.18 * score.negemo - 0.19 * score.anger - 0.11 * score.sad - 0.12 * score.cause
- 0.13 * score.discrep - 0.1 * score.tentat - 0.1 * score.certain - 0.12 * score.hear + 0.09 * score.time + 0.14 * score.achieve
- 0.12 * score.death - 0.14 * score.swear)
if tool == 'liwc07':
#categories indicated by Yarkoni[1] missing in liwc 2007 dictionary: sensory processes, music
value = value - 0.11 * score.cogmech - 0.12 * score.humans - 0.16 * score.excl
elif tool == 'liwc15':
#categories indicated by Yarkoni[1] missing in liwc 2015 dictionary: sensory processes, humans, exclusive, music
value = value - 0.11 * score.cogproc
return value
def get_extraversion(score):
value = ( 0.11 * score.we + 0.16 * score.you - 0.12 * score.number + 0.1 * score.posemo - 0.09 * score.cause - 0.11 * score.tentat
+ 0.1 * score.certain + 0.12 * score.hear + 0.15 * score.social + 0.15 * score.friend + 0.09 * score.family
- 0.08 * score.work - 0.09 * score.achieve + 0.08 * score.leisure + 0.11 * score.relig + 0.1 * score.body
+ 0.17 * score.sexual)
if tool == 'liwc07':
#categories indicated by Yarkoni[1] missing in liwc 2007 dictionary: positive feelings, sensory processes, communication, other references,
# occupation, music, physical states
value = value + 0.13 * score.humans + 0.09 * score.incl
#categories indicated by Yarkoni[1] missing in liwc 2015 dictionary: positive feelings, sensory processes, communication, other references, humans,
# inclusive, occupation, music, physical states
return value
def get_agreeableness(score):
value = ( 0.11 * score.pronoun + 0.18 * score.we + 0.11 * score.number + 0.18 * score.posemo - 0.15 * score.negemo - 0.23 * score.anger
- 0.11 * score.cause + 0.09 * score.see + 0.1 * score.feel + 0.13 * score.social + 0.11 * score.friend + 0.19 * score.family
+ 0.12 * score.time + 0.16 * score.space + 0.14 * score.motion + 0.15 * score.leisure + 0.19 * score.home
- 0.11 * score.money - 0.13 * score.death + 0.09 * score.body + 0.08 * score.sexual - 0.21 * score.swear)
if tool == 'liwc07':
#categories indicated by Yarkoni[1] missing in liwc 2007 dictionary: positive feelings, other references, music, physical states, sleep
value = value + 0.1 * score.past + 0.18 * score.incl
elif tool == 'liwc15':
#categories indicated by Yarkoni[1] missing in liwc 2015 dictionary: positive feelings, other references, inclusive, music, physical states, sleep
value = value + 0.1 * score.focuspast
return value
def get_neuroticism(score):
value = ( 0.12 * score.i - 0.15 * score.you + 0.11 * score.negate - 0.11 * score.article + 0.16 * score.negemo + 0.17 * score.anx
+ 0.13 * score.anger + 0.1 * score.sad + 0.11 * score.cause + 0.13 * score.discrep
            + 0.12 * score.tentat + 0.13 * score.certain + 0.1 * score.feel - 0.08 * score.friend - 0.09 * score.space + 0.11 * score.swear)
if tool == 'liwc07':
#categories indicated by Yarkoni[1] missing in liwc 2007 dictionary: first person, other references, sleep
value = value + 0.13 * score.cogmech + 0.1 * score.excl
elif tool == 'liwc15':
#categories indicated by Yarkoni[1] missing in liwc 2015 dictionary: first person, other references, exclusive, sleep
value = value + 0.13 * score.cogproc
return value
def get_profile_liwc():
if tool == 'liwc07':
scores = session.query(Liwc2007Scores)
elif tool == 'liwc15':
scores = session.query(Liwc2015Scores)
for score in scores:
big5 = {}
big5['openness'] = get_openness(score)
big5['conscientiousness'] = get_conscientiousness(score)
big5['extraversion'] = get_extraversion(score)
big5['agreeableness'] = get_agreeableness(score)
big5['neuroticism'] = get_neuroticism(score)
if tool == 'liwc07':
lpm = Liwc2007ProjectMonth(dev_uid=score.dev_uid, project_name=score.project_name, month=score.month,
email_count=score.email_count, word_count=score.wc, scores=big5)
elif tool == 'liwc15':
lpm = Liwc2015ProjectMonth(dev_uid=score.dev_uid, project_name=score.project_name, month=score.month,
email_count=score.email_count, word_count=score.wc, scores=big5)
session.add(lpm)
session.commit()
def get_profile_twit_pers():
dataset_path = "twit_personality/FastText/dataset.vec"
tweet_threshold = 3
vectorizer = CountVectorizer(stop_words="english", analyzer="word")
analyzer = vectorizer.build_analyzer()
logger.info('Loading embeddings dataset...')
wordDictionary = parseFastText(dataset_path)
logger.info('Data successfully loaded.')
content = os.listdir("../export_content/content/")
big5 = {}
i = 0
for file in content:
filename = file.split('_')
uid = filename[0]
p_name = filename[1]
_month = filename[2].replace(".txt", "")
lines=[]
with open('../export_content/content/'+file) as f:
for line in f:
lines.append(line)
try:
content = open("../export_content/content/"+file, "r").read()
content = transformTextForTesting(wordDictionary, tweet_threshold, lines, "conc")
logger.info("Embeddings computed.")
except:
logger.info("Not enough words for prediction.")
continue
scores = {}
for trait in ["O","C","E","A","N"]:
model = joblib.load("twit_personality/training/Models/SVM/SVM_"+trait+".pkl")
preds = model.predict(content)
scores[trait] = float(str(np.mean(np.array(preds) ) ) [0:5] )
big5['openness'] = scores["O"]
big5['conscientiousness'] = scores["C"]
big5['extraversion'] = scores["E"]
big5['agreeableness'] = scores["A"]
big5['neuroticism'] = scores["N"]
tpm = TwitPersProjectMonth(dev_uid=uid, project_name=p_name, month=_month,
email_count=None, word_count=None, scores=big5)
session.add(tpm)
session.commit()
def reset_table():
if tool == 'liwc07':
session.query(Liwc2007ProjectMonth).delete()
elif tool == 'liwc15':
session.query(Liwc2015ProjectMonth).delete()
elif tool == 'twitPers':
session.query(TwitPersProjectMonth).delete()
logger.info('Done resetting table')
if __name__ == '__main__':
logger = logging_config.get_logger('big5', console_level=logging.DEBUG)
SessionWrapper.load_config('../db/cfg/setup.yml')
session = SessionWrapper.new(init=True)
if len(sys.argv) >= 2:
tool = sys.argv[1]
else:
logger.error('Missing mandatory first param for tool: \'liwc07\', \'liwc15\' or \'twitPers\' expected')
sys.exit(-1)
try:
reset_table()
if tool == 'liwc07' or tool == 'liwc15':
get_profile_liwc()
elif tool == 'twitPers':
get_profile_twit_pers()
logger.info('Done getting personality scores')
except KeyboardInterrupt:
logger.error('Received Ctrl-C or other break signal. Exiting.')
|
"""
Created on Mon Apr 14 15:48:29 2014
@author: Vasanthi Vuppuluri
Original code inspired by: Tanmay Thakur
"""
# PURPOSE:
#---------
# This was originally designed for the Azure Marketplace Bing Search API (released Aug 2012);
# the search() method below now calls the Cognitive Services Bing Web Search v5.0 endpoint.
#
# Inspired by https://github.com/mlagace/Python-SimpleBing and
# http://social.msdn.microsoft.com/Forums/pl-PL/windowsazuretroubleshooting/thread/450293bb-fa86-46ef-be7e-9c18dfb991ad
import requests # Get from https://github.com/kennethreitz/requests
import string
import json
from urllib.parse import urlencode
from random import randint, sample
import time
from urllib.request import quote
import os
import http.client, urllib
class InvalidKeyException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def _cache_abs_path(cache_rel_path):
script_dir = os.path.dirname(__file__)
return os.path.join(script_dir, cache_rel_path)
class BingSearchAPI():
#bing_api = "https://api.datamarket.azure.com/Data.ashx/Bing/Search/Composite?" # Composite options searches everywhere, i.e. {web+image+video+news+spell}
def __init__(self, key):
self.key = key
self.diction = {}
# Set up a cache to remember the total number of hit searches retried
# Update the diction if search_phrase is not found
with open(_cache_abs_path("cache/bing_search_totals.cache"), 'r') as f:
for line in f:
phrase, hit = line.split('/----/')
try:
hit = ''.join(filter(lambda x: x.isdigit(), hit))
self.diction[phrase] = int(hit)
except Exception as e:
print("Diction cache error for " + hit)
def _set_Bing_API_key(self, key):
self.key = key
def replace_symbols(self, request):
# Custom urlencoder.
# They specifically want %27 as the quotation which is a single quote '
# We're going to map both ' and " to %27 to make it more python-esque
request = request.replace("'", '%27')
request = request.replace('"', '%27')
request = request.replace('+', '%2b')
request = request.replace(' ', '%20')
request = request.replace(':', '%3a')
return request
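    # Worked example (added): the substitutions above turn a quoted phrase query
    # into the percent-encoded form the API expects, e.g.
    #   replace_symbols('"open source" c++')  ->  '%27open%20source%27%20c%2b%2b'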
def search(self, params):
headers = {
# Request headers
'Ocp-Apim-Subscription-Key': self.key,
}
conn = http.client.HTTPSConnection('api.cognitive.microsoft.com')
conn.request("GET", "/bing/v5.0/search?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read().decode('utf-8')
result = json.loads(data)
conn.close()
return result
def search_total(self, _verbose, _search_phrase):
#_search_phrase_parsed = "%22" + _search_phrase.replace(' ', '+').strip(' ') + "%22" # %22 acts as quotes, facilitating a phrase search
#_search_phrase_parsed = "%22" + quote(_search_phrase.strip(' ')) + "%22"
#_bing_parameters = {'$format': 'json', '$top': 2}
_search_phrase_parsed = '"' + _search_phrase.strip(' ') + '"'
_bing_parameters = urllib.parse.urlencode({
# Request parameters
'q': _search_phrase_parsed,
'count': '2',
'offset': '0',
'mkt': 'en-us',
'safesearch': 'Moderate',
})
if _search_phrase in self.diction:
return self.diction[_search_phrase], self.key
else:
with open(_cache_abs_path("cache/bing_search_totals.cache"), 'a') as f:
count = 0
while True:
count = count + 1
try:
res = self.search(_bing_parameters)
if "statusCode" in res:
raise InvalidKeyException(res["statusCode"])
if "webPages" in res:
total_search_results = res["webPages"]["totalEstimatedMatches"]
else:
total_search_results = 0
if _verbose:
print('-----' + str(total_search_results) + '-----------')
total = int(total_search_results)
if(isinstance(total, int)):
if _verbose:
print('\t', _search_phrase_parsed.replace('+', ' ').replace('%22', ''), total)
pass
print("%s/----/%d" % (_search_phrase, total), file = f)
self.diction[_search_phrase] = total
return total, self.key
except InvalidKeyException as e:
if _verbose:
print('\tERROR: in bing.search() - search total\n\t' + str(e))
print('\tEither network connection error or Bing Api key expired. Search phrase: ' + _search_phrase_parsed)
if count < 10:
with open(_cache_abs_path("cache/Bing_API_keys.cache")) as keys_file:
keys = list()
for line in keys_file:
keys.append(line)
self.key = ''.join(filter(lambda x: (ord(x) < 128), sample(keys, 1)[0].strip(' \t\n\r')))
else:
#self.key = input("Please enter another Bing API key: ")
count = 0
exit(-1)
except Exception as e:
if _verbose:
print('\tERROR: in bing.search() - search total\n\t' + str(e))
print('\tERROR: in bing.search() - search total\n\t' + str(e))
#self.key = input("Please enter another Bing API key: ")
count = 0
exit(-1)
#return 0, self.key
|
"""
Command-line argument parsing with structured printing.
See the argparse python module for details to extend argument parsing.
\LegalBegin
Copyright 2019-2020 Aether Instinct LLC. All Rights Reserved
Licensed under the MIT License (the "License").
You may not use this file except in compliance with the License. You may
obtain a copy of the License at:
https://opensource.org/licenses/MIT
The software is provided "AS IS", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. in no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
\LegalEnd
"""
import argparse
import textwrap
class SmartFormatter(argparse.RawTextHelpFormatter):
""" Extend the argparse formatter to support indented multiline help. """
def _split_lines(self, text, width):
"""
Smart split of text.
Parameters:
text String.
width Maximum width.
Returns:
Returns list of lines.
"""
if text.startswith('R|'):
return text[2:].splitlines()
else:
            # fall back to the default HelpFormatter line wrapping
return argparse.HelpFormatter._split_lines(self, text, width)
def add_subparsers(argparser, helptext):
"""
Conditionally add subparsers to argument parser.
An ArgumentParser can only have one assigned subparsers.
The subparsers object is added to the argparser attributes.
argparser.subparsers
Parameters:
argparser ArgumentParser object.
helptext Help text.
Returns:
Returns subparsers object.
"""
try:
if argparser.subparsers is None:
argparser.subparsers = argparser.add_subparsers(help=helptext)
except AttributeError:
argparser.subparsers = argparser.add_subparsers(help=helptext)
return argparser.subparsers
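# Usage sketch (added; argument names below are illustrative assumptions):
#
#   parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
#   parser.add_argument('--mode', help='R|first line\nsecond line, kept verbatim')
#   subparsers = add_subparsers(parser, 'available commands')
#   subparsers.add_parser('run', help='run the tool')
#   args = parser.parse_args()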
|
import unittest
from conans.test.utils.tools import TestClient
from conans.paths import CONANFILE
from conans.test.utils.conanfile import TestConanFile
class VersionRangesErrorTest(unittest.TestCase):
def verbose_version_test(self):
client = TestClient()
conanfile = TestConanFile("MyPkg", "0.1", requires=["MyOtherPkg/[~0.1]@user/testing"])
client.save({CONANFILE: str(conanfile)})
error = client.run("install --build", ignore_error=True)
self.assertTrue(error)
self.assertIn("from requirement 'MyOtherPkg/[~0.1]@user/testing'", client.user_io.out)
|
class _RabbitConfig:
"""Parameters for connecting to the RabbitMQ server.
Properties:
* host [str] - The RabbitMQ server's host name (e.g. "localhost")"""
def __init__(self, host=None):
self.host = host
|
# Generated by Django 3.0.7 on 2020-09-04 12:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("ig_guidance", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(
name="igtemplate", options={"verbose_name": "Template"},
),
]
|
# Compute the square root of x
x = 2
# Without introducing imaginary numbers, a negative number has no square root
if x < 0:
print('ERROR')
else:
    # Set the required precision
    wuqiongxiao = 0.001
    a = x / 2
    if a < wuqiongxiao:
        a = wuqiongxiao
    # Iterate at most 40000 times to avoid an infinite loop
for i in range(40000):
b = x / a
delta = a - b
a = (a + b) /2
if delta < 0:
delta = -1*delta
        # Exit the loop once the error tolerance is met
if delta <= wuqiongxiao:
break
    # Print the final result
print(a)
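# Note (added for reference): this is Heron's method; each step replaces a with
# the average of a and x / a, so for x = 2 the loop converges to about 1.41421,
# within the 0.001 tolerance of the true square root.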
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from urllib.parse import urljoin
from typing import List
from base_parser import BaseParser
class GamespotCom_Parser(BaseParser):
def _parse(self) -> List[str]:
url = f'https://www.gamespot.com/search/?i=site&q={self.game_name}'
root = self.send_get(url, return_html=True)
for game_block_preview in root.select('.media-body'):
if not game_block_preview.select_one('.media-date'):
continue
a = game_block_preview.select_one('.media-title a')
title = self.get_norm_text(a)
if not self.is_found_game(title):
continue
url_game = urljoin(url, a['href'])
self.log_info(f'Load {url_game!r}')
game_block = self.send_get(url_game, return_html=True)
tag_object_stats = game_block.select_one('#object-stats-wrap')
if not tag_object_stats:
return []
genres = [self.get_norm_text(a) for a in tag_object_stats.select('a[href]') if '/genre/' in a['href']]
            # The first result that matches by name is good enough
return genres
self.log_info(f'Not found game {self.game_name!r}')
return []
def get_game_genres(game_name: str, *args, **kwargs) -> List[str]:
return GamespotCom_Parser(*args, **kwargs).get_game_genres(game_name)
if __name__ == '__main__':
from common import _common_test
_common_test(get_game_genres)
# Search 'Hellgate: London'...
# Genres: ['Role-Playing']
#
# Search 'The Incredible Adventures of Van Helsing'...
# Genres: ['Role-Playing', 'Action']
#
# Search 'Dark Souls: Prepare to Die Edition'...
# Genres: []
#
# Search 'Twin Sector'...
# Genres: ['Action', 'Adventure']
#
# Search 'Call of Cthulhu: Dark Corners of the Earth'...
# Genres: ['Adventure', 'Survival', '3D', 'Action']
|
import sys
from crdt.ops import OpAddRightLocal
from crdt_app import CRDTApp
def run_p2p(my_addr, known_peers, encrypt=False, priv_key=None, my_cookie=None, other_cookies=None):
auth_cookies = None
if other_cookies is not None:
auth_cookies = dict(zip(known_peers, other_cookies))
app = CRDTApp(my_addr.replace('.', '')[-6:], 8889, my_addr,
ops_to_do=[OpAddRightLocal(my_addr[-1])] * 1, known_peers=known_peers,
encrypt=encrypt, priv_key=priv_key, auth_cookies=auth_cookies, my_cookie=my_cookie)
if __name__ == '__main__':
args = sys.argv
print('got args {}'.format(args))
if len(args) == 4:
# without Tor
run_p2p(
args[1],
[] if args[2] == ':' else args[2].split(':')[1:-1],
encrypt=args[3] == '1'
)
elif len(args) == 5:
# with Tor
run_p2p(
args[1],
[] if args[2] == ':' else args[2].split(':')[1:-1],
encrypt=args[3] == '1',
priv_key=args[4]
)
else:
# with Tor + auth
run_p2p(
args[1],
[] if args[2] == ':' else args[2].split(':')[1:-1],
encrypt=args[3] == '1',
priv_key=args[4],
my_cookie=args[5],
other_cookies=None if args[6] == '|' else args[6].split('|')[1:-1]
)
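# Invocation sketch (added; inferred from the argument handling above):
#   python <this script> <my_addr> ':peer1:peer2:' 1 [priv_key] [my_cookie] ['|cookie1|cookie2|']
# The peer list is ':'-delimited (a lone ':' means no known peers), the third
# argument toggles encryption ('1' = on), and the optional trailing arguments
# enable Tor and cookie-based authentication.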
|
from channels.generic.websocket import JsonWebsocketConsumer
class UpdatesConsumer(JsonWebsocketConsumer):
def connect(self):
self.accept()
def receive_json(self, content):
if 'type' in content and content['type'] == 'keep-alive':
self.send_json({'type': 'keep-alive'})
    def disconnect(self, message, **kwargs):
pass
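# Routing sketch (added; the exact form depends on the Channels version in use):
#   from django.urls import path
#   websocket_urlpatterns = [path('ws/updates/', UpdatesConsumer.as_asgi())]  # Channels 3+
#   # On Channels 2 the consumer class itself is referenced instead of .as_asgi().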
|
import argparse
import colorsys
import math
import os
import random
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pyglet
import trimesh
from PIL import Image, ImageEnhance
from tqdm import tqdm
from OpenGL.GL import GL_LINEAR_MIPMAP_LINEAR
import pyrender
from archiver import Archiver, SceneData
from pyrender import (DirectionalLight, Mesh, Node, OffscreenRenderer,
PerspectiveCamera, PointLight, RenderFlags, Scene,
Primitive)
texture_directory = os.path.join(os.path.dirname(__file__), "..", "textures")
object_directory = os.path.join(os.path.dirname(__file__), "objects")
floor_textures = [
"{}/lg_floor_d.tga".format(texture_directory),
"{}/lg_style_01_floor_blue_d.tga".format(texture_directory),
"{}/lg_style_01_floor_orange_bright_d.tga".format(texture_directory),
]
wall_textures = [
"{}/lg_style_01_wall_cerise_d.tga".format(texture_directory),
"{}/lg_style_01_wall_green_bright_d.tga".format(texture_directory),
"{}/lg_style_01_wall_red_bright_d.tga".format(texture_directory),
"{}/lg_style_02_wall_yellow_d.tga".format(texture_directory),
"{}/lg_style_03_wall_orange_bright_d.tga".format(texture_directory),
]
objects = [
pyrender.objects.Capsule,
pyrender.objects.Cylinder,
pyrender.objects.Icosahedron,
pyrender.objects.Box,
pyrender.objects.Sphere,
]
def set_random_texture(node, path):
texture_image = Image.open(path).convert("RGB")
primitive = node.mesh.primitives[0]
assert isinstance(primitive, Primitive)
primitive.material.baseColorTexture.source = texture_image
primitive.material.baseColorTexture.sampler.minFilter = GL_LINEAR_MIPMAP_LINEAR
def build_scene(floor_textures, wall_textures, fix_light_position=False):
scene = Scene(
bg_color=np.array([153 / 255, 226 / 255, 249 / 255]),
ambient_light=np.array([0.5, 0.5, 0.5, 1.0]))
floor_trimesh = trimesh.load("{}/floor.obj".format(object_directory))
mesh = Mesh.from_trimesh(floor_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_pitch(-math.pi / 2),
translation=np.array([0, 0, 0]))
texture_path = random.choice(floor_textures)
set_random_texture(node, texture_path)
scene.add_node(node)
texture_path = random.choice(wall_textures)
wall_trimesh = trimesh.load("{}/wall.obj".format(object_directory))
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(mesh=mesh, translation=np.array([0, 1.15, -3.5]))
set_random_texture(node, texture_path)
scene.add_node(node)
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_yaw(math.pi),
translation=np.array([0, 1.15, 3.5]))
set_random_texture(node, texture_path)
scene.add_node(node)
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_yaw(-math.pi / 2),
translation=np.array([3.5, 1.15, 0]))
set_random_texture(node, texture_path)
scene.add_node(node)
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_yaw(math.pi / 2),
translation=np.array([-3.5, 1.15, 0]))
set_random_texture(node, texture_path)
scene.add_node(node)
light = DirectionalLight(color=np.ones(3), intensity=10)
if fix_light_position == True:
translation = np.array([1, 1, 1])
else:
xz = np.random.uniform(-1, 1, size=2)
translation = np.array([xz[0], 1, xz[1]])
yaw, pitch = compute_yaw_and_pitch(translation)
node = Node(
light=light,
rotation=genearte_camera_quaternion(yaw, pitch),
translation=translation)
scene.add_node(node)
return scene
def place_objects(scene,
colors,
objects,
max_num_objects=3,
min_num_objects=1,
discrete_position=False,
rotate_object=False):
# Place objects
directions = [-1.5, 0.0, 1.5]
available_positions = []
for z in directions:
for x in directions:
available_positions.append((x, z))
available_positions = np.array(available_positions)
num_objects = random.choice(range(min_num_objects, max_num_objects + 1))
indices = np.random.choice(
np.arange(len(available_positions)), replace=False, size=num_objects)
for xz in available_positions[indices]:
node = random.choice(objects)()
node.mesh.primitives[0].color_0 = random.choice(colors)
if discrete_position == False:
xz += np.random.uniform(-0.3, 0.3, size=xz.shape)
if rotate_object:
yaw = np.random.uniform(0, math.pi * 2, size=1)[0]
rotation = pyrender.quaternion.from_yaw(yaw)
parent = Node(
children=[node],
rotation=rotation,
translation=np.array([xz[0], 0, xz[1]]))
else:
parent = Node(
children=[node], translation=np.array([xz[0], 0, xz[1]]))
scene.add_node(parent)
def udpate_vertex_buffer(cube_nodes):
for node in (cube_nodes):
node.mesh.primitives[0].update_vertex_buffer_data()
def compute_yaw_and_pitch(vec):
x, y, z = vec
norm = np.linalg.norm(vec)
if z < 0:
yaw = math.pi + math.atan(x / z)
elif x < 0:
if z == 0:
yaw = math.pi * 1.5
else:
yaw = math.pi * 2 + math.atan(x / z)
elif z == 0:
yaw = math.pi / 2
else:
yaw = math.atan(x / z)
pitch = -math.asin(y / norm)
return yaw, pitch
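# Worked example (added): for a direction vector on the +x axis, vec = [1, 0, 0],
# the z == 0 branch applies, giving yaw = math.pi / 2 and pitch = -asin(0) = 0.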
def genearte_camera_quaternion(yaw, pitch):
quaternion_yaw = pyrender.quaternion.from_yaw(yaw)
quaternion_pitch = pyrender.quaternion.from_pitch(pitch)
quaternion = pyrender.quaternion.multiply(quaternion_pitch, quaternion_yaw)
quaternion = quaternion / np.linalg.norm(quaternion)
return quaternion
def main():
    # create the output directory if it does not already exist
    os.makedirs(args.output_directory, exist_ok=True)
last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
initial_file_number = args.initial_file_number
if os.path.isdir(args.output_directory):
files = os.listdir(args.output_directory)
for name in files:
number = int(name.replace(".h5", ""))
if number > last_file_number:
continue
if number < args.initial_file_number:
continue
if number < initial_file_number:
continue
initial_file_number = number + 1
total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
initial_file_number - args.initial_file_number)
assert args.num_scenes_per_file <= total_scenes_to_render
# Colors
colors = []
for n in range(args.num_colors):
hue = n / args.num_colors
saturation = 1
lightness = 1
red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
colors.append(np.array((red, green, blue, 1)))
renderer = OffscreenRenderer(
viewport_width=args.image_size, viewport_height=args.image_size)
archiver = Archiver(
directory=args.output_directory,
num_scenes_per_file=args.num_scenes_per_file,
image_size=(args.image_size, args.image_size),
num_observations_per_scene=args.num_observations_per_scene,
initial_file_number=initial_file_number)
for scene_index in tqdm(range(total_scenes_to_render)):
scene = build_scene(
floor_textures,
wall_textures,
fix_light_position=args.fix_light_position)
place_objects(
scene,
colors,
objects,
max_num_objects=args.max_num_objects,
discrete_position=args.discrete_position,
rotate_object=args.rotate_object)
camera_distance = 4.5
camera = PerspectiveCamera(yfov=math.pi / 4)
camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
scene.add_node(camera_node)
scene_data = SceneData((args.image_size, args.image_size),
args.num_observations_per_scene)
for observation_index in range(args.num_observations_per_scene):
rand_position_xz = np.random.normal(size=2)
rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
rand_position_xz)
# Compute yaw and pitch
camera_direction = np.array(
[rand_position_xz[0], 0, rand_position_xz[1]])
yaw, pitch = compute_yaw_and_pitch(camera_direction)
camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
camera_position = np.array(
[rand_position_xz[0], 1, rand_position_xz[1]])
camera_node.translation = camera_position
# Rendering
flags = RenderFlags.SHADOWS_DIRECTIONAL
if args.anti_aliasing:
flags |= RenderFlags.ANTI_ALIASING
image = renderer.render(scene, flags=flags)[0]
scene_data.add(image, camera_position, math.cos(yaw),
math.sin(yaw), math.cos(pitch), math.sin(pitch))
if args.visualize:
plt.clf()
plt.imshow(image)
plt.pause(1e-10)
archiver.add(scene_data)
renderer.delete()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--total-scenes", "-total", type=int, default=2000000)
parser.add_argument("--num-scenes-per-file", type=int, default=2000)
parser.add_argument("--initial-file-number", type=int, default=1)
parser.add_argument("--num-observations-per-scene", type=int, default=10)
parser.add_argument("--image-size", type=int, default=64)
parser.add_argument("--max-num-objects", type=int, default=3)
parser.add_argument("--num-colors", type=int, default=6)
parser.add_argument("--output-directory", type=str, required=True)
parser.add_argument("--anti-aliasing", default=False, action="store_true")
parser.add_argument(
"--discrete-position", default=False, action="store_true")
parser.add_argument("--rotate-object", default=False, action="store_true")
parser.add_argument(
"--fix-light-position", default=False, action="store_true")
parser.add_argument("--visualize", default=False, action="store_true")
args = parser.parse_args()
main()
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
'''
@Time : 2021/03/16 10:35:28
@Author : Camille
@Version : 0.7beta
'''
from concurrent import futures
import socket
from sys import argv
import time
import threading
import struct
import uuid
import json
import os, subprocess
from concurrent.futures import thread, ThreadPoolExecutor
import queue
from auxiliaryTools import PrettyCode, ChangeRedis, BasicLogs
class Client():
def __init__(self) -> None:
socket.setdefaulttimeout(5)
self.config = Client._loadingConfig()
self.maternalIpInfo = None
self.tcpClientSocket = None
# 线程配置
self.event = threading.Event()
self.lock = threading.Lock()
self.tcpOnline = queue.Queue(1)
        # Task pool
self.taskPool = ThreadPoolExecutor(max_workers=10)
        # Message info (task reporting)
self.initializationTaskInfo = {
'flag': None,
'code': None,
'working': None,
'complete': [],
'oncall': []
}
# Set up logging
logName = 'ant_{}.log'.format(time.strftime('%S_%M_%H_%d_%m_%Y'))
self.logObj = BasicLogs.handler(logName=logName)
# Set up redis
self.redisObj = ChangeRedis(self.config.get('redisConfig'))
PrettyCode.prettyPrint('redis server 连接成功。')
# Pre-start checks
self.checkBeforeStarting()
# Log encryption switch
self.logEncrypt = True
def checkBeforeStarting(self):
# Checks before running, to prevent a faulty start
# Port check
pid = self._checkPort(6655)
if pid:
process = self._findProcess(pid)
self._killProcess(process)
def recvMsg(self) -> None:
# udp
self.udpClientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while 1:
if self.maternalIpInfo:
# The local address is only available once the TCP connection exists
try:
self.udpClientSocket.bind(self.maternalIpInfo)
except Exception as e:
# Diagnose UDP bind failures
self.logObj.logHandler().error(e)
self.checkBeforeStarting()
time.sleep(5)
self.udpClientSocket.bind(self.maternalIpInfo)
break
continue
PrettyCode.prettyPrint('UDP对象创建成功。')
# Wait indefinitely for incoming messages
self.udpClientSocket.settimeout(None)
recvMsg = ''  # initialised so the except branch below never hits an unbound name
while 1:
try:
data = self.udpClientSocket.recvfrom(1024)
recvMsg = data[0].decode('utf-8')
except Exception as e:
# Unexpected data error
self.logObj.logHandler().error(e)
# Ask the server to resend
self.sendMsg(self.makeInfoMsg(self._structureADH33Msg(3, recvMsg)))
if recvMsg:
msg = '数据已接收:{}\n'.format(recvMsg)
logMsg = 'Data received - {}'.format(recvMsg)
self.logObj.logHandler().info(logMsg)
PrettyCode.prettyPrint(msg)
# Normal acknowledgement
self.sendMsg(self.makeInfoMsg(self._structureADH33Msg(1, recvMsg)))
# Determine the message type
if recvMsg.startswith('AC'):
# Redis task-code message
tips = '开始执行任务,任务编号: {}'.format(msg)
PrettyCode.prettyPrint(tips)
# Execute the task
self._performOrderRedis(recvMsg)
else:
# Plain CMD command
self._performOrderCMD(recvMsg)
continue
self.udpClientSocket.close()
def sendMsg(self, msg) -> None:
"""构建报头
Args:
msg (str): 发送的信息。
Raises:
e: 预料之外的错误。
"""
while 1:
msg = str(msg)
try:
if not self.tcpClientSocket:
break
# Acquire the lock
self.lock.acquire()
msgPack = struct.pack('i', len(msg))
self.tcpClientSocket.send(msgPack)
self.tcpClientSocket.send(msg.encode('utf-8'))
PrettyCode.prettyPrint('发送成功。')
# Release the lock
self.lock.release()
if 'keepAlive' not in msg:
# Distinguish an ordinary heartbeat packet from other messages
break
# Send interval
time.sleep(5)
except socket.timeout as timeoutE:
# Release the lock
self.lock.release()
PrettyCode.prettyPrint('发送超时,正在尝试重新发送。', 'ERROR')
continue
except Exception as e:
# Release the lock
self.lock.release()
errorMsg = '{}{}'.format(self._errorCheck(e), ',现在重启TCP。')
PrettyCode.prettyPrint(errorMsg ,'ERROR')
# Clear the TCP client connection
self.tcpClientSocket = None
raise e
def makeInfoMsg(self, taskStatus: dict = {}) -> str:
# Build the message; default = 'keepAlive'; the flag field is mandatory
if not taskStatus:
taskStatus = {
'flag': 'ADH18',
'phase': 1,
'ACK': 'keepAlive',
}
if 'flag' not in taskStatus.keys():
self.logObj.logHandler().error('msg need flag.')
raise ValueError('缺少flag值')
msg = json.dumps(taskStatus)
return msg
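# Example payloads (illustrative, derived only from the defaults above):
#   keep-alive : {"flag": "ADH18", "phase": 1, "ACK": "keepAlive"}
#   ADH33 ack  : {"flag": "ADH33", "RCC": 1, "taskId": "...", "answerTime": <unix time>}
# Any dict passed in must carry a "flag" key, otherwise ValueError is raised.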
def TCPConnect(self) -> None:
while 1:
# tcp
if self.tcpOnline.empty():
# Offline state
tcpClientSocket = socket.socket()
PrettyCode.prettyPrint('TCP对象创建成功。')
# Reconnection attempt counter
nOfRec = 0
# Handle connection failures to the server
while 1:
recingMsg = '正在连接服务器中 {}'.format(nOfRec)
PrettyCode.prettyPrint(recingMsg)
try:
hostIP = self.config.get('serverConfig').get('host')
tcpClientSocket.connect((hostIP, 11451))
# Local protocol address bound to the socket
self.maternalIpInfo = (tcpClientSocket.getsockname()[0], 6655)
break
except:
nOfRec += 1
continue
self.tcpOnline.put('ONLINE')
# Connected; set the event to True
self.event.set()
PrettyCode.prettyPrint('服务器连接成功。')
self.tcpClientSocket = tcpClientSocket
time.sleep(10)
def heartbeat(self) -> None:
while 1:
# Loop so failures can be detected
if not self.tcpClientSocket:
break
# Ordinary heartbeat packet
msg = self.makeInfoMsg()
try:
# sendMsg loops internally,
# so ordinary heartbeats are sent continuously
self.sendMsg(msg)
except Exception as e:
# Unexpected error in the heartbeat logic
errorMsg = '[hb Error]意料之外的错误,将关闭本次TCP连接。错误信息:{} - {}'.format(e, e.__traceback__.tb_lineno)
PrettyCode.prettyPrint(errorMsg, 'ERROR')
break
# Heartbeat loop finished
if self.tcpClientSocket:
self.tcpClientSocket.close()
@staticmethod
def performOrderResult(worker):
"""任务执行结果
Args:
worker (obj): sub对象。
Returns:
str: 任务结果信息。
"""
worker.add_done_callback(worker.result)
while 1:
if worker.done():
result = worker.result()
return result
time.sleep(1)
def _performOrderCMD(self, order: str) -> None:
"""执行CMD命令函数
Args:
order (str): CMD命令
"""
self.lock.acquire()
logMsg = 'Task started - {}'.format(order)
self.logObj.logHandler().info(logMsg)
worker = self.taskPool.submit(self.taskExecuteCMD, order, )
self.lock.release()
result = Client.performOrderResult(worker)
msg = '{} - 任务完成。'.format(order)
PrettyCode.prettyPrint(msg)
def _performOrderRedis(self, taskId: str, standardEnd=True) -> None:
"""执行Redis命令函数
Args:
taskId (str): 任务编号
standardEnd (bool, optional): 执行模式. Defaults to True.
"""
# Fetch the task list ordered by priority (lower zrange score = higher priority) -> (task, priority)
try:
taskBook = self.redisObj.redisPointer().zrange(taskId, 0, -1, withscores=True, desc=True)
if taskBook:
# Fetched successfully
PrettyCode.prettyPrint('任务获取成功。')
# Build the ADH27 acknowledgement message
initializationTaskInfo = {
'flag': 'ADH27',
'code': taskId,
'phase': 1,
'working': None,
'complete': [],
# Put the tasks on the pending list and upload it to redis
'oncall': [i[0] for i in taskBook],
}
# Tell the server the tasks were received and are about to run
taskInfo = self.makeInfoMsg(initializationTaskInfo)
# print('ack message', taskInfo)
self.sendMsg(taskInfo)
else:
# The task book is empty; notify the SERVER
raise ValueError('taskbook is null!')
except Exception as e:
# Send an ADH33 message with rcc = 2
errorMsg = '{} - {}'.format(e, e.__traceback__.tb_lineno)
self.sendMsg(self.makeInfoMsg(self._structureADH33Msg(2, taskId, (errorMsg, ))))
PrettyCode.prettyPrint('任务获取失败。')
raise ValueError('任务获取失败。')
# Start executing the tasks
for task in taskBook:
# Acquire the lock
self.lock.acquire()
msg = '开始执行 - {}'.format(task[0])
PrettyCode.prettyPrint(msg)
# Submit the task to the thread pool -> (task, priority)
worker = self.taskPool.submit(self.taskExecuteCMD, task[0], )
self.lock.release()
# Send the "executing" message
initializationTaskInfo['phase'] = 2
initializationTaskInfo['working'] = task[0]
taskInfo = self.makeInfoMsg(initializationTaskInfo)
self.sendMsg(taskInfo)
# print('executing message', taskInfo)
worker.add_done_callback(worker.result)
result = Client.performOrderResult(worker)
# Send the "task complete" message
initializationTaskInfo['phase'] = 3
taskStatusDict = self._taskReportMsgComplete(initializationTaskInfo, task[0])
taskInfo = self.makeInfoMsg(taskStatusDict)
# print('completion message', taskInfo)
self.sendMsg(taskInfo)
msg = '{} - 任务完成。'.format(task[0])
PrettyCode.prettyPrint(msg)
# Pause between tasks
time.sleep(5)
return True
def taskExecuteCMD(self, task):
"""任务执行函数
Args:
task (str): 任务执行命令
"""
try:
self.lock.acquire()
msg = '正在执行 - {}'.format(task)
PrettyCode.prettyPrint(msg)
executor = subprocess.Popen(task, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
result = executor.stdout.read().decode('gbk')
self.lock.release()
return result
except Exception as e:
errorMsg = '{} - {}'.format(e, e.__traceback__.tb_lineno)
self.logObj.logHandler().error(errorMsg)
self.lock.release()
def daemonlogic(self, existsInfo: dict):
# Watchdog loop
while 1:
for tName, tFunc in existsInfo.items():
if tName not in str(threading.enumerate()):
# Detected that a thread has gone offline
if tName == 'heartbeat':
# tcpOnline being empty means the connection is offline
self.tcpOnline.get()
# If the connection succeeds, event is True and wait() returns immediately; if the server
# goes offline, event is cleared and wait() blocks until it becomes True again.
# Reaching this branch means the TCP link is already down, so clear the event.
self.event.clear()
self.event.wait()
tFunc().start()
time.sleep(10)
def _taskReportMsgComplete(self, info: dict, task: str):
# Update the report after a task finishes
info['working'] = None
info.get('complete').append(task)
if task == info.get('oncall')[0]:
info.get('oncall').pop(0)
else:
info.get('oncall').remove(task)
return info
def _taskReport(self, code, func):
# Report results (channel for collecting client information)
report = {
'identifier': code,
'kp': 'keepRogerThat',
'systemInfoTask': func()
}
msg = self.makeInfoMsg(report)
self.sendMsg(msg)
@staticmethod
def _loadingConfig():
# Configuration file
return PrettyCode.loadingConfigJson(r'config.json')
@staticmethod
def _errorCheck(errorInfo):
# Analyse the error and return a sensible description
if str(errorInfo).startswith('[WinError 10054]'):
# The remote host forcibly closed an existing connection
return '服务器离线'
else:
return '意料之外的错误'
@staticmethod
def _getClientSystemInfo():
# Collect system information
hostname = socket.gethostname()
localAddrs = socket.getaddrinfo(hostname, None)
localAddrsIPV4 = [ip[4][0] for ip in localAddrs if ':' not in ip[4][0]]
# Get the MAC address
macUUID = uuid.UUID(int=uuid.getnode()).hex[-12:]
macAddress = '-'.join(macUUID[i: i + 2] for i in range(0, 11, 2))
localInfo = {
'hostname': hostname,
'localAddrsIPV4': localAddrsIPV4,
'MACAddress': macAddress,
}
return localInfo
def _checkPort(self, port: int) -> bool:
# Port check
order = 'netstat -ano|findstr {}'.format(port)
# result = subprocess.Popen(order, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = self.taskExecuteCMD(order)
if result:
# Port is in use
pid = result.split()[-1]
return pid
else:
# Port is free
return False
def _findProcess(self, pid):
# Find the owning process
order = 'tasklist|findstr "{}"'.format(pid)
process = self.taskExecuteCMD(order)
process = process.split()[0]
return process
def _killProcess(self, process):
# Kill the process
try:
order = 'taskkill /f /t /im {}'.format(process)
self.taskExecuteCMD(order)
return True
except Exception as e:
self.logObj.logHandler().error(e)
return False
def _structureADH33Msg(self, rcc, taskId=None, *args, **kwargs) -> dict:
answer = {
'flag': 'ADH33',
'RCC': rcc,
'taskId': taskId,
'answerTime': time.time(),
}
errorMsg = args[0][0] if args and args[0] else None
if errorMsg:
answer['errorMsg'] = errorMsg
return answer
def _daemonThread(self, existsInfo: dict) -> thread:
daemonThread = threading.Thread(target=self.daemonlogic, name='daemonThread', args=(existsInfo, ))
daemonThread.daemon = True
return daemonThread
def _hbControl(self):
# Start the heartbeat thread
return threading.Thread(target=self.heartbeat, name='heartbeat')
def _dataReportControl(self, method):
# Report data to the server
if method == 'get_system':
self._taskReport('ADH56', self._getClientSystemInfo)
def _recvMsgControl(self):
# Receive messages
return threading.Thread(target=self.recvMsg, name='recvMsg')
def dispatch(self):
threadInfoDict = {
'heartbeat': self._hbControl,
'recvMsg': self._recvMsgControl,
}
tPool = ThreadPoolExecutor(max_workers=10)
# wait() blocks while event is False; once the server connection succeeds and event is set, start the threads below
self.event.wait()
self._recvMsgControl().start()
self._hbControl().start()
self._daemonThread(threadInfoDict).start()
# Send online-device information
# dataReport = self.taskPool.submit(self._dataReportControl, 'get_system', )
time.sleep(2)
# if dataReport.done():
# PrettyCode.prettyPrint('主机信息上传完成。')
def main(self):
threading.Thread(target=self.TCPConnect, name='TCPConnect').start()
threading.Thread(target=self.dispatch, name='dispatch').start()
def testTask():
pass
if __name__ == "__main__":
mole = Client()
mole.main()
# mole.performOrder('AC131')
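# --- Illustrative only ------------------------------------------------------
# sendMsg() frames every TCP message as a 4-byte length header produced by
# struct.pack('i', len(msg)) followed by the UTF-8 payload. A matching receive
# helper would therefore look roughly like the sketch below; the function name
# and socket handling are assumptions, not part of this client (struct is
# already imported at the top of this file).
def _recv_framed(sock):
    header = sock.recv(4)  # length prefix written by sendMsg
    if len(header) < 4:
        return None
    (length,) = struct.unpack('i', header)  # same format string as the sender
    payload = b''
    while len(payload) < length:  # keep reading until the whole message arrives
        chunk = sock.recv(length - len(payload))
        if not chunk:
            break
        payload += chunk
    return payload.decode('utf-8')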
|
import click
from sigopt.validate import validate_top_level_dict
from .load_yaml import load_yaml_callback
cluster_filename_option = click.option(
'-f',
'--filename',
type=click.Path(exists=True),
callback=load_yaml_callback(validate_top_level_dict),
help='cluster config yaml file',
default='cluster.yml',
)
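# Illustrative usage (hypothetical command name, not part of this module):
#
#     @click.command()
#     @cluster_filename_option
#     def show_cluster(filename):
#         # `filename` arrives already loaded/validated by load_yaml_callback
#         click.echo(filename)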
|
#!/usr/bin/env python
# coding: utf-8
import os
import datetime as dt  # needed by the edit_report action below
from pathlib import Path
import io
import pandas as pd  # needed for the MultiIndex column construction
import geoviews as gv
import panel as pn
import param
from panel.widgets import Checkbox
from mednum.widgets import TreeViewCheckBox
from holoviews.element.tiles import StamenTerrain
from mednum.loaders import *
from pygal.style import Style
from mednum import tools
from mednum.config import *
import cProfile
import copy
import mednum as mind
gv.extension("bokeh")
class OverallParameters(param.Parameterized):
localisation = param.String(default="Jegun", label="")
score = param.Range(default=(0, 250), bounds=(0, 250),)
tout_axes = param.Boolean(True, label="")
interfaces_num = param.ListSelector(label="", default=list(CATEGORIES_INT_NUM_REV.keys()))
infos_num = param.ListSelector(label="", default=list(CATEGORIES_X_INFOS_REV))
comp_admin = param.ListSelector(label="", default=list(CATEGORIES_X_COMP_ADMIN_REV.keys()))
comp_usage_num = param.ListSelector(label="", default=list(CATEGORIES_X_COMP_USAGE_REV.keys()))
point_ref = param.Selector(
default=SELECT[2], objects=SELECT, label="Point de référence",
)
niveau_observation = param.Selector(
default=SELECT[2], objects=SELECT, label="Niveau d'observation",
)
niveau_details = param.Selector(
default=SELECT[2], objects=SELECT, label="Niveau de détail",
)
donnees_infra = param.Action(
lambda x: x, doc="""Données Infra-Communales""", precedence=0.7
)
file_name = param.String(
default="Export_mednum.csv",
doc="""
The filename to save to.""",
)
edit_report = param.Action(
lambda x: x.timestamps.append(dt.datetime.utcnow()),
doc="""Editer un rapport""",
precedence=0.7,
)
tiles = gv.tile_sources.StamenTerrain
df_merged = param.DataFrame()
df_score = param.DataFrame()
def __init__(self, **params):
super(OverallParameters, self).__init__(**params)
interim_data, cont_iris, indice_frag = self.define_paths()
# Merged
output_data_path = interim_data / "add_geom_data_to_merged_data.trc.pqt"
if output_data_path.exists():
import geopandas as gpd
self.df_merged = gpd.read_parquet(output_data_path)
else:
self.df_merged = add_geom_data_to_merged_data(
iris_df(cont_iris), read_merged_data(indice_frag)
)
# Create the MultiIndex
self.set_dataframes_indexes()
self.set_dataframes_level()
# Create widgets for indicators
self.define_indices_params()
# Define what is level 0 and level 1 to consider
self.set_entity_levels()
# What is selected in each level
self.get_selected_indice_by_level()
# Define the searchable elements
self.define_searchable_element()
# Download
self.download = pn.widgets.FileDownload(
label="""Exporter les résultats""",
filename=self.file_name,
callback=self._download_callback,
)
def define_paths(self):
data_path = Path("../data")
if not data_path.exists():
data_path = Path("./data")
if not data_path.exists():
data_path = Path("../../data")
raw_data = data_path / "raw/"
external_data = data_path / "external/"
interim_data = data_path / "interim/"
cont_iris = external_data / "france-geojson" / "contours-iris.geojson"
# NOTE: `processed_data` was never defined in the original source; the usual
# data layout is assumed here.
processed_data = data_path / "processed/"
indice_frag = processed_data / "MERGE_data_clean.csv"
return interim_data, cont_iris, indice_frag
def define_searchable_element(self):
self.seachable_localisation = list(
self.df_merged.index.get_level_values(self.level_0_column_names).unique()
)
def define_indices_params(self):
"""
Create all indices parameters -> Will become a TreeCheckBox or Checkbox
"""
self.g_params = []
for k, widget_opts in TREEVIEW_CHECK_BOX.items():
# Check whether the description can be passed through
widgets_params = self.create_checkbox_type_widget_params(widget_opts)
self.g_params.append(pn.Param(self.param[k], widgets={k: widgets_params}))
def _download_callback(self):
"""
A FileDownload callback will return a file-like object which can be serialized
and sent to the client.
"""
self.file_name = "Export_%s.csv" % self.point_ref
self.download.filename = self.file_name
sio = io.StringIO()
self.df_score.drop("geometry", axis=1).to_csv(sio, index=False)
sio.seek(0)
return sio
def get_params(self):
parameter_names = [par[0] for par in self.get_param_values()]
return pn.Param(
self.param,
parameters=[par for par in parameter_names if par != "df_merged"],
)
def set_dataframes_level(self):
real_name_level = []
for col in self.df_merged.columns:
if col in CATEGORIES_INDICES.keys():
real_name_level.append((col, CATEGORIES_INDICES[col]))
else:
real_name_level.append((col, col))
self.df_merged.columns = pd.MultiIndex.from_tuples(
real_name_level, names=["variable", "nom"]
)
def set_dataframes_indexes(self):
indexes = list(
set(
list(MAP_COL_WIDGETS["level_0"].values())
+ list(MAP_COL_WIDGETS["level_1"].values())
)
)
self.df_merged.set_index(indexes, inplace=True)
@pn.depends("localisation", "point_ref", "niveau_observation", watch=True)
def set_entity_levels(self):
"""Set the entity levels and point values for this entity.
"""
self.level_0_column, self.level_1_column, self.level_2_column = (
MAP_COL_WIDGETS["level_0"]["index"],
MAP_COL_WIDGETS["level_1"][self.point_ref],
MAP_COL_WIDGETS["level_2"][self.niveau_observation],
)
self.level_0_column_names = MAP_COL_WIDGETS["level_0"]["names"]
self.level_0_value = self.localisation
@pn.depends(
"tout_axes",
"interfaces_num",
"infos_num",
"comp_admin",
"comp_usage_num",
watch=True,
)
def get_selected_indice_by_level(self):
"""get the indices of the selected column
Args:
self ([type]): [description]
Returns:
[type]: [description]
"""
param_values = {k: v for k, v in self.param.get_param_values()}
selected_col = []
for axe, indices in param_values.items():
if axe in TREEVIEW_CHECK_BOX.keys() and indices:
for indice in indices:
try:
selected_col += [CATEGORIES_INDICES_REV[indice]]
except:
pass
self.selected_indices_level_0 = list(set(selected_col))
self.selected_indices_level_1 = list(set(selected_col))
return self.selected_indices_level_0, self.selected_indices_level_1
def create_checkbox_type_widget_params(self, widget_opts):
"""Create dict of widget type and checkbox params .
Args:
widget_opts ([type]): [description]
Returns:
[type]: [description]
"""
if len(widget_opts.items()) > 3:
select_options = [
val["nom"]
for opt, val in widget_opts.items()
if opt not in ["nom", "desc"]
]
descriptions = [
val["desc"]
for opt, val in widget_opts.items()
if opt not in ["nom", "desc"]
]
widget_type = TreeViewCheckBox
widgets_params = {
"type": widget_type,
"select_options": select_options,
"select_all": widget_opts["nom"],
"desc": descriptions,
}
else:
descriptions = widget_opts["desc"]
widget_type = Checkbox
widgets_params = {
"name": widget_opts["nom"],
"type": widget_type,
"value": True,
"desc": descriptions,
}
return widgets_params
def set_real_name(df):
real_name_level = []
for col in df.columns:
if col in CATEGORIES_INDICES.keys():
real_name_level.append((col, CATEGORIES_INDICES[col]))
else:
real_name_level.append((col, col))
return real_name_level
def info_localisation(self):
info_loc = {}
index = self.df_merged.xs(
self.localisation, level=self.level_0_column_names, drop_level=False
).index
ids = index.unique().to_numpy()[0]
names = index.names
for k, v in zip(names, ids):
info_loc[k] = v
return info_loc
def get_indices_properties(self):
indices_properties = {}
tree = copy.deepcopy(TREEVIEW_CHECK_BOX)
for indic_dict in tree.values():
indic_dict.pop("nom", None)
indic_dict.pop("desc", None)
indices_properties.update(indic_dict)
return indices_properties
@pn.depends(
"localisation",
"point_ref",
"niveau_observation",
"tout_axes",
"interfaces_num",
"infos_num",
"comp_admin",
"comp_usage_num",
watch=True,
)
def score_calculation(self):
indices_properties = self.get_indices_properties()
selected_indices = self.selected_indices_level_0
df = self.df_merged.droplevel("nom", axis=1)
info_loc = self.info_localisation()
if selected_indices != []:
selected_indices_aggfunc = {
k: indices_properties[k]["aggfunc"] for k in selected_indices
}
#
map_info = [self.level_0_column_names]
vdims = map_info + selected_indices
# Aggregate with the specified function (mean, median)
# at the level_1_column level over the selected indicators selected_indices_aggfunc
score_agg_niveau = df.groupby(self.level_1_column).agg(
selected_indices_aggfunc
)
# Select the level-2 slice
df_level_2 = df.xs(
info_loc[self.level_2_column],
level=self.level_2_column,
drop_level=False,
)
# Divide by the aggregate over the level_1_column area (weighting)
score_niveau = df_level_2[selected_indices].floordiv(score_agg_niveau) * 100
# Dissolve (i.e. geographic aggregation) at the desired level_0_column subdivision
df = df_level_2.dissolve(
by=[self.level_0_column, self.level_0_column_names],
aggfunc=selected_indices_aggfunc,
)
# Merge indicator scores on the index to recover the geometry.
# _BRUT : raw value
# _SCORE : indicator score on the level_0_column subdivision divided by the aggregate at level_1_column
scores = df.merge(
score_niveau,
on=[self.level_0_column, self.level_0_column_names],
suffixes=("_BRUT", "_SCORE"),
).drop_duplicates()  # Drop duplicates (homogeneous zones)
# Compute the score for each axis and the total
number_axes = 0
for axe, indices in AXES_INDICES.items():
selected_in_axes = [
k + "_SCORE" for k in indices.keys() if k in selected_indices
]
if selected_in_axes != []:
scores.loc[:, axe] = scores[selected_in_axes].mean(axis=1).astype(int)
number_axes += 1
else:
scores.loc[:, axe] = 0
# Total score
scores.loc[:, "tout_axes"] = scores[list(AXES_INDICES.keys())].sum(axis=1)
if number_axes != 0:
scores.loc[:, "tout_axes"] //= number_axes
self.df_score = df.merge(
scores, on=[self.level_0_column, self.level_0_column_names, "geometry"]
).drop_duplicates()  # Drop duplicates for communes split into IRIS
else:
df = df.xs(
info_loc[self.level_2_column],
level=self.level_2_column,
drop_level=False,
).dissolve(by=[self.level_0_column, self.level_0_column_names],)
for axe, indices in AXES_INDICES.items():
df.loc[:, axe] = 0
df.loc[:, "tout_axes"] = 0
self.df_score = df
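# --- Illustrative only ------------------------------------------------------
# Toy sketch (plain pandas, no geometry) of the scoring idea used in
# score_calculation above: each indicator is floor-divided by its aggregate
# over the reference area, then scaled to a percentage.
#
#     import pandas as pd
#     df = pd.DataFrame({"zone": ["A", "A", "B"], "indice": [10, 20, 40]})
#     ref = df.groupby("zone").agg({"indice": "mean"})           # A -> 15, B -> 40
#     score = df.set_index("zone")[["indice"]].floordiv(ref) * 100
#     # -> A: 0 and 100, B: 100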
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from app.models import Category, Beverage
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email', 'groups']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ['url', 'name']
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class BeverageSerializer(serializers.ModelSerializer):
class Meta:
model = Beverage
fields = '__all__'
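# Illustrative pairing (the project's actual viewsets live elsewhere; the
# queryset and class name below are assumptions):
#
#     from rest_framework import viewsets
#
#     class BeverageViewSet(viewsets.ModelViewSet):
#         queryset = Beverage.objects.all()
#         serializer_class = BeverageSerializer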
|
from django.urls import path
from . import views
urlpatterns = [
path('register/', views.registerPage, name="register"),
path('login/', views.loginPage, name="login"),
path('logout/', views.logoutUser, name="logout"),
path('whoweare/', views.whoweare, name="whoweare"),
path('info/', views.info, name="info"),
path('hospital_displays/', views.hospital_displays, name="hospital_displays"),
path('how_to_help/',views.help,name="help"),
path('submit/', views.submit, name="submit"),
path('doctor_profile/', views.doctor_profile, name="doctor_profile"),
path('hospitalinfo/', views.CantfindHospital, name="hospitalinfo"),
path('', views.home, name="home"),
]
|
# -*- coding: utf-8 -*-
"""Top-level package for webexteamsbot."""
# from .Spark import SparkBot # noqa
from .webexteamsbot import TeamsBot # noqa
__author__ = """Hank Preston"""
__email__ = "hank.preston@gmail.com"
__version__ = "0.1.0"
|
__version__ = (0, 0, 1)
from .parser import read, parse_reply
__all__ = ('read', 'parse_reply')
|
import logging
from flask import Blueprint
from burgeon.api.auth.registration_api import RegistrationAPI
from burgeon.api.auth.login_api import LoginAPI
from burgeon.api.auth.logout_api import LogoutAPI
from burgeon.api.auth.user_api import UserAPI
log = logging.getLogger('burgeon.api.auth')
# Loader for flask-login
from burgeon import login
from burgeon.models import User
@login.user_loader
def load_user(id):
return User.query.get(int(id))
auth_blueprint = Blueprint('auth', __name__)
# API resources
registration_view = RegistrationAPI.as_view('register_api')
login_view = LoginAPI.as_view('login_api')
user_view = UserAPI.as_view('user_api')
logout_view = LogoutAPI.as_view('logout_api')
# API Endpoints
auth_blueprint.add_url_rule(
'/auth/register',
view_func=registration_view,
methods=['POST']
)
auth_blueprint.add_url_rule(
'/auth/login',
view_func=login_view,
methods=['POST']
)
auth_blueprint.add_url_rule(
'/auth/user',
view_func=user_view,
methods=['GET']
)
auth_blueprint.add_url_rule(
'/auth/logout',
view_func=logout_view,
methods=['POST']
)
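# Illustrative registration (the application factory and URL prefix are
# assumptions, not shown in this module):
#
#     app.register_blueprint(auth_blueprint, url_prefix='/api')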
|
# encoding: utf-8
# pylint: disable=no-self-use
"""
Docker provider setup.
"""
from datetime import datetime, timedelta
import functools
import logging
import docker
import types
from flask_login import current_user
from flask_restplus_patched import Resource
from flask_restplus_patched._http import HTTPStatus
from app.extensions.api import Namespace, abort
import sqlalchemy
from app.extensions import api, db
log = logging.getLogger(__name__)
class DockerProvider(object):
def __init__(self, app=None):
if app:
self.init_app(app)
def init_app(self, app):
return self
def connect(self, provider):
client = docker.DockerClient(base_url=provider.auth_url)
return client
def create_server(self, provider, **kwargs):
try:
conn = self.connect(provider)
server = conn.containers.run(kwargs['image'],
command=None,
name=kwargs['name'],
network=kwargs['network'],
# nano_cpus=kwargs['vcpus'],
# mem_limit=kwargs['ram'],
publish_all_ports= True,
detach=True
)
server.private_v4 = ''
return server
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def run_server(self, provider, **kwargs):
try:
conn = self.connect(provider)
server = conn.containers.run(kwargs['image'],
command=None,
name=kwargs['name'],
environment = kwargs['environment'],
restart_policy = {"Name": "unless-stopped"},
ports = kwargs['ports'],
detach=True
)
log.info(server)
return server
except Exception as e:
log.info("Exception: %s", e)
log.info(type(e))
# Return the error text to the caller; the abort() call that followed this
# return in the original was unreachable and has been removed.
return str(e)
def list_servers(self, provider, **kwargs):
try:
conn = self.connect(provider)
return conn.containers.list()
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_server(self, provider, name_or_id):
try:
conn = self.connect(provider)
container = conn.containers.get(name_or_id)
server = types.SimpleNamespace()
server.id = container.attrs['Id']
server.created = container.attrs['Created']
server.status = container.attrs['State']['Status']
if container.attrs['State']['Running'] == True:
server.power_state = 1
elif container.attrs['State']['Paused'] == True:
server.power_state = 3
elif container.attrs['State']['Restarting'] == True:
server.power_state = 4
elif container.attrs['State']['OOMKilled'] == True:
server.power_state = 6
elif container.attrs['State']['Dead'] == True:
server.power_state = 7
else:
server.power_state = 0
return server
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_server(self, provider, name_or_id):
try:
conn = self.connect(provider)
container = conn.containers.get(name_or_id)
return container.remove(force=True)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def server_action(self, provider, name_or_id, action, **kwargs):
try:
conn = self.connect(provider)
container = conn.containers.get(name_or_id)
if action == 'reboot':
container.restart()
_e_status = 'running'
elif action == 'hard_reboot':
container.restart()
_e_status = 'running'
elif action == 'pause':
container.pause()
_e_status = 'paused'
elif action == 'unpause':
container.unpause()
_e_status = 'running'
elif action == 'rebuild':
container.restart()
_e_status = 'restarting'
elif action == 'start':
container.start()
_e_status = 'running'
elif action == 'stop':
container.stop()
_e_status = 'exited'
elif action == 'resize_server':
container.update(mem_limit=kwargs['ram'])
_e_status = 'ACTIVE'
elif action == 'confirm_server_resize':
_e_status = 'running'
elif action == 'revert_server_resize':
_e_status = 'running'
elif action == 'status':
_e_status = 'STATUS'
log.info('action to carry out: %s', action)
else:
abort(
code=HTTPStatus.NOT_FOUND,
message="Action does not exist"
)
return _e_status
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_images(self, provider):
try:
conn = self.connect(provider)
return conn.images.list()
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_flavors(self, provider):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_image_snapshot(self, provider, name, server):
try:
conn = self.connect(provider)
container = conn.containers.get(server)
return container.commit(repository=name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_image(self, provider, name):
try:
conn = self.connect(provider)
return conn.images.remove(image=name)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_network(self, provider, name, project_id, external=False):
try:
conn = self.connect(provider)
if external == 'False':
internal = True
else:
internal = False
return conn.networks.create(name=name, internal=internal, driver="bridge")
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_network(self, provider, name):
try:
conn = self.connect(provider)
network = conn.networks.get(name)
return network.remove()
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_subnet(self, provider, **kwargs):
try:
conn = self.connect(provider)
ipam_pool = docker.types.IPAMPool(
iprange=kwargs['cidr']
)
ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
return conn.networks.create(
kwargs['subnet_name'],
driver="bridge",
ipam=ipam_config)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_subnet(self, provider, name):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_router(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_router(self, provider, name):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_security_group(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_security_group(self, provider, name):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_security_group_by_id(self, provider, name):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_security_group_rule(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_security_group_rule(self, provider, name):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_project(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_volume(self, provider, **kwargs):
try:
conn = self.connect(provider)
return conn.volumes.create(name=kwargs['name'], driver='local')
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_volume(self, provider, volume_id):
try:
conn = self.connect(provider)
return conn.volumes.get(volume_id)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_volume(self, provider, volume_id):
try:
conn = self.connect(provider)
volume = conn.volumes.get(volume_id)
return volume.remove()
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def list_volume_snapshots(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def create_volume_snapshot(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def get_volume_snapshot_by_id(self, provider, volume_id):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def delete_volume_snapshot(self, provider, volume_id):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def attach_volume(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def detach_volume(self, provider, **kwargs):
try:
conn = self.connect(provider)
return None
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
def server_logs(self, provider, name_or_id, **kwargs):
try:
conn = self.connect(provider)
container = conn.containers.get(name_or_id)
return container.logs(**kwargs)
except Exception as e:
log.info("Exception: %s", e)
abort(
code=HTTPStatus.UNPROCESSABLE_ENTITY,
message="%s" % e
)
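# --- Illustrative only ------------------------------------------------------
# Minimal usage sketch. The `provider` argument only needs an `auth_url`
# attribute (see connect() above); SimpleNamespace and the socket URL below
# are assumptions for the example (the `types` module is already imported).
#
#     provider = types.SimpleNamespace(auth_url="unix://var/run/docker.sock")
#     docker_provider = DockerProvider()
#     server = docker_provider.run_server(
#         provider,
#         image="nginx:alpine",
#         name="demo-nginx",
#         environment={},
#         ports={"80/tcp": 8080},
#     )
#     print(docker_provider.get_server(provider, "demo-nginx").status)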
|
import os
from abc import abstractmethod
from pathlib import Path
import yaml
from surround.config import Config
__author__ = 'Akshat Bajaj'
__date__ = '2019/02/18'
class BaseRemote():
def __init__(self):
self.message = ""
self.messages = []
def write_config(self, what_to_write, file_, name, path=None):
"""Write config to a file
:param what_to_write: For example remote, data, model etc.
:type what_to_write: str
:param file_: file to write
:type file_: str
:param name: name of the remote
:type name: str
:param path: path to the remote
:type path: str
"""
if os.path.exists(file_):
with open(file_, "r") as f:
read_config = yaml.safe_load(f) or {}
else:
read_config = {}
if path is None:
if what_to_write in read_config and name not in read_config[what_to_write]:
read_config[what_to_write].append(name)
elif what_to_write not in read_config:
read_config[what_to_write] = [name]
else:
if what_to_write in read_config:
read_config[what_to_write][name] = path
else:
read_config[what_to_write] = {
name: path
}
with open(file_, "w") as f:
yaml.dump(read_config, f, default_flow_style=False)
def read_from_config(self, what_to_read, key):
local = self.read_from_local_config(what_to_read, key)
return local if local is not None else self.read_from_global_config(what_to_read, key)
def read_from_local_config(self, what_to_read, key):
config = Config()
if Path(".surround/config.yaml").exists():
config.read_config_files([".surround/config.yaml"])
read_items = config.get(what_to_read, None)
return read_items.get(key, None) if read_items is not None else None
def read_from_global_config(self, what_to_read, key):
config = Config()
home = str(Path.home())
if Path(os.path.join(home, ".surround/config.yaml")).exists():
config.read_config_files([os.path.join(home, ".surround/config.yaml")])
read_items = config.get(what_to_read, None)
return read_items.get(key, None) if read_items is not None else None
def read_all_from_local_config(self, what_to_read):
config = Config()
if Path(".surround/config.yaml").exists():
config.read_config_files([".surround/config.yaml"])
read_items = config.get(what_to_read, None)
return read_items
def read_all_from_global_config(self, what_to_read):
config = Config()
home = str(Path.home())
if Path(os.path.join(home, ".surround/config.yaml")).exists():
config.read_config_files([os.path.join(home, ".surround/config.yaml")])
read_items = config.get(what_to_read, None)
return read_items
def add(self, add_to, key):
"""Add data to remote
:param add_to: remote to add to
:type add_to: str
:param key: file to add
:type key: str
"""
project_name = self.get_project_name()
if project_name is None:
return self.message
path_to_local_file = Path(os.path.join(add_to, key))
path_to_remote = self.get_path_to_remote(add_to)
if path_to_remote is None:
return self.message
# Append filename
path_to_remote_file = os.path.join(path_to_remote, project_name, key)
if Path(path_to_local_file).is_file() or Path(path_to_remote_file).is_file():
self.write_config(add_to, ".surround/config.yaml", key)
self.add_message("info: file added successfully", False)
else:
self.add_message("error: " + key + " not found.", False)
return self.message
def pull(self, what_to_pull, key=None):
"""Pull from remote
:param what_to_pull: what to pull from remote. By convention it is remote name. If remote name is data, it will pull data.
:type what_to_pull: str
:param key: file to pull
:type key: str
"""
project_name = self.get_project_name()
if project_name is None:
return self.messages
path_to_remote = self.get_path_to_remote(what_to_pull)
if path_to_remote is None:
return self.messages
if key:
relative_path_to_remote_file = os.path.join(project_name, key)
path_to_local_file = os.path.join(what_to_pull, key)
if self.file_exists_locally(path_to_local_file):
return self.message
os.makedirs(what_to_pull, exist_ok=True)
if self.file_exists_on_remote(path_to_remote, relative_path_to_remote_file, False):
response = self.pull_file(what_to_pull, key, path_to_remote, relative_path_to_remote_file, path_to_local_file)
self.add_message(response)
else:
self.add_message("error: file does not exist")
return self.message
files_to_pull = self.read_all_from_local_config(what_to_pull)
self.messages = []
if files_to_pull:
for file_to_pull in files_to_pull:
self.pull(what_to_pull, file_to_pull)
else:
self.add_message("error: No file added to " + what_to_pull)
return self.messages
@abstractmethod
def pull_file(self, what_to_pull, key, path_to_remote, relative_path_to_remote_file, path_to_local_file):
"""Get the file stored on the remote
:param what_to_pull: what to pull from remote
:type what_to_pull: str
:param path_to_remote: path to the remote.
:type path_to_remote: str
:param relative_path_to_remote_file: path to file on remote relative to the remote path
:type relative_path_to_remote_file: str
:param path_to_local_file: path to the local file
:type path_to_local_file: str
"""
def push(self, what_to_push, key=None):
"""Push to remote
:param what_to_push: what to push to remote. By convention it is remote name. If remote name is data, it will push data.
:type what_to_push: str
:param key: file to push
:type key: str
"""
project_name = self.get_project_name()
if project_name is None:
return self.messages
path_to_remote = self.get_path_to_remote(what_to_push)
if path_to_remote is None:
return self.messages
if key:
path_to_remote_file = os.path.join(path_to_remote, project_name, key)
relative_path_to_remote_file = os.path.join(project_name, key)
if self.file_exists_on_remote(path_to_remote, relative_path_to_remote_file):
return self.message
path_to_local_file = os.path.join(what_to_push, key)
os.makedirs(os.path.dirname(path_to_remote_file), exist_ok=True)
if path_to_remote_file and self.file_exists_locally(path_to_local_file, False):
response = self.push_file(what_to_push, key, path_to_remote, relative_path_to_remote_file, path_to_local_file)
self.add_message(response)
else:
self.add_message("error: file does not exist")
return self.message
files_to_push = self.read_all_from_local_config(what_to_push)
self.messages = []
if files_to_push:
for file_to_push in files_to_push:
self.push(what_to_push, file_to_push)
else:
self.add_message("error: No file added to " + what_to_push)
return self.messages
@abstractmethod
def push_file(self, what_to_push, key, path_to_remote, relative_path_to_remote_file, path_to_local_file):
"""Get the file stored on the remote
:param what_to_push: what to push to remote
:type what_to_push: str
:param path_to_remote: path to the remote.
:type path_to_remote: str
:param relative_path_to_remote_file: path to file on remote relative to the remote path
:type relative_path_to_remote_file: str
:param path_to_local_file: path to the local file
:type path_to_local_file: str
"""
def list_(self, remote_to_list):
"""General method for listing files on the remote
:param remote_to_list: remote to list
:type remote_to_list: str
"""
project_name = self.get_project_name()
if project_name is None:
return self.message
path_to_remote = self.get_path_to_remote(remote_to_list)
if path_to_remote is None:
return self.message
return self.list_files(path_to_remote, project_name)
@abstractmethod
def list_files(self, path_to_remote, project_name):
"""List the files in the remote
:param path_to_remote: path to the remote
:type path_to_remote: str
:param project_name: name of the project
:type project_name: str
"""
raise NotImplementedError
def get_file_name(self, file_):
"""Extract filename from path
:param file_: path to file
:type file_: str
"""
return os.path.basename(file_)
def get_project_name(self):
project_name = self.read_from_local_config("project-info", "project-name")
if project_name:
return project_name
self.add_message("error: project name not present in config")
def get_path_to_remote(self, remote_to_read):
remote = self.read_from_config("remote", remote_to_read)
if remote:
return remote
self.add_message("error: no remote named " + remote_to_read)
def add_message(self, message, append_to=True):
"""Store message and if required append that to the list
:param message: message to display
:type message: str
:param append_to: append message to messages list
:type append_to: bool
"""
self.message = message
if append_to:
self.messages.append(self.message)
@abstractmethod
def file_exists_on_remote(self, path_to_remote, relative_path_to_remote_file, append_to=True):
"""Check if file is already present on remote. This is used to prevent overwriting of files.
:param path_to_remote: path to remote
:type path_to_remote: str
:param relative_path_to_remote_file: path to file on remote relative to the remote path
:type relative_path_to_remote_file: str
:param append_to: Append message to messages list. By default, it is true.
:type append_to: bool
"""
def file_exists_locally(self, path_to_file, append_to=True):
"""Check if file is already present on remote. This is used to prevent overwriting of files.
:param path_to_file: path to file
:type path_to_file: str
:param append_to: Append message to messages list. By default, it is true.
:type append_to: bool
"""
if Path(path_to_file).exists():
self.add_message("info: " + path_to_file + " already exists", append_to)
return True
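# --- Illustrative only ------------------------------------------------------
# A minimal local-filesystem remote, sketched here only to show what the
# abstract hooks above are expected to do. It is not part of the surround
# package; real remotes implement these methods against actual storage.
import shutil


class LocalFileSystemRemote(BaseRemote):
    """Toy remote that 'pushes' and 'pulls' by copying files on disk."""

    def pull_file(self, what_to_pull, key, path_to_remote, relative_path_to_remote_file, path_to_local_file):
        # Copy <remote>/<project>/<key> into the local working directory.
        shutil.copy(os.path.join(path_to_remote, relative_path_to_remote_file), path_to_local_file)
        return "info: " + key + " pulled successfully"

    def push_file(self, what_to_push, key, path_to_remote, relative_path_to_remote_file, path_to_local_file):
        # Copy the local file into <remote>/<project>/<key>.
        shutil.copy(path_to_local_file, os.path.join(path_to_remote, relative_path_to_remote_file))
        return "info: " + key + " pushed successfully"

    def list_files(self, path_to_remote, project_name):
        project_dir = os.path.join(path_to_remote, project_name)
        return os.listdir(project_dir) if os.path.isdir(project_dir) else []

    def file_exists_on_remote(self, path_to_remote, relative_path_to_remote_file, append_to=True):
        exists = os.path.exists(os.path.join(path_to_remote, relative_path_to_remote_file))
        if exists:
            self.add_message("info: " + relative_path_to_remote_file + " already exists on remote", append_to)
        return exists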
|
from distutils.core import setup
setup(
name = "rewemo",
version = "0.1.0",
description = "Renewable energy time series from numerical weather model data",
author = "Harald G Svendsen",
author_email = "harald.svendsen@sintef.no",
license = "MIT License (http://opensource.org/licenses/MIT)",
classifiers = [
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License"
],
packages = ["rewemo"],
package_dir= {"": "src"},
python_requires = ">=3.7"
)
|
"""Generated wrapper for Timelock6h Solidity contract."""
# pylint: disable=too-many-arguments
import json
import time
from typing import ( # pylint: disable=unused-import
Optional,
Tuple,
Union,
)
from eth_utils import to_checksum_address
from hexbytes import HexBytes
from web3.contract import ContractFunction
from web3.datastructures import AttributeDict
from web3.exceptions import ContractLogicError
from moody import Bolors
from moody.libeb import MiliDoS
from moody.m.bases import ContractMethod, Validator, ContractBase
from moody.m.tx_params import TxParams
# Try to import a custom validator class definition; if there isn't one,
# declare one that we can instantiate for the default argument to the
# constructor for Timelock6h below.
try:
# both mypy and pylint complain about what we're doing here, but this
# works just fine, so their messages have been disabled here.
from . import ( # type: ignore # pylint: disable=import-self
Timelock6hValidator,
)
except ImportError:
class Timelock6hValidator( # type: ignore
Validator
):
"""No-op input validator."""
try:
from .middleware import MIDDLEWARE # type: ignore
except ImportError:
pass
class GracePeriodMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the GRACE_PERIOD method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction):
"""Persist instance data."""
super().__init__(elib, contract_address)
self._underlying_method = contract_function
def block_call(self, debug: bool = False) -> int:
_fn = self._underlying_method()
returned = _fn.call({
'from': self._operate
})
return int(returned)
def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().estimateGas(tx_params.as_dict())
class MaximumDelayMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the MAXIMUM_DELAY method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction):
"""Persist instance data."""
super().__init__(elib, contract_address)
self._underlying_method = contract_function
def block_call(self, debug: bool = False) -> int:
_fn = self._underlying_method()
returned = _fn.call({
'from': self._operate
})
return int(returned)
def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().estimateGas(tx_params.as_dict())
class MinimumDelayMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the MINIMUM_DELAY method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction):
"""Persist instance data."""
super().__init__(elib, contract_address)
self._underlying_method = contract_function
def block_call(self, debug: bool = False) -> int:
_fn = self._underlying_method()
returned = _fn.call({
'from': self._operate
})
return int(returned)
def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().estimateGas(tx_params.as_dict())
class AcceptAdminMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the acceptAdmin method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction):
"""Persist instance data."""
super().__init__(elib, contract_address)
self._underlying_method = contract_function
def block_send(self, gas: int, price: int, val: int = 0, debug: bool = False, receiptListen: bool = False) -> None:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
_fn = self._underlying_method()
try:
_t = _fn.buildTransaction({
'from': self._operate,
'gas': gas,
'gasPrice': price
})
_t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
if val > 0:
_t['value'] = val
if debug:
print(f"======== Signing ✅ by {self._operate}")
print(f"======== Transaction ✅ check")
print(_t)
if 'data' in _t:
signed = self._web3_eth.account.sign_transaction(_t)
txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
tx_receipt = None
if receiptListen is True:
print("======== awaiting Confirmation 🚸️ -accept_admin")
tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
if debug:
print("======== TX Result ✅")
print(tx_receipt)
print(f"======== TX blockHash ✅")
if receiptListen is True and tx_receipt is not None:
print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
else:
print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET}")
if receiptListen is False:
time.sleep(self._wait)
except ContractLogicError as er:
print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: accept_admin")
except ValueError as err:
if "message" in err.args[0]:
message = err.args[0]["message"]
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
else:
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")
def send_transaction(self, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().transact(tx_params.as_dict())
def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
"""Construct calldata to be used as input to the method."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().buildTransaction(tx_params.as_dict())
def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().estimateGas(tx_params.as_dict())
class AdminMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the admin method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction):
"""Persist instance data."""
super().__init__(elib, contract_address)
self._underlying_method = contract_function
def block_call(self, debug: bool = False) -> str:
_fn = self._underlying_method()
returned = _fn.call({
'from': self._operate
})
return str(returned)
def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().estimateGas(tx_params.as_dict())
class CancelTransactionMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the cancelTransaction method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
"""Persist instance data."""
super().__init__(elib, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int) -> any:
"""Validate the inputs to the cancelTransaction method."""
self.validator.assert_valid(
method_name='cancelTransaction',
parameter_name='target',
argument_value=target,
)
target = self.validate_and_checksum_address(target)
self.validator.assert_valid(
method_name='cancelTransaction',
parameter_name='value',
argument_value=value,
)
# safeguard against fractional inputs
value = int(value)
self.validator.assert_valid(
method_name='cancelTransaction',
parameter_name='signature',
argument_value=signature,
)
self.validator.assert_valid(
method_name='cancelTransaction',
parameter_name='data',
argument_value=data,
)
self.validator.assert_valid(
method_name='cancelTransaction',
parameter_name='eta',
argument_value=eta,
)
# safeguard against fractional inputs
eta = int(eta)
return (target, value, signature, data, eta)
def block_send(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, gas: int, price: int, val: int = 0, debug: bool = False, receiptListen: bool = False) -> None:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
_fn = self._underlying_method(target, value, signature, data, eta)
try:
_t = _fn.buildTransaction({
'from': self._operate,
'gas': gas,
'gasPrice': price
})
_t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
if val > 0:
_t['value'] = val
if debug:
print(f"======== Signing ✅ by {self._operate}")
print(f"======== Transaction ✅ check")
print(_t)
if 'data' in _t:
signed = self._web3_eth.account.sign_transaction(_t)
txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
tx_receipt = None
if receiptListen is True:
print("======== awaiting Confirmation 🚸️ -cancel_transaction")
tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
if debug:
print("======== TX Result ✅")
print(tx_receipt)
print(f"======== TX blockHash ✅")
if receiptListen is True and tx_receipt is not None:
print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
else:
print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET}")
if receiptListen is False:
time.sleep(self._wait)
except ContractLogicError as er:
print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: cancel_transaction")
except ValueError as err:
if "message" in err.args[0]:
message = err.args[0]["message"]
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
else:
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")
def send_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).transact(tx_params.as_dict())
def build_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> dict:
"""Construct calldata to be used as input to the method."""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).buildTransaction(tx_params.as_dict())
def estimate_gas(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).estimateGas(tx_params.as_dict())
class DelayMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the delay method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction):
"""Persist instance data."""
super().__init__(elib, contract_address)
self._underlying_method = contract_function
def block_call(self, debug: bool = False) -> int:
_fn = self._underlying_method()
returned = _fn.call({
'from': self._operate
})
return int(returned)
def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().estimateGas(tx_params.as_dict())
class ExecuteTransactionMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the executeTransaction method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
"""Persist instance data."""
super().__init__(elib, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int) -> any:
"""Validate the inputs to the executeTransaction method."""
self.validator.assert_valid(
method_name='executeTransaction',
parameter_name='target',
argument_value=target,
)
target = self.validate_and_checksum_address(target)
self.validator.assert_valid(
method_name='executeTransaction',
parameter_name='value',
argument_value=value,
)
# safeguard against fractional inputs
value = int(value)
self.validator.assert_valid(
method_name='executeTransaction',
parameter_name='signature',
argument_value=signature,
)
self.validator.assert_valid(
method_name='executeTransaction',
parameter_name='data',
argument_value=data,
)
self.validator.assert_valid(
method_name='executeTransaction',
parameter_name='eta',
argument_value=eta,
)
# safeguard against fractional inputs
eta = int(eta)
return (target, value, signature, data, eta)
def block_send(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, gas: int, price: int, val: int = 0, debug: bool = False, receiptListen: bool = False) -> Union[bytes, str]:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
_fn = self._underlying_method(target, value, signature, data, eta)
try:
_t = _fn.buildTransaction({
'from': self._operate,
'gas': gas,
'gasPrice': price
})
_t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
if val > 0:
_t['value'] = val
if debug:
print(f"======== Signing ✅ by {self._operate}")
print(f"======== Transaction ✅ check")
print(_t)
if 'data' in _t:
signed = self._web3_eth.account.sign_transaction(_t)
txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
tx_receipt = None
if receiptListen is True:
print("======== awaiting Confirmation 🚸️ -execute_transaction")
tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
if debug:
print("======== TX Result ✅")
print(tx_receipt)
print(f"======== TX blockHash ✅")
if receiptListen is True and tx_receipt is not None:
print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
else:
print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET}")
if receiptListen is False:
time.sleep(self._wait)
except ContractLogicError as er:
print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: execute_transaction")
except ValueError as err:
if "message" in err.args[0]:
message = err.args[0]["message"]
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
else:
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")
def send_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).transact(tx_params.as_dict())
def build_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> dict:
"""Construct calldata to be used as input to the method."""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).buildTransaction(tx_params.as_dict())
def estimate_gas(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).estimateGas(tx_params.as_dict())
class PendingAdminMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the pendingAdmin method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction):
"""Persist instance data."""
super().__init__(elib, contract_address)
self._underlying_method = contract_function
def block_call(self, debug: bool = False) -> str:
_fn = self._underlying_method()
returned = _fn.call({
'from': self._operate
})
return str(returned)
def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method().estimateGas(tx_params.as_dict())
class QueueTransactionMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the queueTransaction method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
"""Persist instance data."""
super().__init__(elib, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int) -> any:
"""Validate the inputs to the queueTransaction method."""
self.validator.assert_valid(
method_name='queueTransaction',
parameter_name='target',
argument_value=target,
)
target = self.validate_and_checksum_address(target)
self.validator.assert_valid(
method_name='queueTransaction',
parameter_name='value',
argument_value=value,
)
# safeguard against fractional inputs
value = int(value)
self.validator.assert_valid(
method_name='queueTransaction',
parameter_name='signature',
argument_value=signature,
)
self.validator.assert_valid(
method_name='queueTransaction',
parameter_name='data',
argument_value=data,
)
self.validator.assert_valid(
method_name='queueTransaction',
parameter_name='eta',
argument_value=eta,
)
# safeguard against fractional inputs
eta = int(eta)
return (target, value, signature, data, eta)
def block_send(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, gas: int, price: int, val: int = 0, debug: bool = False, receiptListen: bool = False) -> Union[bytes, str]:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
_fn = self._underlying_method(target, value, signature, data, eta)
try:
_t = _fn.buildTransaction({
'from': self._operate,
'gas': gas,
'gasPrice': price
})
_t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
if val > 0:
_t['value'] = val
if debug:
print(f"======== Signing ✅ by {self._operate}")
print(f"======== Transaction ✅ check")
print(_t)
if 'data' in _t:
signed = self._web3_eth.account.sign_transaction(_t)
txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
tx_receipt = None
if receiptListen is True:
print("======== awaiting Confirmation 🚸️ -queue_transaction")
tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
if debug:
print("======== TX Result ✅")
print(tx_receipt)
print(f"======== TX blockHash ✅")
if receiptListen is True and tx_receipt is not None:
print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
else:
print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET}")
if receiptListen is False:
time.sleep(self._wait)
except ContractLogicError as er:
print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: queue_transaction")
except ValueError as err:
if "message" in err.args[0]:
message = err.args[0]["message"]
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
else:
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")
def send_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).transact(tx_params.as_dict())
def build_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> dict:
"""Construct calldata to be used as input to the method."""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).buildTransaction(tx_params.as_dict())
def estimate_gas(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
(target, value, signature, data, eta) = self.validate_and_normalize_inputs(target, value, signature, data, eta)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(target, value, signature, data, eta).estimateGas(tx_params.as_dict())
class QueuedTransactionsMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the queuedTransactions method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
"""Persist instance data."""
super().__init__(elib, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(self, index_0: Union[bytes, str]) -> any:
"""Validate the inputs to the queuedTransactions method."""
self.validator.assert_valid(
method_name='queuedTransactions',
parameter_name='index_0',
argument_value=index_0,
)
return (index_0)
def block_call(self, index_0: Union[bytes, str], debug: bool = False) -> bool:
_fn = self._underlying_method(index_0)
returned = _fn.call({
'from': self._operate
})
return bool(returned)
def estimate_gas(self, index_0: Union[bytes, str], tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
(index_0) = self.validate_and_normalize_inputs(index_0)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(index_0).estimateGas(tx_params.as_dict())
class SetDelayMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the setDelay method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
"""Persist instance data."""
super().__init__(elib, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(self, delay_: int) -> any:
"""Validate the inputs to the setDelay method."""
self.validator.assert_valid(
method_name='setDelay',
parameter_name='delay_',
argument_value=delay_,
)
# safeguard against fractional inputs
delay_ = int(delay_)
return (delay_)
def block_send(self, delay_: int, gas: int, price: int, val: int = 0, debug: bool = False, receiptListen: bool = False) -> None:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
_fn = self._underlying_method(delay_)
try:
_t = _fn.buildTransaction({
'from': self._operate,
'gas': gas,
'gasPrice': price
})
_t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
if val > 0:
_t['value'] = val
if debug:
print(f"======== Signing ✅ by {self._operate}")
print(f"======== Transaction ✅ check")
print(_t)
if 'data' in _t:
signed = self._web3_eth.account.sign_transaction(_t)
txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
tx_receipt = None
if receiptListen is True:
print("======== awaiting Confirmation 🚸️ -set_delay")
tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
if debug:
print("======== TX Result ✅")
print(tx_receipt)
print(f"======== TX blockHash ✅")
if receiptListen is True and tx_receipt is not None:
print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
else:
print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET}")
if receiptListen is False:
time.sleep(self._wait)
except ContractLogicError as er:
print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: set_delay")
except ValueError as err:
if "message" in err.args[0]:
message = err.args[0]["message"]
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
else:
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")
def send_transaction(self, delay_: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(delay_) = self.validate_and_normalize_inputs(delay_)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(delay_).transact(tx_params.as_dict())
def build_transaction(self, delay_: int, tx_params: Optional[TxParams] = None) -> dict:
"""Construct calldata to be used as input to the method."""
(delay_) = self.validate_and_normalize_inputs(delay_)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(delay_).buildTransaction(tx_params.as_dict())
def estimate_gas(self, delay_: int, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
(delay_) = self.validate_and_normalize_inputs(delay_)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(delay_).estimateGas(tx_params.as_dict())
class SetPendingAdminMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the setPendingAdmin method."""
def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
"""Persist instance data."""
super().__init__(elib, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(self, pending_admin_: str) -> any:
"""Validate the inputs to the setPendingAdmin method."""
self.validator.assert_valid(
method_name='setPendingAdmin',
parameter_name='pendingAdmin_',
argument_value=pending_admin_,
)
pending_admin_ = self.validate_and_checksum_address(pending_admin_)
return (pending_admin_)
def block_send(self, pending_admin_: str, gas: int, price: int, val: int = 0, debug: bool = False, receiptListen: bool = False) -> None:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
_fn = self._underlying_method(pending_admin_)
try:
_t = _fn.buildTransaction({
'from': self._operate,
'gas': gas,
'gasPrice': price
})
_t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
if val > 0:
_t['value'] = val
if debug:
print(f"======== Signing ✅ by {self._operate}")
print(f"======== Transaction ✅ check")
print(_t)
if 'data' in _t:
signed = self._web3_eth.account.sign_transaction(_t)
txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
tx_receipt = None
if receiptListen is True:
print("======== awaiting Confirmation 🚸️ -set_pending_admin")
tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
if debug:
print("======== TX Result ✅")
print(tx_receipt)
print(f"======== TX blockHash ✅")
if receiptListen is True and tx_receipt is not None:
print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
else:
print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET}")
if receiptListen is False:
time.sleep(self._wait)
except ContractLogicError as er:
print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: set_pending_admin")
except ValueError as err:
if "message" in err.args[0]:
message = err.args[0]["message"]
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
else:
print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")
def send_transaction(self, pending_admin_: str, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(pending_admin_) = self.validate_and_normalize_inputs(pending_admin_)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(pending_admin_).transact(tx_params.as_dict())
def build_transaction(self, pending_admin_: str, tx_params: Optional[TxParams] = None) -> dict:
"""Construct calldata to be used as input to the method."""
(pending_admin_) = self.validate_and_normalize_inputs(pending_admin_)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(pending_admin_).buildTransaction(tx_params.as_dict())
def estimate_gas(self, pending_admin_: str, tx_params: Optional[TxParams] = None) -> int:
"""Estimate gas consumption of method call."""
(pending_admin_) = self.validate_and_normalize_inputs(pending_admin_)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(pending_admin_).estimateGas(tx_params.as_dict())
class SignatureGenerator:
    """Map contract function names to their canonical ABI signatures, e.g. ``setDelay(uint256)``."""
    def __init__(self, abi: any):
        # Use an instance-level dict so generators built from different ABIs do not share state
        # (a class-level mutable attribute would be shared across all instances).
        self._function_signatures = {}
        for func in [obj for obj in abi if obj['type'] == 'function']:
            name = func['name']
            types = [input['type'] for input in func['inputs']]
            self._function_signatures[name] = '{}({})'.format(name, ','.join(types))
def grace_period(self) -> str:
return self._function_signatures["GRACE_PERIOD"]
def maximum_delay(self) -> str:
return self._function_signatures["MAXIMUM_DELAY"]
def minimum_delay(self) -> str:
return self._function_signatures["MINIMUM_DELAY"]
def accept_admin(self) -> str:
return self._function_signatures["acceptAdmin"]
def admin(self) -> str:
return self._function_signatures["admin"]
def cancel_transaction(self) -> str:
return self._function_signatures["cancelTransaction"]
def delay(self) -> str:
return self._function_signatures["delay"]
def execute_transaction(self) -> str:
return self._function_signatures["executeTransaction"]
def pending_admin(self) -> str:
return self._function_signatures["pendingAdmin"]
def queue_transaction(self) -> str:
return self._function_signatures["queueTransaction"]
def queued_transactions(self) -> str:
return self._function_signatures["queuedTransactions"]
def set_delay(self) -> str:
return self._function_signatures["setDelay"]
def set_pending_admin(self) -> str:
return self._function_signatures["setPendingAdmin"]
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Timelock6h(ContractBase):
"""Wrapper class for Timelock6h Solidity contract.
All method parameters of type `bytes`:code: should be encoded as UTF-8,
which can be accomplished via `str.encode("utf_8")`:code:.
"""
_fn_grace_period: GracePeriodMethod
"""Constructor-initialized instance of
:class:`GracePeriodMethod`.
"""
_fn_maximum_delay: MaximumDelayMethod
"""Constructor-initialized instance of
:class:`MaximumDelayMethod`.
"""
_fn_minimum_delay: MinimumDelayMethod
"""Constructor-initialized instance of
:class:`MinimumDelayMethod`.
"""
_fn_accept_admin: AcceptAdminMethod
"""Constructor-initialized instance of
:class:`AcceptAdminMethod`.
"""
_fn_admin: AdminMethod
"""Constructor-initialized instance of
:class:`AdminMethod`.
"""
_fn_cancel_transaction: CancelTransactionMethod
"""Constructor-initialized instance of
:class:`CancelTransactionMethod`.
"""
_fn_delay: DelayMethod
"""Constructor-initialized instance of
:class:`DelayMethod`.
"""
_fn_execute_transaction: ExecuteTransactionMethod
"""Constructor-initialized instance of
:class:`ExecuteTransactionMethod`.
"""
_fn_pending_admin: PendingAdminMethod
"""Constructor-initialized instance of
:class:`PendingAdminMethod`.
"""
_fn_queue_transaction: QueueTransactionMethod
"""Constructor-initialized instance of
:class:`QueueTransactionMethod`.
"""
_fn_queued_transactions: QueuedTransactionsMethod
"""Constructor-initialized instance of
:class:`QueuedTransactionsMethod`.
"""
_fn_set_delay: SetDelayMethod
"""Constructor-initialized instance of
:class:`SetDelayMethod`.
"""
_fn_set_pending_admin: SetPendingAdminMethod
"""Constructor-initialized instance of
:class:`SetPendingAdminMethod`.
"""
def __init__(
self,
core_lib: MiliDoS,
contract_address: str,
validator: Timelock6hValidator = None,
):
"""Get an instance of wrapper for smart contract.
"""
# pylint: disable=too-many-statements
super().__init__()
self.contract_address = contract_address
web3 = core_lib.w3
if not validator:
validator = Timelock6hValidator(web3, contract_address)
# if any middleware was imported, inject it
try:
MIDDLEWARE
except NameError:
pass
else:
try:
for middleware in MIDDLEWARE:
web3.middleware_onion.inject(
middleware['function'], layer=middleware['layer'],
)
except ValueError as value_error:
if value_error.args == ("You can't add the same un-named instance twice",):
pass
self._web3_eth = web3.eth
functions = self._web3_eth.contract(address=to_checksum_address(contract_address), abi=Timelock6h.abi()).functions
self.SIGNATURES = SignatureGenerator(Timelock6h.abi())
self._fn_grace_period = GracePeriodMethod(core_lib, contract_address, functions.GRACE_PERIOD)
self._fn_maximum_delay = MaximumDelayMethod(core_lib, contract_address, functions.MAXIMUM_DELAY)
self._fn_minimum_delay = MinimumDelayMethod(core_lib, contract_address, functions.MINIMUM_DELAY)
self._fn_accept_admin = AcceptAdminMethod(core_lib, contract_address, functions.acceptAdmin)
self._fn_admin = AdminMethod(core_lib, contract_address, functions.admin)
self._fn_cancel_transaction = CancelTransactionMethod(core_lib, contract_address, functions.cancelTransaction, validator)
self._fn_delay = DelayMethod(core_lib, contract_address, functions.delay)
self._fn_execute_transaction = ExecuteTransactionMethod(core_lib, contract_address, functions.executeTransaction, validator)
self._fn_pending_admin = PendingAdminMethod(core_lib, contract_address, functions.pendingAdmin)
self._fn_queue_transaction = QueueTransactionMethod(core_lib, contract_address, functions.queueTransaction, validator)
self._fn_queued_transactions = QueuedTransactionsMethod(core_lib, contract_address, functions.queuedTransactions, validator)
self._fn_set_delay = SetDelayMethod(core_lib, contract_address, functions.setDelay, validator)
self._fn_set_pending_admin = SetPendingAdminMethod(core_lib, contract_address, functions.setPendingAdmin, validator)
def event_cancel_transaction(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""
Implementation of event cancel_transaction in contract Timelock6h
Get log entry for CancelTransaction event.
:param tx_hash: hash of transaction emitting CancelTransaction event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=Timelock6h.abi()).events.CancelTransaction().processReceipt(tx_receipt)
def event_execute_transaction(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""
Implementation of event execute_transaction in contract Timelock6h
Get log entry for ExecuteTransaction event.
:param tx_hash: hash of transaction emitting ExecuteTransaction event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=Timelock6h.abi()).events.ExecuteTransaction().processReceipt(tx_receipt)
def event_new_admin(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""
Implementation of event new_admin in contract Timelock6h
Get log entry for NewAdmin event.
:param tx_hash: hash of transaction emitting NewAdmin event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=Timelock6h.abi()).events.NewAdmin().processReceipt(tx_receipt)
def event_new_delay(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""
Implementation of event new_delay in contract Timelock6h
Get log entry for NewDelay event.
:param tx_hash: hash of transaction emitting NewDelay event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=Timelock6h.abi()).events.NewDelay().processReceipt(tx_receipt)
def event_new_pending_admin(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""
Implementation of event new_pending_admin in contract Timelock6h
Get log entry for NewPendingAdmin event.
:param tx_hash: hash of transaction emitting NewPendingAdmin event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=Timelock6h.abi()).events.NewPendingAdmin().processReceipt(tx_receipt)
def event_queue_transaction(
self, tx_hash: Union[HexBytes, bytes]
) -> Tuple[AttributeDict]:
"""
Implementation of event queue_transaction in contract Timelock6h
Get log entry for QueueTransaction event.
:param tx_hash: hash of transaction emitting QueueTransaction event
"""
tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=Timelock6h.abi()).events.QueueTransaction().processReceipt(tx_receipt)
def grace_period(self) -> int:
"""
Implementation of grace_period in contract Timelock6h
Method of the function
"""
return self._fn_grace_period.block_call()
def maximum_delay(self) -> int:
"""
Implementation of maximum_delay in contract Timelock6h
Method of the function
"""
return self._fn_maximum_delay.block_call()
def minimum_delay(self) -> int:
"""
Implementation of minimum_delay in contract Timelock6h
Method of the function
"""
return self._fn_minimum_delay.block_call()
def accept_admin(self) -> None:
"""
Implementation of accept_admin in contract Timelock6h
Method of the function
"""
return self._fn_accept_admin.block_send(self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
def admin(self) -> str:
"""
Implementation of admin in contract Timelock6h
Method of the function
"""
return self._fn_admin.block_call()
def cancel_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int) -> None:
"""
Implementation of cancel_transaction in contract Timelock6h
Method of the function
"""
return self._fn_cancel_transaction.block_send(target, value, signature, data, eta, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
def delay(self) -> int:
"""
Implementation of delay in contract Timelock6h
Method of the function
"""
return self._fn_delay.block_call()
def execute_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int, wei: int = 0) -> Union[bytes, str]:
"""
Implementation of execute_transaction in contract Timelock6h
Method of the function
"""
return self._fn_execute_transaction.block_send(target, value, signature, data, eta, self.call_contract_fee_amount, self.call_contract_fee_price, wei, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
def pending_admin(self) -> str:
"""
Implementation of pending_admin in contract Timelock6h
Method of the function
"""
return self._fn_pending_admin.block_call()
def queue_transaction(self, target: str, value: int, signature: str, data: Union[bytes, str], eta: int) -> Union[bytes, str]:
"""
Implementation of queue_transaction in contract Timelock6h
Method of the function
"""
return self._fn_queue_transaction.block_send(target, value, signature, data, eta, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
def queued_transactions(self, index_0: Union[bytes, str]) -> bool:
"""
Implementation of queued_transactions in contract Timelock6h
Method of the function
"""
return self._fn_queued_transactions.block_call(index_0)
def set_delay(self, delay_: int) -> None:
"""
Implementation of set_delay in contract Timelock6h
Method of the function
"""
return self._fn_set_delay.block_send(delay_, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
def set_pending_admin(self, pending_admin_: str) -> None:
"""
Implementation of set_pending_admin in contract Timelock6h
Method of the function
"""
return self._fn_set_pending_admin.block_send(pending_admin_, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
def CallContractWait(self, t_long: int) -> "Timelock6h":
self._fn_grace_period.setWait(t_long)
self._fn_maximum_delay.setWait(t_long)
self._fn_minimum_delay.setWait(t_long)
self._fn_accept_admin.setWait(t_long)
self._fn_admin.setWait(t_long)
self._fn_cancel_transaction.setWait(t_long)
self._fn_delay.setWait(t_long)
self._fn_execute_transaction.setWait(t_long)
self._fn_pending_admin.setWait(t_long)
self._fn_queue_transaction.setWait(t_long)
self._fn_queued_transactions.setWait(t_long)
self._fn_set_delay.setWait(t_long)
self._fn_set_pending_admin.setWait(t_long)
return self
@staticmethod
def abi():
"""Return the ABI to the underlying contract."""
return json.loads(
'[{"inputs":[{"internalType":"address","name":"admin_","type":"address"},{"internalType":"uint256","name":"delay_","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"txHash","type":"bytes32"},{"indexed":true,"internalType":"address","name":"target","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"indexed":false,"internalType":"string","name":"signature","type":"string"},{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"},{"indexed":false,"internalType":"uint256","name":"eta","type":"uint256"}],"name":"CancelTransaction","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"txHash","type":"bytes32"},{"indexed":true,"internalType":"address","name":"target","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"indexed":false,"internalType":"string","name":"signature","type":"string"},{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"},{"indexed":false,"internalType":"uint256","name":"eta","type":"uint256"}],"name":"ExecuteTransaction","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"newAdmin","type":"address"}],"name":"NewAdmin","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"newDelay","type":"uint256"}],"name":"NewDelay","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"newPendingAdmin","type":"address"}],"name":"NewPendingAdmin","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"txHash","type":"bytes32"},{"indexed":true,"internalType":"address","name":"target","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"indexed":false,"internalType":"string","name":"signature","type":"string"},{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"},{"indexed":false,"internalType":"uint256","name":"eta","type":"uint256"}],"name":"QueueTransaction","type":"event"},{"payable":true,"stateMutability":"payable","type":"fallback"},{"constant":true,"inputs":[],"name":"GRACE_PERIOD","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"MAXIMUM_DELAY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"MINIMUM_DELAY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"acceptAdmin","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"admin","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"string","name":"signature","type":"string"},{"internalType":"bytes","name":"data","type":"bytes"},{"internalType":"uint256","name":"eta","type":"uint256"}],"name":"cancelTransaction","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"de
lay","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"string","name":"signature","type":"string"},{"internalType":"bytes","name":"data","type":"bytes"},{"internalType":"uint256","name":"eta","type":"uint256"}],"name":"executeTransaction","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":true,"inputs":[],"name":"pendingAdmin","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"string","name":"signature","type":"string"},{"internalType":"bytes","name":"data","type":"bytes"},{"internalType":"uint256","name":"eta","type":"uint256"}],"name":"queueTransaction","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"bytes32","name":"index_0","type":"bytes32"}],"name":"queuedTransactions","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"delay_","type":"uint256"}],"name":"setDelay","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"pendingAdmin_","type":"address"}],"name":"setPendingAdmin","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
# noqa: E501 (line-too-long)
)
# pylint: disable=too-many-lines
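# Illustrative usage sketch (not part of the generated wrapper). Constructing and funding the
# MiliDoS helper is project-specific, so it is only hinted at here; the calls mirror the wrapper
# methods defined above, and every address, signature, and value below is a hypothetical placeholder.
#
#     core_lib = ...  # a configured MiliDoS instance holding the admin's signing account
#     timelock = Timelock6h(core_lib, "0x...deployed timelock address...").CallContractWait(10)
#     timelock.SIGNATURES.queue_transaction()  # -> "queueTransaction(address,uint256,string,bytes,uint256)"
#     eta = int(time.time()) + timelock.delay() + 60          # execution time must respect the delay
#     timelock.queue_transaction("0x...target...", 0, "pause()", b"", eta)   # schedule a hypothetical call
#     # ...once eta has passed (and before eta + GRACE_PERIOD):
#     timelock.execute_transaction("0x...target...", 0, "pause()", b"", eta)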
|
# merge Sort
#
# Time Complexity: O(n*log(n))
# Space Complexity: O(n)
class Solution:
def mergeSorted(self, l1, l2):
ret = []
i, j = 0, 0
while i < len(l1) and j < len(l2):
if l1[i] < l2[j]:
ret.append(l1[i])
i += 1
else:
ret.append(l2[j])
j += 1
        # Append any elements remaining in whichever list was not exhausted
        while i < len(l1):
            ret.append(l1[i])
            i += 1
        while j < len(l2):
            ret.append(l2[j])
            j += 1
        return ret
def mergeSort(self, array: [int]) -> [int]:
if len(array) > 1:
mid = len(array) // 2
l1 = array[:mid]
l2 = array[mid:]
return self.mergeSorted(self.mergeSort(l1), self.mergeSort(l2))
return array
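# Example usage (illustrative sketch): the input values below are arbitrary sample data,
# not taken from the original file; they only demonstrate how the class above is called.
if __name__ == "__main__":
    sample = [38, 27, 43, 3, 9, 82, 10]
    print(Solution().mergeSort(sample))  # -> [3, 9, 10, 27, 38, 43, 82]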
|
from typing import Union
import us
def build_acs_url(year: Union[int, str] = '2017',
survey: Union[str, int] = '1-Year',
person_or_household: str = 'person',
state: str = 'California',
):
"""
Builds CENSUS FTP-server URL where you can download ACS 1-, 3-, or 5- year estimates.
"""
# Building URL
BASE_URL = "https://www2.census.gov/programs-surveys/acs/data/pums/"
## YEAR
try:
year = int(year)
except ValueError:
raise ValueError('year must be a number.')
    # Allow two-digit years (e.g. 17 -> 2017)
    if 0 <= year <= 17:
        year += 2000
    if not (2000 <= year <= 2017):
        raise ValueError("Year must be between 2000 and 2017.")
## SURVEY
if type(survey) == str:
survey = survey.title()
####### TO DO ########
# make sure that it's either 1-Year
# or for certain years 3- or 5- year
### IF YEAR < 2007 there is no option to choose 1- 3- or 5- year surveys
if year < 2007:
survey = ''
## PERSON OR HOUSEHOLD
    person_or_household = person_or_household.lower()
    if person_or_household in ('person', 'household'):
        person_or_household = person_or_household[0]
    if person_or_household not in ('p', 'h'):
        raise ValueError("person_or_household must be 'person' or 'household'.")
## STATE
    state_lookup = us.states.lookup(state)
    if state_lookup is None:
        raise ValueError(f"Unrecognized state: {state!r}")
    state_abbr = state_lookup.abbr.lower()
## URL
YEAR_URL = f'{str(year)}/'
SURVEY_URL = f"{survey}/" if survey else ""
STATE_URL = f'csv_{person_or_household}{state_abbr}.zip'
FINAL_URL = BASE_URL + YEAR_URL + SURVEY_URL + STATE_URL
#
return FINAL_URL
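# Example usage (illustrative sketch): the arguments below are sample values, and the printed URL
# simply reflects how this function assembles the pieces; whether that exact file exists on the
# Census FTP server is not verified here.
if __name__ == "__main__":
    url = build_acs_url(year=2017, survey='1-Year', person_or_household='person', state='California')
    print(url)  # https://www2.census.gov/programs-surveys/acs/data/pums/2017/1-Year/csv_pca.zip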
|
# -*- coding: utf-8 -*-
"""
Plot the eigenworms file.
For more information see
https://github.com/openworm/open-worm-analysis-toolbox/issues/79
"""
import sys
import os
import h5py
import numpy as np
import matplotlib.pyplot as plt
#import mpld3
# We must add .. to the path so that we can perform the
# import of open-worm-analysis-toolbox while running this as
# a top-level script (i.e. with __name__ = '__main__')
sys.path.append('..')
import open_worm_analysis_toolbox as mv
def main():
# Open the eigenworms file
features_path = os.path.dirname(mv.features.__file__)
eigenworm_path = os.path.join(features_path, mv.config.EIGENWORM_FILE)
eigenworm_file = h5py.File(eigenworm_path, 'r')
    # Extract the data (h5py 3.x removed Dataset.value; indexing with [()] reads the full array)
    eigenworms = eigenworm_file["eigenWorms"][()]
    eigenworm_file.close()
# Print the shape of eigenworm matrix
print(np.shape(eigenworms))
# Plot the eigenworms
for eigenworm_i in range(np.shape(eigenworms)[1]):
plt.plot(eigenworms[:, eigenworm_i])
# mpld3.show()
plt.show()
if __name__ == '__main__':
main()
|
# pylint: disable=no-self-use,invalid-name
from unittest import TestCase
from allennlp.models.archival import load_archive
from allennlp.service.predictors import Predictor
# These imports are kept for their side effects: importing the classes registers the predictor,
# model, and dataset reader with AllenNLP so that load_archive/Predictor.from_archive can resolve them.
from propara.propara.service.predictors.prostruct_prediction import ProStructPredictor
from propara.propara.models.prostruct_model import ProStructModel
from propara.propara.data.prostruct_dataset_reader import ProStructDatasetReader
class TestProParaPredictor(TestCase):
def test_uses_named_inputs(self):
inputs = {"para_id": "4",
"sentence_texts": ["Plants die.",
"They are buried in sediment.",
"Bacteria is buried in the sediment.",
"Large amounts of sediment gradually pile on top of the original sediment.",
"Pressure builds up.",
"Heat increases.",
"The chemical structure of the buried sediment and plants changes.",
"The sediment and plants are at least one mile underground.",
"The buried area is extremely hot.",
"More chemical changes happen eand the buried material becomes oil."
],
"participants": ["plants",
"bacteria",
"sediment",
"oil"],
"states": [
["?", "?", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "one mile underground", "one mile underground", "-"],
["?", "?", "?", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "sediment", "-"],
["?", "?", "?", "?", "?", "?", "?", "?", "underground", "underground", "underground"],
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "underground"]]}
archive = load_archive('../tests/fixtures/prostruct/prostruct_toy_model.tar.gz')
predictor = Predictor.from_archive(archive, 'prostruct_prediction')
result = predictor.predict_json(inputs)
assert(result['para_id'] == '4')
assert(result["sentence_texts"] == ["Plants die.",
"They are buried in sediment.",
"Bacteria is buried in the sediment.",
"Large amounts of sediment gradually pile on top of the original sediment.",
"Pressure builds up.",
"Heat increases.",
"The chemical structure of the buried sediment and plants changes.",
"The sediment and plants are at least one mile underground.",
"The buried area is extremely hot.",
"More chemical changes happen eand the buried material becomes oil."
])
assert(result['participants'] == ["plants",
"bacteria",
"sediment",
"oil"])
# This changes with a new model (but some label must be predicted).
print(f"result['top1_labels']: {result['top1_labels']}")
assert(len(result['top1_labels']) > 1)
|
"""
This module contains classes for datasets compatible with Pytorch
"""
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import numpy as np
class ImageDataset(Dataset):
def __init__(self, files: list, label_encoder: LabelEncoder, teacher_labels=None, size=256, augs=False):
"""
        :param files: list of pathlib.Path image files; the class label is taken from each file's parent directory name
:param label_encoder: sklearn label encoder for mapping class labels
:param teacher_labels: path to dark knowledge of teacher model
:param size: size of a picture in a dataset
:param augs: use augmentations
"""
self.files = files
# Class label is a parent directory
self.labels = [path.parent.name for path in self.files]
self.labels = label_encoder.transform(self.labels)
if teacher_labels:
self.teacher_labels = np.load(teacher_labels)
else:
self.teacher_labels = []
self.transformations = transforms.Compose([
transforms.Resize((size, size)),
transforms.ToTensor(),
])
if augs:
self.augs = transforms.Compose([
transforms.RandomApply([
transforms.ColorJitter(brightness=0.2, contrast=0.2),
transforms.RandomAffine(degrees=(-30, 30), scale=(0.75, 1.5))
], p=0.7),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomPerspective(p=0.5)
])
else:
self.augs = None
def __getitem__(self, index):
path = self.files[index]
label = self.labels[index]
image = Image.open(path).convert('RGB')
image.load()
if self.augs:
image = self.augs(image)
image = self.transformations(image)
if len(self.teacher_labels) > 0:
teacher_label = self.teacher_labels[index]
return image, (label, teacher_label)
else:
# Returning in this format allows to use the same code for training with and without teacher model
return image, (label, )
def __len__(self):
return len(self.files)
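# Example usage (illustrative sketch). The directory layout, file glob, and batch size below are
# hypothetical; they only show how this dataset is meant to be wired into a DataLoader.
if __name__ == "__main__":
    from pathlib import Path
    from torch.utils.data import DataLoader
    files = sorted(Path("data/train").glob("*/*.jpg"))  # hypothetical <class>/<image>.jpg layout
    encoder = LabelEncoder().fit([path.parent.name for path in files])
    dataset = ImageDataset(files, label_encoder=encoder, size=256, augs=True)
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    images, (labels,) = next(iter(loader))
    print(images.shape, labels.shape)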
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from cms.models import CMSPlugin
from cms.extensions import PageExtension
from cms.extensions.extension_pool import extension_pool
from djangocms_attributes_field.fields import AttributesField
class MegamenuExtension(PageExtension):
show_megamenu = models.BooleanField(_('Show Megamenu Placeholder'), null=False, default=False)
extension_pool.register(MegamenuExtension)
@python_2_unicode_compatible
class Section(CMSPlugin):
SIZE_CHOICES = (
('2', '1/6'),
('3', '1/4'),
('4', '1/3'),
('6', '1/2'),
('8', '2/3'),
)
size = models.CharField(max_length=1, choices=SIZE_CHOICES, default='2')
title = models.CharField(max_length=255, blank=True)
attributes = AttributesField(verbose_name='Attributes', blank=True)
    def __str__(self):
        return self.get_size_display()
def get_classes(self):
return 'col-sm-%s' % self.size
|
import torch
import torch.nn as nn
def uniform_weights_initialization(model):
    """Apply Xavier (Glorot) uniform initialization to every Conv2d layer of the given model."""
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            # xavier_uniform_ is the in-place successor of the deprecated xavier_uniform
            torch.nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                module.bias.data.fill_(0.01)
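# Example usage (illustrative sketch): the tiny model below is made up purely for demonstration;
# any nn.Module containing Conv2d layers would be initialized the same way.
if __name__ == "__main__":
    demo_model = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(16, 8, kernel_size=3))
    uniform_weights_initialization(demo_model)
    print(demo_model[0].bias[:3])  # biases are filled with 0.01 by the initializer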
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from accelerator_abstract.models import BaseApplicationAnswer
class ApplicationAnswer(BaseApplicationAnswer):
class Meta(BaseApplicationAnswer.Meta):
swappable = swapper.swappable_setting(
BaseApplicationAnswer.Meta.app_label, 'ApplicationAnswer')
|
import itertools
import numpy as np
import tensorflow as tf
import video_prediction as vp
from video_prediction import ops
from video_prediction.models import VideoPredictionModel, SAVPVideoPredictionModel
from video_prediction.models import pix2pix_model, mocogan_model, spectral_norm_model
from video_prediction.models.savp_model import create_encoder, apply_kernels, apply_flows, identity_kernel
from video_prediction.ops import dense, conv2d, flatten, tile_concat
from video_prediction.rnn_ops import BasicConv2DLSTMCell, Conv2DGRUCell
from video_prediction.utils import tf_utils
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
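# Illustrative sketch (not part of the original module): the predicted mixing kernels are made
# strictly positive with a shifted ReLU and then normalized to sum to one over their spatial axes,
# mirroring the CDNA branch of DNACell.call below; here in plain NumPy on a made-up kernel tensor.
def _normalized_kernels_example():
    raw = np.random.randn(4, 5, 5, 10)                         # (batch, kh, kw, num_transformed_images)
    kernels = np.maximum(raw - RELU_SHIFT, 0.0) + RELU_SHIFT   # lower-bound at RELU_SHIFT so no entry is zero
    kernels /= kernels.sum(axis=(1, 2), keepdims=True)         # each 5x5 kernel now sums to 1
    return kernels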
def encoder_fn(inputs, hparams=None):
image_pairs = []
for i in range(hparams.num_views):
suffix = '%d' % i if i > 0 else ''
images = inputs['images' + suffix]
image_pairs.append(images[:hparams.sequence_length - 1])
image_pairs.append(images[1:hparams.sequence_length])
image_pairs = tf.concat(image_pairs, axis=-1)
if 'actions' in inputs:
image_pairs = tile_concat([image_pairs,
tf.expand_dims(tf.expand_dims(inputs['actions'], axis=-2), axis=-2)], axis=-1)
outputs = create_encoder(image_pairs,
e_net=hparams.e_net,
use_e_rnn=hparams.use_e_rnn,
rnn=hparams.rnn,
nz=hparams.nz,
nef=hparams.nef,
n_layers=hparams.n_layers,
norm_layer=hparams.norm_layer)
return outputs
def discriminator_fn(targets, inputs=None, hparams=None):
outputs = {}
if hparams.gan_weight or hparams.vae_gan_weight:
_, pix2pix_outputs = pix2pix_model.discriminator_fn(targets, inputs=inputs, hparams=hparams)
outputs.update(pix2pix_outputs)
if hparams.image_gan_weight or hparams.image_vae_gan_weight or \
hparams.video_gan_weight or hparams.video_vae_gan_weight or \
hparams.acvideo_gan_weight or hparams.acvideo_vae_gan_weight:
_, mocogan_outputs = mocogan_model.discriminator_fn(targets, inputs=inputs, hparams=hparams)
outputs.update(mocogan_outputs)
if hparams.image_sn_gan_weight or hparams.image_sn_vae_gan_weight or \
hparams.video_sn_gan_weight or hparams.video_sn_vae_gan_weight:
_, spectral_norm_outputs = spectral_norm_model.discriminator_fn(targets, inputs=inputs, hparams=hparams)
outputs.update(spectral_norm_outputs)
return None, outputs
class DNACell(tf.nn.rnn_cell.RNNCell):
def __init__(self, inputs, hparams, reuse=None):
super(DNACell, self).__init__(_reuse=reuse)
self.inputs = inputs
self.hparams = hparams
if self.hparams.where_add not in ('input', 'all', 'middle'):
raise ValueError('Invalid where_add %s' % self.hparams.where_add)
batch_size = inputs['images'].shape[1].value
image_shape = inputs['images'].shape.as_list()[2:]
height, width, _ = image_shape
scale_size = max(height, width)
if scale_size == 256:
self.encoder_layer_specs = [
(self.hparams.ngf, False),
(self.hparams.ngf * 2, False),
(self.hparams.ngf * 4, True),
(self.hparams.ngf * 8, True),
(self.hparams.ngf * 8, True),
]
self.decoder_layer_specs = [
(self.hparams.ngf * 8, True),
(self.hparams.ngf * 4, True),
(self.hparams.ngf * 2, False),
(self.hparams.ngf, False),
(self.hparams.ngf, False),
]
elif scale_size == 64:
self.encoder_layer_specs = [
(self.hparams.ngf, True),
(self.hparams.ngf * 2, True),
(self.hparams.ngf * 4, True),
]
self.decoder_layer_specs = [
(self.hparams.ngf * 2, True),
(self.hparams.ngf, True),
(self.hparams.ngf, False),
]
elif scale_size == 32:
self.encoder_layer_specs = [
(self.hparams.ngf, True),
(self.hparams.ngf * 2, True),
]
self.decoder_layer_specs = [
(self.hparams.ngf, True),
(self.hparams.ngf, False),
]
else:
raise NotImplementedError
# output_size
num_masks = self.hparams.last_frames * self.hparams.num_transformed_images + \
int(bool(self.hparams.prev_image_background)) + \
int(bool(self.hparams.first_image_background and not self.hparams.context_images_background)) + \
(self.hparams.context_frames if self.hparams.context_images_background else 0) + \
int(bool(self.hparams.generate_scratch_image))
output_size = {}
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
output_size['gen_images' + suffix] = tf.TensorShape(image_shape)
output_size['transformed_images' + suffix] = tf.TensorShape(image_shape + [num_masks])
output_size['masks' + suffix] = tf.TensorShape([height, width, 1, num_masks])
if 'pix_distribs' in inputs:
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
num_motions = inputs['pix_distribs' + suffix].shape[-1].value
output_size['gen_pix_distribs' + suffix] = tf.TensorShape([height, width, num_motions])
output_size['transformed_pix_distribs' + suffix] = tf.TensorShape([height, width, num_motions, num_masks])
if 'states' in inputs:
output_size['gen_states'] = inputs['states'].shape[2:]
if self.hparams.transformation == 'flow':
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
output_size['gen_flows' + suffix] = tf.TensorShape([height, width, 2, self.hparams.last_frames * self.hparams.num_transformed_images])
output_size['gen_flows_rgb' + suffix] = tf.TensorShape([height, width, 3, self.hparams.last_frames * self.hparams.num_transformed_images])
self._output_size = output_size
# state_size
conv_rnn_state_sizes = []
conv_rnn_height, conv_rnn_width = height, width
for out_channels, use_conv_rnn in self.encoder_layer_specs:
conv_rnn_height //= 2
conv_rnn_width //= 2
if use_conv_rnn:
conv_rnn_state_sizes.append(tf.TensorShape([conv_rnn_height, conv_rnn_width, out_channels]))
for out_channels, use_conv_rnn in self.decoder_layer_specs:
conv_rnn_height *= 2
conv_rnn_width *= 2
if use_conv_rnn:
conv_rnn_state_sizes.append(tf.TensorShape([conv_rnn_height, conv_rnn_width, out_channels]))
if self.hparams.conv_rnn == 'lstm':
conv_rnn_state_sizes = [tf.nn.rnn_cell.LSTMStateTuple(conv_rnn_state_size, conv_rnn_state_size)
for conv_rnn_state_size in conv_rnn_state_sizes]
state_size = {'time': tf.TensorShape([])}
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
state_size['gen_image' + suffix] = tf.TensorShape(image_shape)
state_size['last_images' + suffix] = [tf.TensorShape(image_shape)] * self.hparams.last_frames
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
state_size['conv_rnn_states' + suffix] = conv_rnn_state_sizes
if self.hparams.shared_views:
break
if 'zs' in inputs and self.hparams.use_rnn_z:
rnn_z_state_size = tf.TensorShape([self.hparams.nz])
if self.hparams.rnn == 'lstm':
rnn_z_state_size = tf.nn.rnn_cell.LSTMStateTuple(rnn_z_state_size, rnn_z_state_size)
state_size['rnn_z_state'] = rnn_z_state_size
if 'pix_distribs' in inputs:
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
state_size['gen_pix_distrib' + suffix] = tf.TensorShape([height, width, num_motions])
state_size['last_pix_distribs' + suffix] = [tf.TensorShape([height, width, num_motions])] * self.hparams.last_frames
if 'states' in inputs:
state_size['gen_state'] = inputs['states'].shape[2:]
self._state_size = state_size
ground_truth_sampling_shape = [self.hparams.sequence_length - 1 - self.hparams.context_frames, batch_size]
if self.hparams.schedule_sampling == 'none':
ground_truth_sampling = tf.constant(False, dtype=tf.bool, shape=ground_truth_sampling_shape)
elif self.hparams.schedule_sampling in ('inverse_sigmoid', 'linear'):
if self.hparams.schedule_sampling == 'inverse_sigmoid':
k = self.hparams.schedule_sampling_k
start_step = self.hparams.schedule_sampling_steps[0]
iter_num = tf.to_float(tf.train.get_or_create_global_step())
prob = (k / (k + tf.exp((iter_num - start_step) / k)))
prob = tf.cond(tf.less(iter_num, start_step), lambda: 1.0, lambda: prob)
elif self.hparams.schedule_sampling == 'linear':
start_step, end_step = self.hparams.schedule_sampling_steps
step = tf.clip_by_value(tf.train.get_or_create_global_step(), start_step, end_step)
prob = 1.0 - tf.to_float(step - start_step) / tf.to_float(end_step - start_step)
log_probs = tf.log([1 - prob, prob])
ground_truth_sampling = tf.multinomial([log_probs] * batch_size, ground_truth_sampling_shape[0])
ground_truth_sampling = tf.cast(tf.transpose(ground_truth_sampling, [1, 0]), dtype=tf.bool)
# Ensure that eventually, the model is deterministically
# autoregressive (as opposed to autoregressive with very high probability).
ground_truth_sampling = tf.cond(tf.less(prob, 0.001),
lambda: tf.constant(False, dtype=tf.bool, shape=ground_truth_sampling_shape),
lambda: ground_truth_sampling)
else:
raise NotImplementedError
ground_truth_context = tf.constant(True, dtype=tf.bool, shape=[self.hparams.context_frames, batch_size])
self.ground_truth = tf.concat([ground_truth_context, ground_truth_sampling], axis=0)
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def zero_state(self, batch_size, dtype):
init_state = super(DNACell, self).zero_state(batch_size, dtype)
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
init_state['last_images' + suffix] = [self.inputs['images' + suffix][0]] * self.hparams.last_frames
if 'pix_distribs' in self.inputs:
init_state['last_pix_distribs' + suffix] = [self.inputs['pix_distribs' + suffix][0]] * self.hparams.last_frames
return init_state
def _rnn_func(self, inputs, state, num_units):
if self.hparams.rnn == 'lstm':
RNNCell = tf.contrib.rnn.BasicLSTMCell
elif self.hparams.rnn == 'gru':
RNNCell = tf.contrib.rnn.GRUCell
else:
raise NotImplementedError
rnn_cell = RNNCell(num_units, reuse=tf.get_variable_scope().reuse)
return rnn_cell(inputs, state)
def _conv_rnn_func(self, inputs, state, filters):
inputs_shape = inputs.get_shape().as_list()
input_shape = inputs_shape[1:]
if self.hparams.norm_layer == 'none':
normalizer_fn = None
else:
normalizer_fn = ops.get_norm_layer(self.hparams.norm_layer)
if self.hparams.conv_rnn == 'lstm':
Conv2DRNNCell = BasicConv2DLSTMCell
elif self.hparams.conv_rnn == 'gru':
Conv2DRNNCell = Conv2DGRUCell
else:
raise NotImplementedError
if self.hparams.ablation_conv_rnn_norm:
conv_rnn_cell = Conv2DRNNCell(input_shape, filters, kernel_size=(5, 5),
reuse=tf.get_variable_scope().reuse)
h, state = conv_rnn_cell(inputs, state)
outputs = (normalizer_fn(h), state)
else:
conv_rnn_cell = Conv2DRNNCell(input_shape, filters, kernel_size=(5, 5),
normalizer_fn=normalizer_fn,
separate_norms=self.hparams.norm_layer == 'layer',
reuse=tf.get_variable_scope().reuse)
outputs = conv_rnn_cell(inputs, state)
return outputs
def call(self, inputs, states):
norm_layer = ops.get_norm_layer(self.hparams.norm_layer)
downsample_layer = ops.get_downsample_layer(self.hparams.downsample_layer)
upsample_layer = ops.get_upsample_layer(self.hparams.upsample_layer)
image_shape = inputs['images'].get_shape().as_list()
batch_size, height, width, color_channels = image_shape
time = states['time']
with tf.control_dependencies([tf.assert_equal(time[1:], time[0])]):
t = tf.to_int32(tf.identity(time[0]))
if 'states' in inputs:
state = tf.where(self.ground_truth[t], inputs['states'], states['gen_state'])
state_action = []
state_action_z = []
if 'actions' in inputs:
state_action.append(inputs['actions'])
state_action_z.append(inputs['actions'])
if 'states' in inputs:
state_action.append(state)
# don't backpropagate the convnet through the state dynamics
state_action_z.append(tf.stop_gradient(state))
if 'zs' in inputs:
if self.hparams.use_rnn_z:
with tf.variable_scope('%s_z' % self.hparams.rnn):
rnn_z, rnn_z_state = self._rnn_func(inputs['zs'], states['rnn_z_state'], self.hparams.nz)
state_action_z.append(rnn_z)
else:
state_action_z.append(inputs['zs'])
def concat(tensors, axis):
if len(tensors) == 0:
return tf.zeros([batch_size, 0])
elif len(tensors) == 1:
return tensors[0]
else:
return tf.concat(tensors, axis=axis)
state_action = concat(state_action, axis=-1)
state_action_z = concat(state_action_z, axis=-1)
image_views = []
first_image_views = []
if 'pix_distribs' in inputs:
pix_distrib_views = []
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
image_view = tf.where(self.ground_truth[t], inputs['images' + suffix], states['gen_image' + suffix]) # schedule sampling (if any)
image_views.append(image_view)
first_image_views.append(self.inputs['images' + suffix][0])
if 'pix_distribs' in inputs:
pix_distrib_view = tf.where(self.ground_truth[t], inputs['pix_distribs' + suffix], states['gen_pix_distrib' + suffix])
pix_distrib_views.append(pix_distrib_view)
outputs = {}
new_states = {}
all_layers = []
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
conv_rnn_states = states['conv_rnn_states' + suffix]
layers = []
new_conv_rnn_states = []
for i, (out_channels, use_conv_rnn) in enumerate(self.encoder_layer_specs):
with tf.variable_scope('h%d' % i + suffix):
if i == 0:
# all image views and the first image corresponding to this view only
h = tf.concat(image_views + first_image_views, axis=-1)
kernel_size = (5, 5)
else:
h = layers[-1][-1]
kernel_size = (3, 3)
if self.hparams.where_add == 'all' or (self.hparams.where_add == 'input' and i == 0):
h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
h = downsample_layer(h, out_channels, kernel_size=kernel_size, strides=(2, 2))
h = norm_layer(h)
h = tf.nn.relu(h)
if use_conv_rnn:
conv_rnn_state = conv_rnn_states[len(new_conv_rnn_states)]
with tf.variable_scope('%s_h%d' % (self.hparams.conv_rnn, i) + suffix):
if self.hparams.where_add == 'all':
conv_rnn_h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
else:
conv_rnn_h = h
conv_rnn_h, conv_rnn_state = self._conv_rnn_func(conv_rnn_h, conv_rnn_state, out_channels)
new_conv_rnn_states.append(conv_rnn_state)
layers.append((h, conv_rnn_h) if use_conv_rnn else (h,))
num_encoder_layers = len(layers)
for i, (out_channels, use_conv_rnn) in enumerate(self.decoder_layer_specs):
with tf.variable_scope('h%d' % len(layers) + suffix):
if i == 0:
h = layers[-1][-1]
else:
h = tf.concat([layers[-1][-1], layers[num_encoder_layers - i - 1][-1]], axis=-1)
if self.hparams.where_add == 'all' or (self.hparams.where_add == 'middle' and i == 0):
h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
h = upsample_layer(h, out_channels, kernel_size=(3, 3), strides=(2, 2))
h = norm_layer(h)
h = tf.nn.relu(h)
if use_conv_rnn:
conv_rnn_state = conv_rnn_states[len(new_conv_rnn_states)]
with tf.variable_scope('%s_h%d' % (self.hparams.conv_rnn, len(layers)) + suffix):
if self.hparams.where_add == 'all':
conv_rnn_h = tile_concat([h, state_action_z[:, None, None, :]], axis=-1)
else:
conv_rnn_h = h
conv_rnn_h, conv_rnn_state = self._conv_rnn_func(conv_rnn_h, conv_rnn_state, out_channels)
new_conv_rnn_states.append(conv_rnn_state)
layers.append((h, conv_rnn_h) if use_conv_rnn else (h,))
assert len(new_conv_rnn_states) == len(conv_rnn_states)
new_states['conv_rnn_states' + suffix] = new_conv_rnn_states
all_layers.append(layers)
if self.hparams.shared_views:
break
for i in range(self.hparams.num_views):
suffix = '%d' % i if i > 0 else ''
if self.hparams.shared_views:
layers, = all_layers
else:
layers = all_layers[i]
image = image_views[i]
last_images = states['last_images' + suffix][1:] + [image]
if 'pix_distribs' in inputs:
pix_distrib = pix_distrib_views[i]
last_pix_distribs = states['last_pix_distribs' + suffix][1:] + [pix_distrib]
if self.hparams.last_frames and self.hparams.num_transformed_images:
if self.hparams.transformation == 'flow':
with tf.variable_scope('h%d_flow' % len(layers) + suffix):
h_flow = conv2d(layers[-1][-1], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_flow = norm_layer(h_flow)
h_flow = tf.nn.relu(h_flow)
with tf.variable_scope('flows' + suffix):
flows = conv2d(h_flow, 2 * self.hparams.last_frames * self.hparams.num_transformed_images, kernel_size=(3, 3), strides=(1, 1))
flows = tf.reshape(flows, [batch_size, height, width, 2, self.hparams.last_frames * self.hparams.num_transformed_images])
else:
assert len(self.hparams.kernel_size) == 2
kernel_shape = list(self.hparams.kernel_size) + [self.hparams.last_frames * self.hparams.num_transformed_images]
if self.hparams.transformation == 'dna':
with tf.variable_scope('h%d_dna_kernel' % len(layers) + suffix):
h_dna_kernel = conv2d(layers[-1][-1], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_dna_kernel = norm_layer(h_dna_kernel)
h_dna_kernel = tf.nn.relu(h_dna_kernel)
# Using largest hidden state for predicting untied conv kernels.
with tf.variable_scope('dna_kernels' + suffix):
kernels = conv2d(h_dna_kernel, np.prod(kernel_shape), kernel_size=(3, 3), strides=(1, 1))
kernels = tf.reshape(kernels, [batch_size, height, width] + kernel_shape)
kernels = kernels + identity_kernel(self.hparams.kernel_size)[None, None, None, :, :, None]
kernel_spatial_axes = [3, 4]
elif self.hparams.transformation == 'cdna':
with tf.variable_scope('cdna_kernels' + suffix):
smallest_layer = layers[num_encoder_layers - 1][-1]
kernels = dense(flatten(smallest_layer), np.prod(kernel_shape))
kernels = tf.reshape(kernels, [batch_size] + kernel_shape)
kernels = kernels + identity_kernel(self.hparams.kernel_size)[None, :, :, None]
kernel_spatial_axes = [1, 2]
else:
raise ValueError('Invalid transformation %s' % self.hparams.transformation)
if self.hparams.transformation != 'flow':
with tf.name_scope('kernel_normalization' + suffix):
kernels = tf.nn.relu(kernels - RELU_SHIFT) + RELU_SHIFT
kernels /= tf.reduce_sum(kernels, axis=kernel_spatial_axes, keepdims=True)
if self.hparams.generate_scratch_image:
with tf.variable_scope('h%d_scratch' % len(layers) + suffix):
h_scratch = conv2d(layers[-1][-1], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_scratch = norm_layer(h_scratch)
h_scratch = tf.nn.relu(h_scratch)
# Using largest hidden state for predicting a new image layer.
# This allows the network to also generate one image from scratch,
# which is useful when regions of the image become unoccluded.
with tf.variable_scope('scratch_image' + suffix):
scratch_image = conv2d(h_scratch, color_channels, kernel_size=(3, 3), strides=(1, 1))
scratch_image = tf.nn.sigmoid(scratch_image)
with tf.name_scope('transformed_images' + suffix):
transformed_images = []
if self.hparams.last_frames and self.hparams.num_transformed_images:
if self.hparams.transformation == 'flow':
transformed_images.extend(apply_flows(last_images, flows))
else:
transformed_images.extend(apply_kernels(last_images, kernels, self.hparams.dilation_rate))
if self.hparams.prev_image_background:
transformed_images.append(image)
if self.hparams.first_image_background and not self.hparams.context_images_background:
transformed_images.append(self.inputs['images' + suffix][0])
if self.hparams.context_images_background:
transformed_images.extend(tf.unstack(self.inputs['images' + suffix][:self.hparams.context_frames]))
if self.hparams.generate_scratch_image:
transformed_images.append(scratch_image)
if 'pix_distribs' in inputs:
with tf.name_scope('transformed_pix_distribs' + suffix):
transformed_pix_distribs = []
if self.hparams.last_frames and self.hparams.num_transformed_images:
if self.hparams.transformation == 'flow':
transformed_pix_distribs.extend(apply_flows(last_pix_distribs, flows))
else:
transformed_pix_distribs.extend(apply_kernels(last_pix_distribs, kernels, self.hparams.dilation_rate))
if self.hparams.prev_image_background:
transformed_pix_distribs.append(pix_distrib)
if self.hparams.first_image_background and not self.hparams.context_images_background:
transformed_pix_distribs.append(self.inputs['pix_distribs' + suffix][0])
if self.hparams.context_images_background:
transformed_pix_distribs.extend(tf.unstack(self.inputs['pix_distribs' + suffix][:self.hparams.context_frames]))
if self.hparams.generate_scratch_image:
transformed_pix_distribs.append(pix_distrib)
with tf.name_scope('masks' + suffix):
if len(transformed_images) > 1:
with tf.variable_scope('h%d_masks' % len(layers) + suffix):
h_masks = conv2d(layers[-1][-1], self.hparams.ngf, kernel_size=(3, 3), strides=(1, 1))
h_masks = norm_layer(h_masks)
h_masks = tf.nn.relu(h_masks)
with tf.variable_scope('masks' + suffix):
if self.hparams.dependent_mask:
h_masks = tf.concat([h_masks] + transformed_images, axis=-1)
masks = conv2d(h_masks, len(transformed_images), kernel_size=(3, 3), strides=(1, 1))
masks = tf.nn.softmax(masks)
masks = tf.split(masks, len(transformed_images), axis=-1)
elif len(transformed_images) == 1:
masks = [tf.ones([batch_size, height, width, 1])]
else:
raise ValueError("Either one of the following should be true: "
"last_frames and num_transformed_images, first_image_background, "
"prev_image_background, generate_scratch_image")
with tf.name_scope('gen_images' + suffix):
assert len(transformed_images) == len(masks)
gen_image = tf.add_n([transformed_image * mask
for transformed_image, mask in zip(transformed_images, masks)])
if 'pix_distribs' in inputs:
with tf.name_scope('gen_pix_distribs' + suffix):
assert len(transformed_pix_distribs) == len(masks)
gen_pix_distrib = tf.add_n([transformed_pix_distrib * mask
for transformed_pix_distrib, mask in zip(transformed_pix_distribs, masks)])
if self.hparams.renormalize_pixdistrib:
gen_pix_distrib /= tf.reduce_sum(gen_pix_distrib, axis=(1, 2), keepdims=True)
outputs['gen_images' + suffix] = gen_image
outputs['transformed_images' + suffix] = tf.stack(transformed_images, axis=-1)
outputs['masks' + suffix] = tf.stack(masks, axis=-1)
if 'pix_distribs' in inputs:
outputs['gen_pix_distribs' + suffix] = gen_pix_distrib
outputs['transformed_pix_distribs' + suffix] = tf.stack(transformed_pix_distribs, axis=-1)
if self.hparams.transformation == 'flow':
outputs['gen_flows' + suffix] = flows
flows_transposed = tf.transpose(flows, [0, 1, 2, 4, 3])
flows_rgb_transposed = tf_utils.flow_to_rgb(flows_transposed)
flows_rgb = tf.transpose(flows_rgb_transposed, [0, 1, 2, 4, 3])
outputs['gen_flows_rgb' + suffix] = flows_rgb
new_states['gen_image' + suffix] = gen_image
new_states['last_images' + suffix] = last_images
if 'pix_distribs' in inputs:
new_states['gen_pix_distrib' + suffix] = gen_pix_distrib
new_states['last_pix_distribs' + suffix] = last_pix_distribs
if 'states' in inputs:
with tf.name_scope('gen_states'):
with tf.variable_scope('state_pred'):
gen_state = dense(state_action, inputs['states'].shape[-1].value)
if 'states' in inputs:
outputs['gen_states'] = gen_state
new_states['time'] = time + 1
if 'zs' in inputs and self.hparams.use_rnn_z:
new_states['rnn_z_state'] = rnn_z_state
if 'states' in inputs:
new_states['gen_state'] = gen_state
return outputs, new_states
def generator_fn(inputs, outputs_enc=None, hparams=None):
batch_size = inputs['images'].shape[1].value
inputs = {name: tf_utils.maybe_pad_or_slice(input, hparams.sequence_length - 1)
for name, input in inputs.items()}
if hparams.nz:
def sample_zs():
if outputs_enc is None:
zs = tf.random_normal([hparams.sequence_length - 1, batch_size, hparams.nz], 0, 1)
else:
enc_zs_mu = outputs_enc['enc_zs_mu']
enc_zs_log_sigma_sq = outputs_enc['enc_zs_log_sigma_sq']
eps = tf.random_normal([hparams.sequence_length - 1, batch_size, hparams.nz], 0, 1)
zs = enc_zs_mu + tf.sqrt(tf.exp(enc_zs_log_sigma_sq)) * eps
return zs
inputs['zs'] = sample_zs()
else:
if outputs_enc is not None:
raise ValueError('outputs_enc has to be None when nz is 0.')
cell = DNACell(inputs, hparams)
outputs, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32,
swap_memory=False, time_major=True)
if hparams.nz:
inputs_samples = {name: flatten(tf.tile(input[:, None], [1, hparams.num_samples] + [1] * (input.shape.ndims - 1)), 1, 2)
for name, input in inputs.items() if name != 'zs'}
inputs_samples['zs'] = tf.concat([sample_zs() for _ in range(hparams.num_samples)], axis=1)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
cell_samples = DNACell(inputs_samples, hparams)
outputs_samples, _ = tf.nn.dynamic_rnn(cell_samples, inputs_samples, dtype=tf.float32,
swap_memory=False, time_major=True)
for i in range(hparams.num_views):
suffix = '%d' % i if i > 0 else ''
gen_images_samples = outputs_samples['gen_images' + suffix]
gen_images_samples = tf.stack(tf.split(gen_images_samples, hparams.num_samples, axis=1), axis=-1)
gen_images_samples_avg = tf.reduce_mean(gen_images_samples, axis=-1)
outputs['gen_images_samples' + suffix] = gen_images_samples
outputs['gen_images_samples_avg' + suffix] = gen_images_samples_avg
# the RNN outputs generated images from time step 1 to sequence_length,
# but generator_fn should only return images past context_frames
outputs = {name: output[hparams.context_frames - 1:] for name, output in outputs.items()}
gen_images = outputs['gen_images']
outputs['ground_truth_sampling_mean'] = tf.reduce_mean(tf.to_float(cell.ground_truth[hparams.context_frames:]))
return gen_images, outputs
class MultiSAVPVideoPredictionModel(SAVPVideoPredictionModel):
def __init__(self, *args, **kwargs):
VideoPredictionModel.__init__(self,
generator_fn, discriminator_fn, encoder_fn, *args, **kwargs)
if self.hparams.e_net == 'none' or self.hparams.nz == 0:
self.encoder_fn = None
if self.hparams.d_net == 'none':
self.discriminator_fn = None
self.deterministic = not self.hparams.nz
def get_default_hparams_dict(self):
default_hparams = super(MultiSAVPVideoPredictionModel, self).get_default_hparams_dict()
hparams = dict(
num_views=1,
shared_views=False,
)
return dict(itertools.chain(default_hparams.items(), hparams.items()))
def generator_loss_fn(self, inputs, outputs, targets):
gen_losses = super(MultiSAVPVideoPredictionModel, self).generator_loss_fn(inputs, outputs, targets)
hparams = self.hparams
        # TODO: add support for the remaining losses for the other views
for i in range(1, hparams.num_views): # skip i = 0 since it should have already been done by the superclass
suffix = '%d' % i if i > 0 else ''
if hparams.l1_weight or hparams.l2_weight:
gen_images = outputs.get('gen_images%s_enc' % suffix, outputs['gen_images' + suffix])
target_images = inputs['images' + suffix][self.hparams.context_frames:]
if hparams.l1_weight:
gen_l1_loss = vp.losses.l1_loss(gen_images, target_images)
gen_losses["gen_l1_loss" + suffix] = (gen_l1_loss, hparams.l1_weight)
if hparams.l2_weight:
gen_l2_loss = vp.losses.l2_loss(gen_images, target_images)
gen_losses["gen_l2_loss" + suffix] = (gen_l2_loss, hparams.l2_weight)
if (hparams.l1_weight or hparams.l2_weight) and hparams.num_scales > 1:
raise NotImplementedError
if hparams.tv_weight:
gen_flows = outputs.get('gen_flows%s_enc' % suffix, outputs['gen_flows' + suffix])
flow_diff1 = gen_flows[..., 1:, :, :, :] - gen_flows[..., :-1, :, :, :]
flow_diff2 = gen_flows[..., :, 1:, :, :] - gen_flows[..., :, :-1, :, :]
# sum over the multiple transformations but take the mean for the other dimensions
gen_tv_loss = tf.reduce_mean(tf.reduce_sum(tf.abs(flow_diff1), axis=(-2, -1))) + \
tf.reduce_mean(tf.reduce_sum(tf.abs(flow_diff2), axis=(-2, -1)))
gen_losses['gen_tv_loss' + suffix] = (gen_tv_loss, hparams.tv_weight)
return gen_losses
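# Illustrative sketch (not part of the model, names are placeholders): the total-variation
# loss above differences the flow fields along the two spatial axes, sums the absolute
# differences over the displacement and transformation axes, and averages over everything
# else. A NumPy analogue, assuming `flows` has shape (..., height, width, 2, num_transformed_images):
def _tv_loss_sketch_np(flows):
    diff_h = flows[..., 1:, :, :, :] - flows[..., :-1, :, :, :]
    diff_w = flows[..., :, 1:, :, :] - flows[..., :, :-1, :, :]
    return (np.abs(diff_h).sum(axis=(-2, -1)).mean() +
            np.abs(diff_w).sum(axis=(-2, -1)).mean())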
|
from state.machine import BaseState
from helper import *
import state
class HuntPlayerState(BaseState):
def action(self, game_state):
#if game_state_helper.get_current_hp_count(game_state) <= 5:
# return state.GoHomeState(), None
my_pos = game_state_helper.get_my_position(game_state)
        # find the closest enemy
poids, next_move = game_state_helper.get_closest_enemy(game_state)
if poids == -1:
return state.GatherResourcesState(), None
tile_content = game_state['parsedGameMap'][(my_pos + next_move).to_tuple()]
action = create_move_action(tile_content, next_move)
        if poids == 1:  # if we're right next to the enemy
return state.AttackPlayerState(), None
else:
return None, action
|
# Import Python packages
import os
import glob
import csv
import black
def process_files():
    # Join the event data stored in individual csv files into one output file
    # for parsing
# checking your current working directory
print(os.getcwd())
# Get your current folder and subfolder event data
filepath = os.getcwd() + "/event_data"
# Create a for loop to create a list of files and collect each filepath
for root, dirs, files in os.walk(filepath):
# join the file path and roots with the subdirectories using glob
file_path_list = glob.glob(os.path.join(root, "*"))
# print(file_path_list)
    # #### Processing the files to create the csv data file that will be used for the Apache Cassandra tables
# initiating an empty list of rows that will be generated from each file
full_data_rows_list = []
# for every filepath in the file path list
for f in file_path_list:
# reading csv file
with open(f, "r", encoding="utf8", newline="") as csvfile:
# creating a csv reader object
csvreader = csv.reader(csvfile)
next(csvreader)
# extracting each data row one by one and append it
for line in csvreader:
# print(line)
full_data_rows_list.append(line)
# uncomment the code below if you would like to get total number of rows
# print(len(full_data_rows_list))
# uncomment the code below if you would like to check to see what the list of event data rows will look like
# print(full_data_rows_list)
    # creating a smaller event data csv file called event_datafile_new.csv that will be used
    # to insert data into the Apache Cassandra tables
csv.register_dialect("myDialect", quoting=csv.QUOTE_ALL, skipinitialspace=True)
with open("event_datafile_new.csv", "w", encoding="utf8", newline="") as f:
writer = csv.writer(f, dialect="myDialect")
writer.writerow(
[
"artist",
"firstName",
"gender",
"itemInSession",
"lastName",
"length",
"level",
"location",
"sessionId",
"song",
"userId",
]
)
for row in full_data_rows_list:
if row[0] == "":
continue
writer.writerow(
(
row[0],
row[2],
row[3],
row[4],
row[5],
row[6],
row[7],
row[8],
row[12],
row[13],
row[16],
)
)
# check the number of rows in your csv file
with open("event_datafile_new.csv", "r", encoding="utf8") as f:
print(sum(1 for line in f))
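# A minimal sketch (not part of the original workflow): peek at the header of the file that
# process_files() writes, to sanity-check the column order. The default path mirrors the
# filename used above.
def _peek_output_header(path="event_datafile_new.csv"):
    with open(path, "r", encoding="utf8") as fh:
        print(next(csv.reader(fh)))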
#%%
def test_query(query_in: str, session, should_limit=False, limit=5) -> None:
"""reads a query string, executes it on the given session
and print each returned row using black formatter
Args:
query_in:str query string expecting cassandra query language
session:session cassandra session object
should_limit:bool toggle for limiting number of returned rows
limit:int how many rows to return if limiting
"""
query = query_in
if should_limit:
query = f"{query_in} LIMIT {limit}"
    try:
        rows = session.execute(query)
    except Exception as e:
        print(e)
        return
print("-" * 50)
print("Data Validation Query: ")
print(" " * 50)
print(query_in)
print("-" * 50)
print("Result: ")
print(" " * 50)
for row in rows:
# black was chosen ahead of pandas dataframe for printing
# as the format is more compact and better suited for low
        # number of rows with textual content.
print(black.format_str(repr(row), mode=black.Mode()))
print(" " * 50)
print("-" * 50)
def drop_tables(tables: tuple, session) -> None:
for table in tables:
query = f"DROP TABLE IF EXISTS {table}"
try:
session.execute(query)
except Exception as e:
print(e)
def insert_music_library_col(
row, query_cols: tuple, table_name: str, session, col_name_file_map: dict
) -> None:
"""Construct query to insert row of data from pandas itetuple object using
list of columns into the named table and execute it with the
given cassandra session
Args:
row:pandas object row from pandas itertuple method
query_cols:tuple list of column names to insert
table_name:str name of the table to insert into
session cassandra session object
Returns:
None
"""
def _get_value_from_csv(row, key: str, col_name_file_map: dict):
"""Takes a row and extracts the value
using the defined column mapping
This saves us from having to deal with
column names as defined in the csv, we can
stick with our own naming convention
Args:
row:Row casssandra row object
key:str column name to get as defined in table names
"""
return row._asdict().get(col_name_file_map.get(key))
# get elements in query_cols as one string for insertion into query
query_cols_asstring = ", ".join(query_cols)
# compose query insert statement
query_insert = f"INSERT INTO {table_name} ({query_cols_asstring}) "
    # for each element in query_cols create a value substitution placeholder (%s),
    # to avoid having to match these to the number of columns we're inserting
subs = ", ".join(["%s" for x in query_cols])
# compose value insertion query string
values = f"VALUES ({subs})"
# get the data from row looking up the column names
cols = [
_get_value_from_csv(row, col_name, col_name_file_map) for col_name in query_cols
]
    # execute the statement, casting cols to a tuple for compatibility with the execute method
try:
session.execute(
f"{query_insert}{values}",
tuple(cols),
)
except Exception as e:
print(e)
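# Illustrative sketch of the statement insert_music_library_col composes (all names are
# hypothetical): with query_cols = ("artist", "song", "length") and
# table_name = "music_library", the executed statement is
#     INSERT INTO music_library (artist, song, length) VALUES (%s, %s, %s)
# with the three values pulled from the row via the column mapping.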
def create_table(
table_name: str,
session,
table_create_mapper: dict,
table_business_statements_mapper: dict,
) -> None:
"""Compose a query for creating a table based on table name and
looking up query based on table name in table_name_create_mapper
Args:
table_name:str name of the table to insert. also used for looking up query
session: cassandra session
table_create_mapper: dict with names of tables to insert and queries for cols and primary keys
query_statement:str (optional) - query business requirement
"""
print("-" * 50)
query = (
f"CREATE TABLE IF NOT EXISTS {table_name} {table_create_mapper.get(table_name)}"
)
print(f"Query business requirement :")
print(f"{table_business_statements_mapper.get(table_name)}")
print(" ")
print(f"Query for creating tables of {table_name} :")
print(" ")
print(query)
print(" ")
try:
session.execute(query)
except Exception as e:
print(e)
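# Sketch of the mapper structures create_table expects (contents are illustrative only, not
# part of this module):
# table_create_mapper = {
#     "music_library": "(session_id int, item_in_session int, artist text, song text, "
#                      "length float, PRIMARY KEY (session_id, item_in_session))",
# }
# table_business_statements_mapper = {
#     "music_library": "Give me the artist, song title and song length for a session.",
# }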
def drop_table(table_name: str, session) -> None:
"""Drop a table from cassandra
Args:
table_name:str name of the table to drop
session: cassandra session
"""
query = f"drop table {table_name}"
try:
        # the result of a DROP statement is not needed, so no assignment is required
        session.execute(query)
except Exception as e:
print(e)
def dict_to_insert_string(dict_in: dict, sep=", ") -> str:
"""Convert dict to sepearated string
Args:
dict_in:dict pair of strings in dict
sep:dict:optional separator in string
"""
try:
vals = [[k, v] for k, v in dict_in.items()]
vals = [" ".join(x) for x in vals]
vals = sep.join(vals)
except Exception as e:
print(e)
return vals
def construct_create_table_query(insert_pairs: str, primary_key: str) -> str:
"""Construct query for creating a table
Args:
insert_pairs:str string with colname type
        primary_key:str primary key
"""
insert_query = f"({insert_pairs}, PRIMARY KEY ({primary_key}))"
return insert_query
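if __name__ == "__main__":
    # Minimal self-contained sketch (column names are illustrative only): shows how the two
    # pure string helpers above combine into a CREATE TABLE column definition.
    example_cols = {"session_id": "int", "item_in_session": "int", "artist": "text"}
    example_pairs = dict_to_insert_string(example_cols)
    print(construct_create_table_query(example_pairs, "session_id, item_in_session"))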
|
import matplotlib
import matplotlib.pyplot as plt
import json as json
import dateutil.parser
import sys
import os
import numpy
if len(sys.argv) < 3:
print("Usage: python3 process_metrics.py <FILE_NAME> <BIN_SIZE_IN_SECONDS>")
sys.exit()
print("Opening file %s" % sys.argv[1], file = sys.stderr)
TOTAL_LINES = int(os.popen('cat %s | wc -l' % sys.argv[1]).read())
print("File is %d lines in total" % TOTAL_LINES, file = sys.stderr)
BIN_SIZE = int(sys.argv[2])
print("Bin size is %d seconds" % BIN_SIZE, file = sys.stderr)
metrics = {}
def update_average_metric(metric_record, metric_name, bin, metric_bin_name):
    metric = metric_record[metric_name]
metric_bin = bin.get(metric_bin_name, {})
existing_average = metric_bin.get('average', 1)
existing_weight = metric_bin.get('weight', 0)
if metric_name == ':operation' or metric_name == 'first_word':
totals = metric_bin.get('totals', { })
total_for_type = totals.get(metric, 0)
total_for_type = total_for_type + 1
totals[metric] = total_for_type
metric_bin['totals'] = totals
else:
new_average = numpy.average([existing_average, metric], weights=[existing_weight, 1])
metric_bin['average'] = new_average
metric_bin['weight'] = existing_weight + 1
# Add this data point for p95 and p99 calculations
data = metric_bin.get('data_points', [])
data.append(metric)
metric_bin['data_points'] = data
# And now we can just go ahead and write all the changes out.
bin[metric_bin_name] = metric_bin
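# Illustrative shape of a bin after a few updates (values are made up): latency-style metrics
# keep a running average, weight and raw data points for the later p95/p99 pass, while
# ':operation' and 'first_word' only count totals per type.
# {
#     'processed': False,
#     'tps': 42,
#     'queue_latency': {'average': 1.7, 'weight': 3, 'data_points': [1.2, 2.0, 1.9]},
#     'operation': {'totals': {'select': 30, 'insert': 12}},
# }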
def process_bin(bin_index, dict):
bin = dict.get(bin_index, None)
print("Checking if we should process bin %r" % bin_index, file = sys.stderr)
if bin and not bin['processed']:
print("Processing bin %r" % bin_index, file = sys.stderr)
# aggregate all the data points for each metric
print("Keys I'm processing: %r" % bin.keys(), file = sys.stderr)
for metric in bin.keys():
if metric == 'processed' or metric == 'operation' or metric == 'action' or metric == 'tps':
continue
print("\tProcessing metric %s" % metric, file = sys.stderr)
data = bin[metric]['data_points']
# And now we need to compute 95th and 99th percentile
p95, p99 = numpy.percentile(data, [95, 99])
bin[metric]['p95'] = p95
bin[metric]['p99'] = p99
del bin[metric]['data_points']
bin['processed'] = True
bin['tps'] = bin['tps'] / BIN_SIZE
current_maximum_bin = 0
with open(sys.argv[1]) as f:
line = f.readline()
lines = 0
while line:
lines += 1
if lines % 10_000 == 0:
print("Processing line %d of %d" % (lines, TOTAL_LINES), file = sys.stderr)
metric = json.loads(line)
        # First, determine which bin this metric falls into
timestamp = metric[':entry_timestamp']['^t']
bin_start = (timestamp // BIN_SIZE) * BIN_SIZE
# And now we can collect each metric we care about.
bin = metrics.get(bin_start, { 'processed': False, 'tps': 0 })
update_average_metric(metric, ':query_queue_latency', bin, 'queue_latency')
update_average_metric(metric, ':execution_time', bin, 'execution_time')
update_average_metric(metric, ':operation', bin, 'operation')
metric['first_word'] = metric[':query'].split()[0]
update_average_metric(metric, 'first_word', bin, 'action')
metrics[bin_start] = bin
# Now calculate when this query was started and add it to the
# corresponding bin for QPS calculations. Start time is equal to
# start timestamp plus queue latency.
metric['start_time'] = timestamp + metric[':query_queue_latency']
start_time_bin_start = metric['start_time'] // BIN_SIZE * BIN_SIZE
# And now that we have the bin, we add it to the TPS for that bin
start_time_bin = metrics.get(start_time_bin_start, { 'processed': False, 'tps': 0 })
start_time_bin['tps'] += 1
metrics[start_time_bin_start] = start_time_bin
        # Finally, whenever the bin index increases past the previous maximum, a full
        # bin has elapsed. For almost all bin sizes, that means all of the queries from
        # the bin before the previous bin have finished executing, so we can now
        # compute the p95 and p99 statistics for that bin.
if bin_start > current_maximum_bin:
process_bin(current_maximum_bin - BIN_SIZE, metrics)
current_maximum_bin = bin_start
line = f.readline()
# the last bin will always need to be processed, and possibly the one before it
# as well
process_bin(current_maximum_bin - BIN_SIZE, metrics)
process_bin(current_maximum_bin, metrics)
print("Total lines: %d" % lines, file = sys.stderr)
print("Metric totals: %r" % metrics, file = sys.stderr)
print("%r" % metrics)
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/python-division
# #python
import io
import sys
import unittest
def divide(first, second):
int_val = first // second
float_val = first / second
return int_val, float_val
def main():
first_in = int(input().strip())
second_in = int(input().strip())
int_out, float_out = divide(first_in, second_in)
print(int_out)
print(float_out)
if __name__ == '__main__': # pragma: no cover
main()
class TestCode(unittest.TestCase):
def generalized_test(self, which):
sys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r')
sys.stdout = io.StringIO()
expected = open(__file__.replace('.py', f'.{which}.out'), 'r')
main()
self.assertEqual(sys.stdout.getvalue(), expected.read())
for handle in [sys.stdin, sys.stdout, expected]:
handle.close()
def test_0(self):
self.generalized_test('0')
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='yoi',
version='0.0.1',
packages=find_packages('.'),
package_dir={'': '.'},
install_requires=[
# FIXME - read requirements.txt!
],
)
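# One possible way to address the FIXME above (a sketch, not wired into setup()): read the
# pinned dependencies from a requirements.txt placed next to this file and pass the resulting
# list to install_requires.
# def read_requirements(path='requirements.txt'):
#     with open(path) as handle:
#         return [line.strip() for line in handle
#                 if line.strip() and not line.startswith('#')]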
|
import importlib
import logging
import math
import sys
import click
import numpy as np
import tensorflow as tf
from flask import Flask
from flask import json
from flask import request
from nltk import word_tokenize
from experiment.config import load_config
from experiment.qa.data.models import Sentence
from experiment.qa.data.models import Token, TextItem
# we create a flask app for our microservice
app = Flask(__name__)
sess = None
data = None
config = None
model = None
@app.route('/rank', methods=['POST'])
def rerank():
n = int(request.args.get('n', 10)) # number of answers to return
input_data = request.get_json()
question = input_data['question'] # the input question
candidates = input_data['candidates'] # the input candidate answers
question_item = get_text_item(question) # tokenized question
candidate_items = [get_text_item(c) for c in candidates] # tokenized candidate answers
question_vectors = np.array(
[data.get_item_vector(question_item, config['global']['question_length'])] * len(candidate_items)
)
candidate_vectors = np.array(
[data.get_item_vector(a, config['global']['answer_length']) for a in candidate_items]
)
scores = []
q_weights = []
a_weights = []
batch_size = 128
for batch in range(int(math.ceil(len(candidate_items) / float(batch_size)))):
batch_indices = batch_size * batch, batch_size * (batch + 1)
batch_scores, batch_q_weights, batch_a_weights = sess.run(
[model.predict, model.question_importance_weight, model.answer_good_importance_weight],
feed_dict={
model.input_question: question_vectors[batch_indices[0]:batch_indices[1]],
model.input_answer_good: candidate_vectors[batch_indices[0]:batch_indices[1]],
model.dropout_keep_prob: 1.0,
})
scores += batch_scores.tolist()
q_weights += batch_q_weights.tolist()
a_weights += batch_a_weights.tolist()
# a list of candidate answer indices, sorted by score
sort_indices = [i for (s, i) in
sorted(zip(scores, [i for (i, _) in enumerate(candidates)]), key=lambda x: -x[0])[:n]]
# the result is a simple json object with the desired content
result = {
'question': {
'tokens': [t.text for t in question_item.sentences[0].tokens]
},
'answers': [
{
'tokens': [t.text for t in candidate_items[i].sentences[0].tokens],
'weights': a_weights[i],
'questionWeights': q_weights[i]
}
for i in sort_indices]
}
return json.dumps(result)
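# Example request payload for the /rank endpoint above (the texts are placeholders; the field
# names follow the parsing in rerank()):
# POST /rank?n=5
# {"question": "How do I reset my password?",
#  "candidates": ["Click the reset link in the email ...", "Contact support ..."]}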
@app.route('/individual-weights', methods=['POST'])
def individual_weights():
input_data = request.get_json()
question = input_data['question'] # the input question
candidate = input_data['candidate'] # the input candidate answer
question_item = get_text_item(question) # tokenized question
candidate_item = get_text_item(candidate) # tokenized candidate
question_vectors = np.array(
[data.get_item_vector(question_item, config['global']['question_length'])]
)
candidate_vectors = np.array(
[data.get_item_vector(candidate_item, config['global']['answer_length'])]
)
q_weights, c_weights = sess.run(
[model.question_importance_weight, model.answer_good_importance_weight],
feed_dict={
model.input_question: question_vectors,
model.input_answer_good: candidate_vectors,
model.dropout_keep_prob: 1.0,
})
result = {
'question': {
'tokens': [t.text for t in question_item.sentences[0].tokens]
},
'candidate': {
'tokens': [t.text for t in candidate_item.sentences[0].tokens],
'weights': c_weights[0].tolist(),
'questionWeights': q_weights[0].tolist()
}
}
return json.dumps(result)
def get_text_item(text):
"""Converts a text into a tokenized text item
:param text:
:return:
"""
if config['data']['lowercased']:
text = text.lower()
question_tokens = [Token(t) for t in word_tokenize(text)]
question_sentence = Sentence(' '.join([t.text for t in question_tokens]), question_tokens)
return TextItem(question_sentence.text, [question_sentence])
#
# The following functions setup the tensorflow graph and perform the training process, if required.
# Furthermore, the webserver is started up
#
@click.command()
@click.argument('config_file')
@click.option('--port', type=int, default=5001,
help='the port on which the candidate ranking webserver will listen for connections')
def run(config_file, port):
"""This program is the starting point for every experiment. It pulls together the configuration and all necessary
experiment classes to load
"""
global sess, model, config, data
config = load_config(config_file)
config_global = config['global']
# setup a logger
logger = logging.getLogger('experiment')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(config['logger']['level'])
handler_stdout.setFormatter(formatter)
logger.addHandler(handler_stdout)
if 'path' in config['logger']:
handler_file = logging.FileHandler(config['logger']['path'])
handler_file.setLevel(config['logger']['level'])
handler_file.setFormatter(formatter)
logger.addHandler(handler_file)
logger.setLevel(config['logger']['level'])
# Allow the gpu to be used in parallel
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
if 'max_threads' in config_global:
sess_config.intra_op_parallelism_threads = config_global['max_threads']
    # we allow setting the random seed in the config file for reproducibility. However, when running on GPU, results
    # will still be nondeterministic (due to nondeterministic behavior of tensorflow)
if 'random_seed' in config_global:
seed = config_global['random_seed']
        logger.info('Using fixed random seed {}'.format(seed))
np.random.seed(seed)
tf.set_random_seed(seed)
sess = tf.InteractiveSession(config=sess_config)
    # We are now fetching all relevant modules. It is strictly required that these modules contain a variable named
    # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,
    # experiment.Trainer or experiment.Evaluator
data_module = config['data-module']
model_module = config['model-module']
training_module = config['training-module']
# The modules are now dynamically loaded
DataClass = importlib.import_module(data_module).component
ModelClass = importlib.import_module(model_module).component
TrainingClass = importlib.import_module(training_module).component
# We then wire together all the modules and start training
data = DataClass(config['data'], config_global, logger)
model = ModelClass(config['model'], config_global, logger)
training = TrainingClass(config['training'], config_global, logger)
    # setup the data (validate, create generators, load data, etc.)
logger.info('Setting up the data')
data.setup()
# build the model (e.g. compile it)
logger.info('Building the model')
model.build(data, sess)
# start the training process
logger.info('Starting the training process')
training.start(model, data, sess)
app.run(debug=False, port=port, host="0.0.0.0")
sess.close()
if __name__ == '__main__':
run()
|
import sys
import time
import schedule
from service import Request
sys.path.insert(0, '')
schedule.every(1).minutes.do(Request.getBalances)
while 1:
schedule.run_pending()
time.sleep(1)
|
# -*- coding:utf-8 -*-
import time
import pandas as pd
import numpy as np
from py4jhdfs import Py4jHdfs
from pyspark.sql import HiveContext, SQLContext
from pyspark.sql.types import *
def run_time_count(func):
"""
计算函数运行时间
装饰器:@run_time_count
"""
def run(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
print("[Info]: Function [{0}] run time is {1} second(s).".format(func.__name__, round(time.time() - start, 4)))
print('')
return result
return run
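# Usage sketch for the decorator above (the decorated function is hypothetical):
# @run_time_count
# def build_feature_table(sc, table_name):
#     ...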
def df_head(hc_df, lines=5):
if hc_df:
df = hc_df.toPandas()
return df.head(lines)
else:
return None
def to_str(int_or_unicode):
if int_or_unicode in (None, '', np.nan, 'None', 'nan'):
return ''
try:
return str(int_or_unicode)
except:
return int_or_unicode
def to_int(str_or_int):
if str_or_int in (None, '', np.nan, 'None', 'nan'):
return None
try:
return int(float(str_or_int))
except:
return str_or_int
def to_float(int_or_float):
if int_or_float in (None, '', np.nan, 'None', 'nan'):
return np.nan
try:
return float(int_or_float)
except:
return int_or_float
@run_time_count
def csv_writer(sc, spark_df, save_path, sep=',', with_header=True, n_partitions=False, mode='overwrite', **kwargs):
"""
write spark dataframe to csv files with one line header.
    example:
data = hc.read.csv('/hdfs/example.tsv',
sep='\t',
header=True,
).limit(50).repartition(10)
csv_writer(sc, data, save_path='/hdfs/example_output.csv', sep=',', n_partitions=None)
"""
ph = Py4jHdfs(sc)
if mode == 'overwrite':
try:
ph.rm(save_path)
except Exception as e:
            print('[Warning]: %s does not exist!' % save_path)
df = spark_df
df_header = df.columns
df_header = df_header[:-1]+[df_header[-1]+'\n']
df_header = sep.join(df_header)
ph.write(path=save_path+'/header.csv', contents=df_header, encode='utf-8', overwrite_or_append='overwrite')
if n_partitions:
df.coalesce(n_partitions).write.csv(save_path, sep=sep, header=False, mode='append', **kwargs)
else:
df.write.csv(save_path, sep=sep, header=False, mode='append', **kwargs)
print('[Info]: File Save Success!')
return True
@run_time_count
def csv_reader(sc, file_dir, sep=',', header_path='/header.csv', **kwargs):
"""
read csv files to spark dataframe with one line header.
    example:
df = csv_reader(sc, '/hdfs/example.csv')
df.show(100)
"""
hc = HiveContext(sc)
ph = Py4jHdfs(sc)
files = ph.ls(file_dir,is_return=True)
files = [file_dir+x[0] for x in files]
files = list(filter(lambda x: '/part-' in x, files))
header = sc.textFile(file_dir+header_path).collect()[0].split(sep)
df = hc.read.csv(files, sep=sep, header=False, **kwargs)
for old_col,new_col in zip(df.columns,header):
df = df.withColumnRenamed(old_col,new_col)
return df
def save_dataframe_by_rdd(sc, data_df, save_path, with_header=True, sep='\t', n_partitions=10):
ph = Py4jHdfs(sc)
if not data_df:
print('[Warning]: There Is No Data To Save!')
return None
header = data_df.columns
header = sc.parallelize([sep.join(header)])
# print(header.collect())
print(save_path)
data_df = data_df.rdd.map(tuple).map(lambda r: tuple([to_str(r[i]) for i in range(len(r))])).map(
lambda r: sep.join(r))
if with_header:
data_df = header.union(data_df)
ph.rm(save_path)
# print(data_df.take(2))
data_df.coalesce(n_partitions).saveAsTextFile(save_path)
print('File Saved!')
def save_pandas_df(sc, pd_df, path, sep='\t'):
"""
把pandas DtaFrame存到HDFS
建议存储较小的文件100w行以内,否则可能会很慢
"""
ph = Py4jHdfs(sc)
if not isinstance(pd_df, pd.DataFrame) or pd_df.shape[0]<1:
print('[Warning]: There is no data to save.')
return False
for col in pd_df.columns:
pd_df[col] = pd_df[col].apply(lambda x: to_str(x))
header = pd_df.columns.tolist()
pd_df = pd_df.values.tolist()
pd_df = [header] + pd_df
pd_df = list(map(lambda x: sep.join(x), pd_df))
print('[Path]: %s' % path)
ph.write(path=path, contents=pd_df, encode='utf-8', overwrite_or_append='overwrite')
print('[Info]: File Saved!')
return True
def save_pandas_df_to_hive(sc, pd_df, table_name, mode='append'):
"""
把pandas.DtaFrame存到Hive表
"""
hc = HiveContext(sc)
if not isinstance(pd_df, pd.DataFrame):
print('[Warning]: Input data type is not pd.DataFrame.')
return False
hc_df = hc.createDataFrame(pd_df)
print(table_name)
hc_df.write.saveAsTable(name=table_name, mode=mode)
print('Table Saved!')
return True
def save_rdd_to_hdfs(sc, input_rdd, save_path, to_distinct=True, sep='\t'):
ph = Py4jHdfs(sc)
# print(type(input_rdd))
if not input_rdd:
print('[Warning]: There is no data to save!')
return False
rdd = input_rdd
if to_distinct:
rdd = rdd.distinct()
rdd = rdd.map(lambda r: tuple([to_str(r[i]) for i in range(len(r))])).map(lambda r: sep.join(r))
print(rdd.take(3))
print(rdd.count())
output_path = save_path
print('output_path:', output_path)
ph.rm(output_path)
rdd.saveAsTextFile(output_path)
print('File Saved!')
return True
def save_hive_data_to_hdfs(sc, select_sql, output_path, with_header=True, sep='\t', n_partitions=10, mode='overwrite',is_deduplication=False):
hc = HiveContext(sc)
data = hc.sql(select_sql)
if is_deduplication:
data = data.drop_duplicates()
print('[Path]: %s' % output_path)
csv_writer(sc, data, save_path=output_path, sep=sep, n_partitions=n_partitions, with_header=with_header)
# data.coalesce(n_partitions).write.csv(output_path,sep=sep,header=with_header,mode=mode)
print('[Info]: File saved!')
return True
DICT_SCHEMA = {'str': StringType(),
'object': StringType(),
'int': IntegerType(),
'int32': IntegerType(),
'int64': IntegerType(),
'long': LongType(),
'float': FloatType(),
'float32': FloatType(),
'float64': FloatType(),
}
DICT_DTYPE = {'str': to_str,
'object': to_str,
'int': to_int,
'int32': to_int,
'int64': to_int,
'long': to_int,
'float': to_float,
'float32': to_float,
'float64': to_float,
}
def create_schema_from_field_and_dtype(field_list, dtype_list):
schema = StructType([StructField(field, DICT_SCHEMA.get(dtype, StringType()), True) for field, dtype in zip(field_list, dtype_list)])
return schema
def transform_dtype(input_rdd_row, input_dtype):
return tuple([DICT_DTYPE[d](r) for r, d in zip(input_rdd_row, input_dtype)])
def replace_nans(input_rdd_row, to_replace=None):
def replace_nan(x, to_replace):
if x not in ('nan', 'None', 'NaN', ''):
return x
return to_replace
return tuple([replace_nan(i, to_replace) for i in input_rdd_row])
def write_rdd_to_hive_table_by_partition(hc, input_rdd, field_list, dtype_list, table_name, partition_by, mode='append', sep='\t'):
mode_map = {'append': 'into', 'overwrite': 'overwrite'}
schema = create_schema_from_field_and_dtype(field_list, dtype_list)
data = input_rdd
data = data.map(lambda r: replace_nans(r, None))
data = data.map(lambda r: transform_dtype(r, dtype_list))
# print(data.take(3))
data = hc.createDataFrame(data, schema=schema)
# print(['len(data.columns)',len(data.columns)])
    data.registerTempTable("table_temp")  # create a temporary table
# print(hc.sql('select * from table_temp limit 2').show())
insert_sql = " insert %s %s partition(%s=%s) select * from %s " % (mode_map[mode], table_name, partition_by['key'], partition_by['value'], 'table_temp')
# print(insert_sql)
    hc.sql(insert_sql)  # insert the data
    # data.write.mode(mode).format(format).partitionBy([partition_by['key']]).saveAsTable(table_name)  # has a bug, cannot be used
print('[Info]: Partition: %s=%s' % (partition_by['key'], partition_by['value']))
print('[Info]: Save Table Success!')
return True
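# Sketch of the expected `partition_by` argument for the two partition writers below and above
# (the key and value are illustrative only):
# partition_by = {'key': 'stat_day', 'value': '20200101'}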
def write_pd_dataframe_to_hive_table_by_partition(hc, input_df, field_list, dtype_list, table_name, partition_by, mode='append', sep='\t'):
if not isinstance(input_df,pd.DataFrame):
print('[Warning]: There is no data for date %s to save!' % partition_by['value'])
return False
mode_map = {'append': 'into', 'overwrite': 'overwrite'}
schema = create_schema_from_field_and_dtype(field_list, dtype_list)
data = input_df
for field,dtype in zip(field_list, dtype_list):
try:
data[field] = data[field].apply(lambda x: DICT_DTYPE[dtype](x))
except Exception as e:
print('[Error]: %s' % e)
data = hc.createDataFrame(data, schema=schema).drop('stat_day')
# print(['len(data.columns)',len(data.columns)])
    data.registerTempTable("table_temp")  # create a temporary table
# print(hc.sql('select * from table_temp limit 2').show())
insert_sql = " insert %s %s partition(%s=%s) select * from %s " % (mode_map[mode], table_name, partition_by['key'], partition_by['value'], 'table_temp')
# print(insert_sql)
    hc.sql(insert_sql)  # insert the data
    # data.write.mode(mode).format(format).partitionBy([partition_by['key']]).saveAsTable(table_name)  # has a bug, cannot be used
print('[Info]: Partition: %s=%s' % (partition_by['key'], partition_by['value']))
print('[Info]: Save Table Success!')
return True
if __name__ == '__main__':
    # example:
from pyspark import SparkContext, SparkConf
from pyspark.sql import HiveContext, SQLContext
conf = SparkConf()
sc = SparkContext(conf=conf)
hc = HiveContext(sc)
sqlContext = SQLContext(sc)
ph = Py4jHdfs(sc)
data = hc.read.csv('/hdfs/example.tsv',
sep='\t',
header=True,
).limit(50).repartition(10)
csv_writer(sc, data, save_path='/hdfs/example_output.csv', sep=',', n_partitions=None)
df = csv_reader(sc, '/hdfs/example_output.csv')
|
'''
This contains some useful routines I need for finding and analysing
frequencies in pulsating star lightcurves
'''
import multiprocessing
import numpy as np
import f90periodogram
from scipy.interpolate import interpolate
try:
import pyopencl as cl
OPENCL = True
except ImportError:
print("opencl not available")
OPENCL = False
def find_nan(array):
" strips NaN from array and return stripped array"
# strip nan
valid = np.logical_not(np.isnan(array))
return valid
def fast_deeming(times, values, pad_n=None):
''' Interpolate time values to an even grid then run an FFT
returns (frequencies, amplitudes)
Input
-----
times : numpy array containing time values
values: numpy array containing measurements
pad_n : (optional) Calculate fft of this size. If this is larger than the
input data, it will be zero padded. See numpy.fft.fft's help for details.
Output
------
frequencies: numpy array containing frequencies
amplitudes : numpy array containing amplitudes
even_times : numpy array containing interpolated times
even_values: numpy array containing interpolated values
Details
-------
Time values are interpolated to an even grid from min(times) to max(times)
containing times.size values. Interpolation is done using linear spline
method.
NOTE: This may not give you results as precise as deeming(), the
interpolation may cause spurious effects in the fourier spectrum. This
method is however, very fast for large N, compared to deeming()
NOTE: This method strips nan from arrays first.
'''
valid = find_nan(values)
values = values[valid]
times = times[valid]
interpolator = interpolate.interp1d(times, values)
even_times = np.linspace(times.min(), times.max(), times.size)
even_vals = interpolator(even_times)
if pad_n:
amplitudes = np.abs(np.fft.fft(even_vals, pad_n))
else:
amplitudes = np.abs(np.fft.fft(even_vals, 2*even_vals.size))
amplitudes *= 2.0 / times.size
frequencies = np.fft.fftfreq(amplitudes.size,
d=even_times[1]-even_times[0])
pos = frequencies >= 0
return frequencies[pos], amplitudes[pos], even_times, even_vals
def periodogram_opencl(t, m, f):
    ''' Calculate the Deeming periodogram with a parallel O(N*N)
    algorithm. Parallelisation is obtained via opencl and could be run on a
    GPU.
Inputs:
t: numpy array containing timestamps
m: numpy array containing measurements
f: numpy array containing frequencies at which DFT must be
calculated
Returns:
amplitudes: numpy array of len(freqs) containing amplitudes of
periodogram
Note: This routine strips datapoints if it is nan
'''
valid = find_nan(m)
t = t[valid]
m = m[valid]
# create a context and a job queue
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
# create buffers to send to device
mf = cl.mem_flags
# input buffers
times_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t)
mags_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=m)
freqs_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=f)
# output buffers
amps_buffer = cl.Buffer(ctx, mf.WRITE_ONLY, f.nbytes)
amps_g = np.empty_like(f)
kernel = '''
// Kernel to compute the deeming periodogram for a given frequency over a
// set of data
#define PYOPENCL_DEFINE_CDOUBLE
#pragma OPENCL EXTENSION cl_khr_fp64: enable
__kernel void periodogram(
__global const double *times_g,
__global const double *mags_g,
__global const double *freqs_g,
__global double *amps_g,
const int datalength) {
int gid = get_global_id(0);
double realpart = 0.0;
double imagpart = 0.0;
double pi = 3.141592653589793;
double twopif = freqs_g[gid]*2.0*pi;
for (int i=0; i < datalength; i++){
realpart = realpart + mags_g[i]*cos(twopif*times_g[i]);
imagpart = imagpart + mags_g[i]*sin(twopif*times_g[i]);
}
amps_g[gid] = 2.0*sqrt(pow(realpart, 2) + pow(imagpart, 2))/datalength;
}
'''
# read and compile the opencl kernel
prg = cl.Program(ctx, kernel)
try:
prg.build()
except:
print("Error:")
        print(prg.get_build_info(ctx.devices[0],
                                 cl.program_build_info.LOG))
raise
# call the function and copy the values from the buffer to a numpy array
prg.periodogram(queue, amps_g.shape, None,
times_g,
mags_g,
freqs_g,
amps_buffer,
np.int32(t.size))
cl.enqueue_copy(queue, amps_g, amps_buffer)
return amps_g
def periodogram_parallel(t, m, f, threads=None):
''' Calculate the Deeming periodogram using Fortran with OpenMP
'''
if not threads:
threads = 4
# strip nan
valid = find_nan(m)
t = t[valid]
m = m[valid]
ampsf90omp_2 = f90periodogram.periodogram2(t, m, f, t.size, f.size,
threads)
return ampsf90omp_2
def periodogram_numpy(t, m, freqs):
''' Calculate the Deeming periodogram using numpy using a serial O(N*N)
algorithm.
Inputs:
t: numpy array containing timestamps
m: numpy array containing measurements
freqs: numpy array containing frequencies at which DFT must be
calculated
Returns:
amplitudes: numpy array of len(freqs) containing amplitudes of
periodogram
Note: This routine strips datapoints if it is nan
'''
# strip nan
valid = find_nan(m)
t = t[valid]
m = m[valid]
# calculate the dft
amps = np.zeros(freqs.size, dtype='float')
twopit = 2.0*np.pi*t
for i, f in enumerate(freqs):
twopift = f*twopit
real = (m*np.cos(twopift)).sum()
imag = (m*np.sin(twopift)).sum()
amps[i] = real**2 + imag**2
amps = 2.0*np.sqrt(amps)/t.size
return amps
def deeming(times, values, frequencies=None, method='opencl',
opencl_max_chunk=10000):
''' Calculate the Deeming periodogram of values at times.
Inputs:
times: numpy array containing time_stamps
values: numpy array containing the measured values at times.
frequencies: optional numpy array at which the periodogram will be
calculated. If not given, (times.size) frequencies between 0 and
the nyquist frequency will be used.
method: One of 'opencl', 'openmp', 'numpy'.
'opencl' requires `pyopencl` to be present as well as a working
opencl driver. This method runs in parallel on the opencl device
and is potentially the fastest of the 3. This option is default.
'openmp' runs in parallel via openmp in fortran code. This can only
run on your CPU. It defaults to the number of cores in your machine.
'numpy' uses a serial implementation that only requires numpy to be
installed. This one is probably the slowest of the 3 options for
larger input data sizes
opencl_max_chunk: defaults to 10000. If you get "Cl out of resources"
error, make this smaller
Returns (frequency, amplitude) arrays.
'''
if frequencies is None:
# frequencies array not given. Create one
        # find the smallest difference between two successive timestamps and use
# that for the nyquist calculation
t = np.arange(times.size-1)
smallest = np.min(times[t+1] - times[t])
nyquist = 0.5 / smallest
frequencies = np.linspace(0, nyquist, times.size)
if method == 'opencl':
if OPENCL:
            # split the calculation by frequency into chunks of at most
            # opencl_max_chunk frequencies each
            chunks = (frequencies.size // opencl_max_chunk) + 1
f_split = np.array_split(frequencies, chunks)
amps_split = []
for f in f_split:
amps = periodogram_opencl(times, values, f)
amps_split.append(amps)
amps = np.concatenate(amps_split)
else:
print("WARNING! pyopencl not found. Falling back to openmp version")
cores = multiprocessing.cpu_count()
amps = periodogram_parallel(times, values, frequencies, cores)
elif method == 'openmp':
cores = multiprocessing.cpu_count()
amps = periodogram_parallel(times, values, frequencies, cores)
elif method == 'numpy':
amps = periodogram_numpy(times, values, frequencies)
else:
raise ValueError("{} is not a valid method!".format(method))
return frequencies, amps
def find_peak(frequencies, amplitudes, fmin=None, fmax=None):
''' Return the return (freq, amp) where amp is maximum'''
if fmin is None:
fmin = frequencies.min()
if fmax is None:
fmax = frequencies.max()
_f = np.logical_and(frequencies < fmax, frequencies > fmin)
ampmax = np.where(amplitudes[_f] == amplitudes[_f].max())
return float(frequencies[_f][ampmax]), float(amplitudes[_f][ampmax])
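if __name__ == '__main__':
    # Minimal self-contained check of the serial NumPy path (a sketch, not part of the
    # original module): a noiseless 5 d^-1 sinusoid sampled at 1000 random epochs should put
    # its strongest peak at 5 d^-1.
    demo_times = np.sort(np.random.uniform(0.0, 10.0, 1000))
    demo_values = 0.02 * np.sin(2.0 * np.pi * 5.0 * demo_times)
    demo_freqs = np.linspace(0.1, 10.0, 2000)
    freq, amp = deeming(demo_times, demo_values, frequencies=demo_freqs, method='numpy')
    print(find_peak(freq, amp))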
|
from __future__ import print_function
from constants import __THISDIR__, __ABRESTBANDNAME__, __VEGARESTBANDNAME__
from constants import __MJDPKNW__, __MJDPKSE__, __Z__
from constants import __MJDPREPK0NW__, __MJDPOSTPK0NW__
from constants import __MJDPREPK0SE__, __MJDPOSTPK0SE__
from . import lightcurve
from .kcorrections import compute_kcorrection, get_linfitmag, get_kcorrection
# from scipy import interpolate as scint
from scipy import optimize as scopt
import numpy as np
from matplotlib import pyplot as pl
from pytools import plotsetup
from matplotlib import rcParams
import os
from astropy.io import ascii
import sncosmo
def mk_color_curves_figure():
fig = plotsetup.halfpaperfig()
rcParams['text.usetex'] = False
fig.clf()
def line(x, slope, zpt):
return slope*x + zpt
nw, se = lightcurve.get_spock_data()
for event in ['nw','se']:
if event.lower()=='se':
sn = se
mjdpk = __MJDPKSE__
ax1 = pl.subplot(2,2,2)
ax2 = pl.subplot(2,2,4, sharex=ax1)
else:
sn = nw
mjdpk = __MJDPKNW__
ax1 = pl.subplot(2,2,1)
ax2 = pl.subplot(2,2,3, sharex=ax1)
mjd = sn['MJD']
trest = (mjd-mjdpk)/(1+__Z__)
tplot = np.arange(-3, +1, 0.1)
colorset = []
colorerrset = []
# fit a line to one well-sampled band for each event,
# between -3 and +1 rest-frame days from peak
maginterpdict = {}
for band in ['f814w', 'f125w', 'f160w']:
ib = np.where((sn['FILTER']==band.upper()) &
(trest>-3) & (trest<1))[0]
if len(ib) == 0:
continue
# m = np.append(np.array([31]), sn['MAG'][ib])
# merr = np.append(np.array([0.5]), sn['MAGERR'][ib])
m = sn['MAG'][ib]
merr = sn['MAGERR'][ib]
t = trest[ib]
popt, pcov = scopt.curve_fit(line, t, m, sigma=merr, p0=[-0.5, 50])
maginterpdict[band] = (popt, pcov)
# plot observer-frame measured mags in the top panel, with linear
# fit lines overlaid, then plot interpolated observer-frame colors in
# the bottom panel
for band1, color, marker in zip(
['f435w', 'f606w', 'f814w',
'f105w', 'f125w', 'f160w'],
['c', 'b', 'darkgreen', 'darkorange', 'r', 'darkred'],
['^', '>', 'v', 's', 'd', 'o']):
ib = np.where((sn['FILTER']==band1.upper()) &
(trest>-3) & (trest<1))[0]
if len(ib) == 0:
continue
m1 = sn['MAG'][ib]
merr1 = sn['MAGERR'][ib]
t1 = trest[ib]
ax1.errorbar(t1, m1, merr1, marker=marker, ls= ' ', color=color,
ecolor='k', elinewidth=0.5, capsize=1., mew=0.5,
label=band1.upper())
for band2, fillcolor in zip(['f814w', 'f160w'],
['darkgreen', 'r', 'darkred']):
if band1==band2: continue
if band1 in ['f140w','f160w'] and band2=='f125w': continue
if band2 not in maginterpdict: continue
#if band2=='f160w':
mec=color
color='w'
#else:
# mec=color
slope, intercept = maginterpdict[band2][0]
covmatrix = maginterpdict[band2][1]
slope_err, intercept_err = np.sqrt(np.diagonal(covmatrix))
slope_intercept_cov = covmatrix[0, 1]
fiterrfunc = lambda x: np.sqrt((slope_err * x)**2 +
intercept_err**2 +
(2 * x * slope_intercept_cov))
top = line(tplot, slope, intercept) + fiterrfunc(tplot)
bot = line(tplot, slope, intercept) - fiterrfunc(tplot)
ax1.fill_between(tplot, bot, top, color=fillcolor,
alpha=0.1, zorder=-200)
ax1.plot(tplot, line(tplot, slope, intercept),
marker=' ', ls='-', color=fillcolor,
label='__nolabel__', zorder=-100)
m2 = line(t1, slope, intercept)
merr2 = fiterrfunc(t1)
c12 = m1 - m2
c12err = np.sqrt(merr1**2 + merr2**2)
colorname = band1.upper() + '-' + band2.upper()
ax2.errorbar(t1, c12, c12err,
marker=marker, ls= ' ', color=color, mec=mec,
ecolor='k', elinewidth=0.5, capsize=1., mew=0.5,
label=colorname)
colorset += c12.tolist()
colorerrset += c12err.tolist()
meancolor = np.average(np.array(colorset),
weights=1/np.array(colorerrset)**2)
if event=='se':
ax1.legend(loc='lower right', fontsize='small', frameon=False,
ncol=1)
pl.setp(ax2.get_yticklabels(), visible=False)
ax2.legend(loc='lower right', fontsize='small', frameon=False,
ncol=1, borderpad=1.3)
else:
ax1.legend(loc='lower right', fontsize='small', frameon=False)
ax1.set_ylabel('AB Magnitude', labelpad=2)
ax2.set_ylabel('Color', labelpad=-5)
ax2.legend(loc='upper left', fontsize='small', frameon=False,
ncol=1, borderpad=1.8)
ax2.axhline(meancolor, ls='--', lw=0.5, zorder=-1000)
#ax2.text(1.7, meancolor+0.05, '%.1f' % np.abs(np.round(meancolor,1)),
# fontsize='small', ha='right', va='bottom')
ax2.set_xlabel('t$_{\\rm rest}$ (days)', labelpad=5)
ax1.text(0.08, 0.92, event.upper(), ha='left', va='top',
transform=ax1.transAxes, fontsize='large')
ax1.set_ylim(29.75, 26.2)
ax2.set_ylim(-0.9,2.48)
ax1.set_xlim(-2.4,0.9)
ax2.set_xlim(-2.4,0.9)
pl.setp(ax1.get_xticklabels(), visible=False)
if event=='se':
pl.setp(ax1.get_yticklabels(), visible=False)
fig = pl.gcf()
fig.subplots_adjust(wspace=0, hspace=0, left=0.13, bottom=0.13,
right=0.97, top=0.97)
return maginterpdict
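# Note on the fit-error propagation used above (a restatement, not new code): for a linear
# fit m(t) = slope * t + intercept, the 1-sigma uncertainty of the predicted magnitude is
#     sigma_m(t) = sqrt((sigma_slope * t)**2 + sigma_intercept**2 + 2 * t * cov(slope, intercept))
# which is exactly what fiterrfunc evaluates from the curve_fit covariance matrix.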
def plot_colorcurve_binned( binsize=0.5 ):
""" make a plot showing the color curves of spockSE and NW"""
import sncosmo
from astropy.io import ascii
from matplotlib import ticker
from pytools import plotsetup
from astropy.table import Table,Column
plotsetup.paperfig()
sncosmo.plotting._cmap_wavelims = (4350, 15300)
# read in the data
nw = ascii.read('data/HST_FFSN_spockNW_phot.data', format='commented_header',header_start=-1, data_start=0 )
se = ascii.read('data/HST_FFSN_spockSE_phot.data', format='commented_header',header_start=-1, data_start=0 )
colorcurvedict = {
'mjd':[],'mjderr':[],'colorname':[],'color':[],'colorerr':[] }
for iax,src in zip([1,2],[nw, se]) :
if iax==1 :
xlim=[56669,56675]
ylim = [-0.5,0.6]
if iax==2:
xlim = [56898,56902]
ylim = [0,3]
ax = pl.gcf().add_subplot( 2, 1, iax )
if iax == 1 :
ax.text( 0.1,0.9, 'Spock-NW', ha='left', va='top', fontsize='large', transform=ax.transAxes )
if iax == 2 :
ax.text( 0.1,0.9, 'Spock-SE', ha='left', va='top', fontsize='large', transform=ax.transAxes )
mjd, mag, magerr = src['MJD'], src['MAG'], src['MAGERR']
bandname, flux, fluxerr = src['FILTER'], src['FLUX'], src['FLUXERR']
for thismjd in np.arange(xlim[0], xlim[1], binsize):
thisbindict = {}
for thisband in np.unique(bandname):
ithisbinband = np.where((bandname == thisband) &
(thismjd <= mjd) &
(mjd < thismjd + binsize))[0]
if len(ithisbinband) < 1:
continue
thisflux, thisfluxerr = weighted_avg_and_std(
flux[ithisbinband], 1/fluxerr[ithisbinband]**2)
thismag, thismagerr = weighted_avg_and_std(
mag[ithisbinband], 1/magerr[ithisbinband]**2)
bandpass = sncosmo.get_bandpass(thisband.lower())
thisbindict[thisband] = [bandpass.wave_eff,
thisflux, thisfluxerr,
thismag, thismagerr]
#if 56898.9 < thismjd < 56900.1 :
# import pdb; pdb.set_trace()
for key1, val1 in thisbindict.iteritems():
for key2, val2 in thisbindict.iteritems():
if key1 == key2:
continue
if val1[0] >= val2[0]:
continue
bandpair = [key1, key2]
magpair = [val1[3], val2[3]]
magerrpair = [val1[4], val2[4]]
thismjdmid = thismjd + binsize/2.
iblue = np.argmin([val1[0],val2[0]])
ired = np.argmax([val1[0],val2[0]])
colorcurvedict['colorname'].append(bandpair[iblue].upper() + '-' + bandpair[ired].upper())
colorcurvedict['color'].append( magpair[iblue] - magpair[ired] )
colorcurvedict['colorerr'].append( np.sqrt(magerrpair[iblue]**2 + magerrpair[ired]**2 ) )
colorcurvedict['mjd'].append( thismjdmid )
colorcurvedict['mjderr'].append( binsize/2. )
colortable = Table( colorcurvedict )
for colorname in np.unique( colortable['colorname'] ):
icolor = np.where(colortable['colorname'] == colorname)
mjd = colortable['mjd'][icolor]
color = colortable['color'][icolor]
colorerr = colortable['colorerr'][icolor]
mjderr = colortable['mjderr'][icolor]
if min(mjd) > xlim[1] : continue
if max(mjd) < xlim[0] : continue
ax.errorbar( mjd, color, colorerr, mjderr, marker='o', ls=' ', capsize=1, label=colorname )
ax.set_xlabel('MJD')
ax.set_ylabel('color (AB mag)')
ax.legend( numpoints=1 )
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.xaxis.set_major_locator( ticker.MultipleLocator( 1 ) )
ax.xaxis.set_minor_locator( ticker.MultipleLocator( 0.2 ) )
colortable.write( 'spock_colors.data', format='ascii.fixed_width' )
pl.draw()
return( colortable )
def plot_colorcurve_movingbin( binsize=0.5 ):
""" make a plot showing the color curves of spockSE and NW"""
import sncosmo
from astropy.io import ascii
from matplotlib import ticker
from pytools import plotsetup
from astropy.table import Table,Column
plotsetup.paperfig()
sncosmo.plotting._cmap_wavelims = (4350, 15300)
# read in the data
nw = ascii.read('data/HST_FFSN_spockNW_phot.data', format='commented_header',header_start=-1, data_start=0 )
se = ascii.read('data/HST_FFSN_spockSE_phot.data', format='commented_header',header_start=-1, data_start=0 )
colorcurvedict = {
'mjd':[],'mjderr':[],'colorname':[],'color':[],'colorerr':[] }
for iax,src in zip([1,2],[nw, se]) :
if iax==1 :
xlim=[56670,56674]
ylim = [-0.5,0.6]
if iax==2:
xlim = [56898.5,56902]
ylim = [0,3]
ax = pl.gcf().add_subplot( 2, 1, iax )
if iax == 1 :
ax.text( 0.1,0.9, 'Spock-NW', ha='left', va='top', fontsize='large', transform=ax.transAxes )
if iax == 2 :
ax.text( 0.1,0.9, 'Spock-SE', ha='left', va='top', fontsize='large', transform=ax.transAxes )
mjd, mag, magerr = src['MJD'], src['MAG'], src['MAGERR']
bandname, flux, fluxerr = src['FILTER'], src['FLUX'], src['FLUXERR']
# for each distinct observation...
for i in range(len(mjd)):
if flux[i] / fluxerr[i] < 3: continue
thismjd = mjd[i]
# find all other observations taken within the given MJD bin range
# ithisbin = np.where((mjd-binsize/2.<=mjd) & (mjd<mjd+binsize/2.))[0]
#if len(ithisbin)==1 : continue
#thisband = bandname[ithisbin]
#thisflux = flux[ithisbin]
#thisfluxerr = fluxerr[ithisbin]
#thismag = mag[ithisbin]
#thismagerr = magerr[ithisbin]
thisbindict = {}
for thisband in np.unique(bandname):
ithisbinband = np.where((bandname == thisband) &
(thismjd - binsize / 2. <= mjd) &
(mjd < thismjd + binsize / 2.) &
(magerr > 0) & (magerr < 0.33))[0]
if len(ithisbinband) < 1:
continue
thisflux, thisfluxerr = weighted_avg_and_std(
flux[ithisbinband], 1/fluxerr[ithisbinband]**2)
thismag, thismagerr = weighted_avg_and_std(
mag[ithisbinband], 1/magerr[ithisbinband]**2)
bandpass = sncosmo.get_bandpass(thisband.lower())
thisbindict[thisband] = [bandpass.wave_eff,
thisflux, thisfluxerr,
thismag, thismagerr]
                #if thismagerr>0.5:
                #    import pdb; pdb.set_trace()
            for key1, val1 in thisbindict.items():
                for key2, val2 in thisbindict.items():
if key1 == key2:
continue
if val1[0] >= val2[0]:
continue
bandpair = [key1, key2]
magpair = [val1[3], val2[3]]
magerrpair = [val1[4], val2[4]]
thismjdmid = thismjd + binsize/2.
iblue = np.argmin([val1[0],val2[0]])
ired = np.argmax([val1[0],val2[0]])
colorcurvedict['colorname'].append(bandpair[iblue].upper() + '-' + bandpair[ired].upper())
colorcurvedict['color'].append( magpair[iblue] - magpair[ired] )
colorcurvedict['colorerr'].append( np.sqrt(magerrpair[iblue]**2 + magerrpair[ired]**2 ) )
colorcurvedict['mjd'].append( thismjdmid )
colorcurvedict['mjderr'].append( binsize/2. )
colortable = Table( colorcurvedict )
for colorname in np.unique( colortable['colorname'] ):
icolor = np.where(colortable['colorname'] == colorname)
mjd = colortable['mjd'][icolor]
color = colortable['color'][icolor]
colorerr = colortable['colorerr'][icolor]
mjderr = colortable['mjderr'][icolor]
if min(mjd) > xlim[1] : continue
if max(mjd) < xlim[0] : continue
ax.errorbar( mjd, color, colorerr, mjderr, marker='o', ls=' ', capsize=1, label=colorname )
ax.set_xlabel('MJD')
ax.set_ylabel('color (AB mag)')
ax.legend( numpoints=1 )
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.xaxis.set_major_locator( ticker.MultipleLocator( 1 ) )
ax.xaxis.set_minor_locator( ticker.MultipleLocator( 0.2 ) )
colortable.write( 'spock_colors.data', format='ascii.fixed_width' )
pl.draw()
return( colortable )
def compute_kcorrected_colors_from_observed_data():
""" calculate the observed colors in rest-frame bands,
applying K corrections.
:param restphotsys:
:return:
"""
# read in the observed spock data
nw, se = lightcurve.get_spock_data()
# read in the data file produced by linear fits to the pre-peak data
indatfile = os.path.join(__THISDIR__, 'data/magpk_trise_tfall.dat')
fitdat = ascii.read(indatfile, format='commented_header', header_start=-1,
data_start=0)
outfile = os.path.join(__THISDIR__, 'data/observed_colors_kcorrected.dat')
fout = open(outfile,'w')
print("# event trest c1abname c1ab c2abname c2ab "
"c1veganame c1vega c2veganame c2vega ", file=fout)
# for each observed data point from -6 days to 0 days in the observer
# frame, get the interpolated magnitudes from the linear fits
# and the observed magnitudes from the light curve data
for event in ['nw','se']:
if event.lower()=='se':
sn = se
mjdpkobs = __MJDPKSE__
mjdprepk0, mjdpostpk0 = __MJDPREPK0SE__, __MJDPOSTPK0SE__
iax = 2
else:
sn = nw
mjdpkobs = __MJDPKNW__
mjdprepk0, mjdpostpk0 = __MJDPREPK0NW__, __MJDPOSTPK0NW__
iax = 1
# NOTE: the rest-frame time is always defined relative to the
# *observed* MJD of peak brightness, not the assumed mjdpk
mjd = sn['MJD']
trest = (mjd-mjdpkobs)/(1+__Z__)
tprepk0 = (mjdprepk0-mjdpkobs)/(1+__Z__)
tpostpk0 = (mjdpostpk0-mjdpkobs)/(1+__Z__)
trestfit = fitdat['deltatpk']
mabfit = fitdat['mpk']
fitfilterlist = np.unique(fitdat['band'])
inearpeak = np.where((trest>tprepk0) & (trest<=0))[0]
for i in inearpeak:
# for each observed data point,
# construct a crude SED from the linear fits
# and this observed data point
obsbandname = sn['FILTER'][i].lower()
#if obsbandname in fitfilterlist:
# continue
source_wave = []
source_flux = []
trest = (sn['MJD'][i] - mjdpkobs)/(1+__Z__)
if trest<np.min(trestfit): continue
if trest>np.max(trestfit): continue
bandpass = sncosmo.get_bandpass(obsbandname)
source_wave.append(bandpass.wave_eff)
source_flux.append(sn['MAG'][i])
ifit = np.where((np.abs(trestfit - trest) < 0.1) &
(fitdat['event'] == event))[0]
for j in ifit:
bandpass = sncosmo.get_bandpass(fitdat['band'][j])
source_wave.append(bandpass.wave_eff)
source_flux.append(fitdat['mpk'][j])
isorted = np.argsort(source_wave)
source_wave = np.array(source_wave)
source_flux = np.array(source_flux)
abrestbandname = __ABRESTBANDNAME__[obsbandname]
vegarestbandname = __VEGARESTBANDNAME__[obsbandname]
abkcor = compute_kcorrection(abrestbandname, obsbandname,
__Z__, source_wave[isorted],
source_flux[isorted],
source_wave_unit='Angstrom',
source_flux_unit='magab',
obsphotsys='AB', restphotsys='AB',
verbose=False)
vegakcor = compute_kcorrection(vegarestbandname, obsbandname,
__Z__, source_wave[isorted],
source_flux[isorted],
source_wave_unit='Angstrom',
source_flux_unit='magab',
obsphotsys='AB', restphotsys='AB',
verbose=False)
# To construct a color measurement, we also need the interpolated
# magnitude from a redder band. We get these from the linear fits
# made to well sampled bands, reading in from the data file that
# was produced by the peak_luminosity_vs_time.py module
if obsbandname.lower().startswith('f1'):
fitbandname1 = 'f125w'
fitbandname2 = 'f160w'
else:
fitbandname1 = 'f435w'
fitbandname2 = 'f814w'
m1fit = get_linfitmag(event, fitbandname1, trest)
m2fit = get_linfitmag(event, fitbandname2, trest)
# now we get the K corrections to convert from the observer-frame
# band to the rest-frame band and the AB or Vega system
            kcor1vega = get_kcorrection(event, fitbandname1, trest, restphotsys='Vega')
            kcor2vega = get_kcorrection(event, fitbandname2, trest, restphotsys='Vega')
            kcor1ab = get_kcorrection(event, fitbandname1, trest, restphotsys='AB')
            kcor2ab = get_kcorrection(event, fitbandname2, trest, restphotsys='AB')
fitbandname1restAB = __ABRESTBANDNAME__[fitbandname1]
fitbandname2restAB = __ABRESTBANDNAME__[fitbandname2]
fitbandname1restVega = __VEGARESTBANDNAME__[fitbandname1]
fitbandname2restVega = __VEGARESTBANDNAME__[fitbandname2]
# Here is the observed magnitude in the bluer band
mobs = sn['MAG'][i]
# now we can compute the AB or Vega color in rest-frame band passes
cab1 = (mobs + abkcor) - (m1fit + kcor1ab)
cab2 = (mobs + abkcor) - (m2fit + kcor2ab)
cvega1 = (mobs + vegakcor) - (m1fit + kcor1vega)
cvega2 = (mobs + vegakcor) - (m2fit + kcor2vega)
obscolorname1 = '%s-%s' % (obsbandname.lower(), fitbandname1)
obscolorname2 = '%s-%s' % (obsbandname.lower(), fitbandname2)
abcolorname1 = '%s-%s'%(abrestbandname[4:], fitbandname1restAB[4:])
abcolorname2 = '%s-%s'%(abrestbandname[4:], fitbandname2restAB[4:])
vegacolorname1 = '%s-%s'%(vegarestbandname[7:], fitbandname1restVega[7:])
vegacolorname2 = '%s-%s'%(vegarestbandname[7:], fitbandname2restVega[7:])
print("%s %.1f %4s %6.1f %4s %6.1f %4s %6.1f %4s %6.1f " % (
event, trest, abcolorname1, cab1, abcolorname2, cab2,
vegacolorname1, cvega1, vegacolorname2, cvega2), file=fout)
fout.close()
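# Illustration of the rest-frame color assembled above (a hedged restatement of the
# arithmetic in this function, using its own variable names):
#     cab1 = (mobs + abkcor) - (m1fit + kcor1ab)
# i.e. the observed magnitude and the interpolated magnitude are each shifted into
# their rest-frame bandpass by their own K correction before the difference is taken;
# the Vega-system colors follow the same pattern with the Vega K corrections.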
|
import logging
import pandas as pd
from tqdm import tqdm
from .compare_kmer_content import compare_all_seqs
from .ensembl import get_sequence, get_rna_sequence_from_protein_id
# Create a logger
logging.basicConfig(format="%(name)s - %(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
QUANTITATIVE_KEYWORDS = {
"Gene-order conservation score",
"alignment coverage",
"dN with",
"dS with",
"%id",
}
ORTHOLOGY_ORDER = [
"No homology",
"ortholog_one2one",
"ortholog_one2many",
"ortholog_many2many",
]
# Colors from seaborn's default color_palette()
ORTHOLOGY_PALETTE = dict(
zip(
ORTHOLOGY_ORDER,
[
"grey",
"#1f77b4",
"#ff7f0e",
"#2ca02c",
],
)
)
class HomologyTable:
def __init__(self, data, species1, species2):
"""
Parameters
----------
data : pandas.DataFrame
Homology table from ENSEMBL Biomart (all columns)
species1 : str
Common name for species1, e.g. "mouse"
species2 : str
Common name for species2, e.g. "fly"
"""
self.data = data
self.species1 = species1
self.species2 = species2
# Extract column for homology type (e.g. one2one, one2many, many2many)
self.homology_type_col = [
x for x in self.data.columns if x.endswith("homology type")
][0]
self.data["is_homologue"] = self.data[self.homology_type_col].notnull()
self.species1_id_col = "Query protein or transcript ID"
self.species2_id_col = [
x
for x in self.data.columns
if x.endswith("protein or transcript stable ID")
][0]
self.quantitative_features = [
x
for x in self.data.columns
if any(keyword in x for keyword in QUANTITATIVE_KEYWORDS)
]
gene_order_col = [
col
for col in self.quantitative_features
if "Gene-order conservation score" in col
][0]
self._protein_coding_rows = self.data[gene_order_col].notnull()
self.protein_coding = self.data.loc[self._protein_coding_rows]
self.non_coding = self.data.loc[~self._protein_coding_rows]
@staticmethod
def get_sequences_from_ids(df, id_column, moltype, seqtype):
# ignore_errors=True skips deprecated IDs
if moltype == "protein" and seqtype != "protein":
seqs = [
get_rna_sequence_from_protein_id(x, type=seqtype, ignore_errors=True)
for x in tqdm(df[id_column])
]
else:
seqs = [get_sequence(x, ignore_errors=True) for x in tqdm(df[id_column])]
# Sanitize output based on deprecated ENSEMBL IDs that don't have
# sequences
id_seqs = [(ID, seq) for ID, seq in zip(df[id_column], seqs) if seq is not None]
return id_seqs
def _get_cross_species(self, random_subset, kmer_comparisons):
"""Add species columns and subset when species are different"""
id_to_species1 = pd.Series(
self.species1, index=random_subset[self.species1_id_col]
)
id_to_species2 = pd.Series(
self.species2, index=random_subset[self.species2_id_col]
)
id_to_species = pd.concat([id_to_species1, id_to_species2]).to_dict()
kmer_comparisons["species1"] = kmer_comparisons.id1.map(id_to_species)
kmer_comparisons["species2"] = kmer_comparisons.id2.map(id_to_species)
kmer_comparisons["species_species"] = (
kmer_comparisons.species1 + "_" + kmer_comparisons.species2
)
cross_species = kmer_comparisons.query("species1 != species2")
del kmer_comparisons
return cross_species
def _add_orthology_metadata(self, cross_species, random_subset):
"""Join with original metadata to get homology information"""
left_on = ["id1", "id2"]
right_on = [self.species1_id_col, self.species2_id_col]
cross_species_metadata = cross_species.merge(
random_subset, left_on=left_on, right_on=right_on, how="left"
)
return cross_species_metadata
def _subset_non_orthologous(self, cross_species_metadata, random_state):
"""Take random subsets of the non-homologous data and add back"""
# Take random subsets of the non-homologous data as there's several
# orders of magnitude more non-homologous pairs than homologous pairs
cross_species_metadata_subset_non_homologues = cross_species_metadata.groupby(
["id1", "ksize", "alphabet"], as_index=False, group_keys=False
).apply(
lambda x: x.loc[x["is_homologue"].isnull()].sample(
10, random_state=random_state
)
)
# Add the randomly sampled non homologous data back to the data that is
# homologous
cross_species_metadata_subset = pd.concat(
[
cross_species_metadata_subset_non_homologues,
cross_species_metadata.query("is_homologue == True"),
],
ignore_index=True,
)
return cross_species_metadata_subset
def compare_orthology(
self,
datatype,
n_subset=200,
random_state=0,
n_jobs=32,
n_background=100,
ksizes=list(range(2, 41)),
):
"""
Parameters
----------
datatype : str
            One of 'protein_coding_peptide', 'protein_coding_cdna',
            'protein_coding_cds', or 'non_coding'
n_subset
random_state
n_jobs
n_background : int
Number of background comparisons to do, per species1 sequence
ksizes
Returns
-------
homology_jaccard : pandas.DataFrame
Table of jaccard similarities of random subsets of proteins or
transcripts across species1 and species2, joined with the original
metadata table
"""
if datatype == "protein_coding_peptide":
data = self.protein_coding
moltype = "protein"
seqtype = "protein"
elif datatype == "protein_coding_cdna":
data = self.protein_coding
moltype = "DNA"
seqtype = "cdna"
elif datatype == "protein_coding_cds":
data = self.protein_coding
moltype = "DNA"
seqtype = "cds"
elif datatype == "non_coding":
data = self.non_coding
moltype = "DNA"
seqtype = "cdna"
else:
            raise ValueError(
                "Only 'protein_coding_peptide', 'protein_coding_cdna', "
                "'protein_coding_cds', and 'non_coding' datatypes are accepted")
logger.info(f"datatype: {datatype}, moltype: {moltype}, " f"seqtype: {seqtype}")
logger.info("Subsetting data")
random_subset = data.sample(n_subset, random_state=random_state)
logger.info("Getting sequences from IDs")
species1_id_seqs = self.get_sequences_from_ids(
random_subset, self.species1_id_col, moltype, seqtype
)
species2_id_seqs = self.get_sequences_from_ids(
random_subset, self.species2_id_col, moltype, seqtype
)
logger.info("K-merizing and calculating jaccard comparisons")
kmer_comparisons = compare_all_seqs(
species1_id_seqs,
species2_id_seqs,
n_jobs,
ksizes,
moltype=moltype,
n_background=n_background,
)
logger.info("Cleaning up k-mer comparisons for cross-species data")
cross_species = self._get_cross_species(random_subset, kmer_comparisons)
cross_species_metadata = self._add_orthology_metadata(
cross_species, random_subset
)
cross_species_metadata_fillna = cross_species_metadata.fillna("No homology")
return cross_species_metadata_fillna
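# Minimal usage sketch (kept commented out; the file name, species labels and the
# exact Biomart column names below are illustrative assumptions, not part of this
# module):
#
#   import pandas as pd
#   biomart = pd.read_csv("mouse_fly_biomart_export.csv")  # hypothetical export with
#   # "... homology type", "Query protein or transcript ID",
#   # "... protein or transcript stable ID" and "Gene-order conservation score" columns
#   table = HomologyTable(biomart, species1="mouse", species2="fly")
#   comparisons = table.compare_orthology(
#       datatype="protein_coding_peptide", n_subset=50, n_jobs=4)
#   comparisons.to_csv("mouse_fly_kmer_orthology.csv", index=False)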
|
#!/usr/bin/env python
# coding: utf-8
import tensorflow as tf
print(tf.__version__)
# 2.0.0-alpha0
from tensorflow.keras.applications import VGG16
conv_base = VGG16(weights='imagenet',
include_top=False,
input_shape=(150, 150, 3))
conv_base.summary()
#_________________________________________________________________
#Layer (type) Output Shape Param #
#=================================================================
#input_1 (InputLayer) [(None, 150, 150, 3)] 0
#_________________________________________________________________
#block1_conv1 (Conv2D) (None, 150, 150, 64) 1792
#_________________________________________________________________
#block1_conv2 (Conv2D) (None, 150, 150, 64) 36928
#_________________________________________________________________
#block1_pool (MaxPooling2D) (None, 75, 75, 64) 0
#_________________________________________________________________
#block2_conv1 (Conv2D) (None, 75, 75, 128) 73856
#_________________________________________________________________
#block2_conv2 (Conv2D) (None, 75, 75, 128) 147584
#_________________________________________________________________
#block2_pool (MaxPooling2D) (None, 37, 37, 128) 0
#_________________________________________________________________
#block3_conv1 (Conv2D) (None, 37, 37, 256) 295168
#_________________________________________________________________
#block3_conv2 (Conv2D) (None, 37, 37, 256) 590080
#_________________________________________________________________
#block3_conv3 (Conv2D) (None, 37, 37, 256) 590080
#_________________________________________________________________
#block3_pool (MaxPooling2D) (None, 18, 18, 256) 0
#_________________________________________________________________
#block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160
#_________________________________________________________________
#block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808
#_________________________________________________________________
#block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808
#_________________________________________________________________
#block4_pool (MaxPooling2D) (None, 9, 9, 512) 0
#_________________________________________________________________
#block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808
#_________________________________________________________________
#block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808
#_________________________________________________________________
#block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808
#_________________________________________________________________
#block5_pool (MaxPooling2D) (None, 4, 4, 512) 0
#=================================================================
#Total params: 14,714,688
#Trainable params: 14,714,688
#Non-trainable params: 0
#_________________________________________________________________
import os
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
base_dir = r'F:\zkl_repository\small_pics'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20
def extract_features(directory, sample_count):
features = np.zeros(shape=(sample_count, 4, 4, 512))
labels = np.zeros(shape=(sample_count))
generator = datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=batch_size,
class_mode='binary')
i = 0
for inputs_batch, labels_batch in generator:
features_batch = conv_base.predict(inputs_batch)
features[i * batch_size : (i + 1) * batch_size] = features_batch
labels[i * batch_size : (i + 1) * batch_size] = labels_batch
i += 1
if i * batch_size >= sample_count:
# Note that since generators yield data indefinitely in a loop,
# we must `break` after every image has been seen once.
break
return features, labels
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(train_features, train_labels,
epochs=30,
batch_size=20,
validation_data=(validation_features, validation_labels))
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
from tensorflow.keras import models
from tensorflow.keras import layers
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
#Model: "sequential_2"
#_________________________________________________________________
#Layer (type) Output Shape Param #
#=================================================================
#vgg16 (Model) (None, 4, 4, 512) 14714688
#_________________________________________________________________
#flatten (Flatten) (None, 8192) 0
#_________________________________________________________________
#dense_4 (Dense) (None, 256) 2097408
#_________________________________________________________________
#dense_5 (Dense) (None, 1) 257
#=================================================================
#Total params: 16,812,353
#Trainable params: 16,812,353
#Non-trainable params: 0
#_________________________________________________________________
print('This is the number of trainable weights '
'before freezing the conv base:', len(model.trainable_weights))
# Setting trainable = False freezes the conv base so its weights are not updated during training
conv_base.trainable = False
print('This is the number of trainable weights '
'after freezing the conv base:', len(model.trainable_weights))
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Note that the validation data must not be augmented
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        # Target directory
train_dir,
        # Resize all images to 150x150
target_size=(150, 150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=['acc'])
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50,
verbose=2)
model.save('cats_and_dogs_small_3.h5')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
conv_base.summary()
#Model: "vgg16"
#_________________________________________________________________
#Layer (type) Output Shape Param #
#=================================================================
#input_1 (InputLayer) [(None, 150, 150, 3)] 0
#_________________________________________________________________
#block1_conv1 (Conv2D) (None, 150, 150, 64) 1792
#_________________________________________________________________
#block1_conv2 (Conv2D) (None, 150, 150, 64) 36928
#_________________________________________________________________
#block1_pool (MaxPooling2D) (None, 75, 75, 64) 0
#_________________________________________________________________
#block2_conv1 (Conv2D) (None, 75, 75, 128) 73856
#_________________________________________________________________
#block2_conv2 (Conv2D) (None, 75, 75, 128) 147584
#_________________________________________________________________
#block2_pool (MaxPooling2D) (None, 37, 37, 128) 0
#_________________________________________________________________
#block3_conv1 (Conv2D) (None, 37, 37, 256) 295168
#_________________________________________________________________
#block3_conv2 (Conv2D) (None, 37, 37, 256) 590080
#_________________________________________________________________
#block3_conv3 (Conv2D) (None, 37, 37, 256) 590080
#_________________________________________________________________
#block3_pool (MaxPooling2D) (None, 18, 18, 256) 0
#_________________________________________________________________
#block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160
#_________________________________________________________________
#block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808
#_________________________________________________________________
#block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808
#_________________________________________________________________
#block4_pool (MaxPooling2D) (None, 9, 9, 512) 0
#_________________________________________________________________
#block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808
#_________________________________________________________________
#block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808
#_________________________________________________________________
#block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808
#_________________________________________________________________
#block5_pool (MaxPooling2D) (None, 4, 4, 512) 0
#=================================================================
#Total params: 14,714,688
#Trainable params: 0
#Non-trainable params: 14,714,688
#_________________________________________________________________
# Retrain the network.
# The goal here is fine-tuning, which complements feature extraction: "unfreeze" the top
# few layers of the frozen conv base and train them jointly with the newly added part
# (here, the fully connected classifier). It is called fine-tuning because it only
# slightly adjusts the more abstract representations of the reused model.
conv_base.trainable = True
# Unfreeze only convolutional block 5 for fine-tuning
set_trainable = False
for layer in conv_base.layers:
if layer.name == 'block5_conv1':
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
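# Sanity check (an added sketch, not part of the original script): list which layers
# remain trainable after the selective unfreezing above; only the block5_* layers
# should report True.
for layer in conv_base.layers:
    print(layer.name, layer.trainable)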
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-5),
metrics=['acc'])
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
# Save the model
model.save('cats_and_dogs_small_4.h5')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
def smooth_curve(points, factor=0.8):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
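# Worked example (sketch): with the default factor of 0.8,
# smooth_curve([1.0, 2.0, 3.0]) returns approximately [1.0, 1.2, 1.56], because each
# smoothed point is 0.8 * previous_smoothed + 0.2 * current_point.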
plt.plot(epochs,
smooth_curve(acc), 'bo', label='Smoothed training acc')
plt.plot(epochs,
smooth_curve(val_acc), 'b', label='Smoothed validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs,
smooth_curve(loss), 'bo', label='Smoothed training loss')
plt.plot(epochs,
smooth_curve(val_loss), 'b', label='Smoothed validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
print('test acc:', test_acc)
|
from pyrosim.neuron import NEURON
from pyrosim.synapse import SYNAPSE
class NEURAL_NETWORK:
def __init__(self, nndfFileName):
self.neurons = {}
self.synapses = {}
f = open(nndfFileName, "r")
for line in f.readlines():
self.Digest(line)
f.close()
def Print(self):
self.Print_Sensor_Neuron_Values()
self.Print_Hidden_Neuron_Values()
self.Print_Motor_Neuron_Values()
print("")
# ---------------- Private methods --------------------------------------
def Add_Neuron_According_To(self, line):
neuron = NEURON(line)
self.neurons[neuron.Get_Name()] = neuron
def Add_Synapse_According_To(self, line):
synapse = SYNAPSE(line)
sourceNeuronName = synapse.Get_Source_Neuron_Name()
targetNeuronName = synapse.Get_Target_Neuron_Name()
self.synapses[sourceNeuronName, targetNeuronName] = synapse
def Digest(self, line):
if self.Line_Contains_Neuron_Definition(line):
self.Add_Neuron_According_To(line)
if self.Line_Contains_Synapse_Definition(line):
self.Add_Synapse_According_To(line)
def Line_Contains_Neuron_Definition(self, line):
return "neuron" in line
def Line_Contains_Synapse_Definition(self, line):
return "synapse" in line
def Print_Sensor_Neuron_Values(self):
print("sensor neuron values: ", end="")
for neuronName in sorted(self.neurons):
if self.neurons[neuronName].Is_Sensor_Neuron():
self.neurons[neuronName].Print()
print("")
def Print_Hidden_Neuron_Values(self):
print("hidden neuron values: ", end="")
for neuronName in sorted(self.neurons):
if self.neurons[neuronName].Is_Hidden_Neuron():
self.neurons[neuronName].Print()
print("")
def Print_Motor_Neuron_Values(self):
print("motor neuron values: ", end="")
for neuronName in sorted(self.neurons):
if self.neurons[neuronName].Is_Motor_Neuron():
self.neurons[neuronName].Print()
print("")
|
# Automatically generated by pb2py
# fmt: off
import protobuf as p
from .StellarAssetType import StellarAssetType
class StellarPaymentOp(p.MessageType):
MESSAGE_WIRE_TYPE = 211
def __init__(
self,
source_account: str = None,
destination_account: str = None,
asset: StellarAssetType = None,
amount: int = None,
) -> None:
self.source_account = source_account
self.destination_account = destination_account
self.asset = asset
self.amount = amount
@classmethod
def get_fields(cls):
return {
1: ('source_account', p.UnicodeType, 0),
2: ('destination_account', p.UnicodeType, 0),
3: ('asset', StellarAssetType, 0),
4: ('amount', p.SVarintType, 0),
}
|
from faker import Faker
# from arbeitsstunden.models import *
import random
from member.models import profile
'''
Creates the requested number of fake news items.
Fake news items are news entries that do not necessarily have to make sense.
@return list of dicts with the keys Text and Titel
'''
def fakeNews(Anzahl: int) -> list:
fake = Faker()
Ergebnis = []
for _ in range(0,Anzahl):
newNews = {
"Text": fake.text(max_nb_chars=500),
"Titel": fake.sentence()
}
Ergebnis.append(newNews)
return Ergebnis
'''
Creates the requested number of fake users.
@return list of dicts with the keys username, vorname, nachname, country, hometown, Email, HandyNummer
'''
def fakeNutzer(Anzahl: int) -> list:
fake = Faker()
Ergebnis = []
for _ in range(0, Anzahl):
newNews = {
"username": fake.simple_profile().get("username"),
"vorname": fake.first_name(),
"nachname": fake.last_name(),
"country": fake.country(),
"hometown": fake.city(),
"Email": fake.email(),
"HandyNummer": fake.phone_number()
}
Ergebnis.append(newNews)
return Ergebnis
# def fakeArbeitsstunden(AnzahlProKostenstelle: int):
# # record the current season if it does not yet exist
# season = getCurrentSeason()
# fake = Faker()
# Nutzer = profile.objects.all()
# Kostenstelen_namen = [
# "Etage",
# "AGIV",
# "Amme",
# "Rudolf",
# "Halle Eschweiler",
# "Halle Aachen"
# ]
# # insert the cost centers
# for i in Kostenstelen_namen:
# new = costCenter(name=i, description="----")
# new.save()
# # create projects (AnzahlProKostenstelle per cost center)
# for kostenstelle in costCenter.objects.all():
# for _ in range(AnzahlProKostenstelle):
# newProject = project(
# name=fake.text(max_nb_chars=60),
# description = fake.sentence(),
# season = getCurrentSeason()[0],
# costCenter = kostenstelle,
# aktiv = random.choice([True,False])
# )
# newProject.save()
# newProject.responsible.add(random.choice(Nutzer).user)
# for _ in range(random.randint(2,10)):
# # create work items (between 2 and 10 per project)
# tempWork = work(
# name = fake.text(max_nb_chars=40),
# hours = random.randint(2, 30)
# )
# tempWork.save()
# for _ in range(random.randint(1,5)):
# # randomly assign accounts
# tempWork.employee.add(random.choice(Nutzer).workingHoursAccount)
# tempWork.save()
# newProject.parts.add(tempWork)
# pass
# newProject.save()
# pass
|
import speech_recognition
import pyttsx
speech_engine = pyttsx.init('sapi5')
speech_engine.setProperty('rate', 150)
def speak(text):
    speech_engine.say(text)
    speech_engine.runAndWait()
recognizer = speech_recognition.Recognizer()
def listen():
    with speech_recognition.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    try:
        return recognizer.recognize_sphinx(audio)
    except speech_recognition.UnknownValueError:
        print("Could not understand audio")
    except speech_recognition.RequestError as e:
        print("Recog error {0}".format(e))
    return ""
speak("Say something!")
speak("I heard you say " + listen())
|
import os
import sys
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import csv
import locale
import re
locale.setlocale(locale.LC_ALL, '')
total_nodes = 0
DEFAULT_VALUE="3"
NODE_NUM = ["3", "5", "10", "26"]
def get_completion_time(startTime, endTime):
startTime = datetime.strptime(startTime, "%H:%M:%S.%f")
endTime = datetime.strptime(endTime, "%H:%M:%S.%f")
if endTime < startTime:
endTime += timedelta(days=1)
completionTime = endTime - startTime
return str(completionTime.seconds)
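# Example (sketch): get_completion_time("23:59:50.000", "00:00:10.000") returns "20",
# because an end time earlier than the start time is interpreted as falling on the
# next day before the difference in seconds is taken.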
def parse_logs(runFiles):
avgIteration = []
for runFile in runFiles:
lines = [line.rstrip('\n') for line in open(runFile)]
iteration = 0
iterationTimes = []
startTime = lines[0][7:20]
for line in lines:
idx = line.find("Train Error")
if idx != -1:
endTime = line[7:20]
timeToComplete = get_completion_time(startTime, endTime)
startTime=endTime
iterationTimes.append(int(timeToComplete))
# print(iterationTimes)
if len(iterationTimes) == 0:
continue
avgIter = np.mean(np.array(iterationTimes))
avgIteration.append(avgIter)
avg = np.mean(np.array(avgIteration))
dev = np.std(np.array(avgIteration))
return avg,dev
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Usage: [input log directory, parsed results directory, num_runs in input directory]")
sys.exit()
input_file_dir = sys.argv[1]
output_file_dir = sys.argv[2]
num_runs = int(sys.argv[3])
print(input_file_dir)
files = [x for x in os.listdir(input_file_dir)]
cols = [[NODE + "_avg", NODE + "_dev"] for NODE in NODE_NUM]
cols = [item for sublist in cols for item in sublist]
columns = ['committee_type'] + cols
results_df = pd.DataFrame(columns=columns)
row_list = []
row = ['noisers']
for NODES in NODE_NUM:
str_noiser_regex = NODES + "_" + DEFAULT_VALUE + "_" + DEFAULT_VALUE + "_\\d.log"
noiser_regex = re.compile(str_noiser_regex)
different_runs = [input_file_dir+file for file in files if re.match(noiser_regex, file)]
str_noiser_regex = "\\d+" + "_" + DEFAULT_VALUE + "_" + DEFAULT_VALUE + "_\\d.log"
print(NODES)
avg, dev = parse_logs(different_runs)
print(avg)
print(dev)
row.append(avg)
row.append(dev)
row_list.append(row)
print(row_list)
row = ['verifiers']
# Calculate for verifiers
for NODES in NODE_NUM:
str_regex = DEFAULT_VALUE + "_" + NODES + "_" + DEFAULT_VALUE + "_\\d.log"
regex = re.compile(str_regex)
different_runs = [input_file_dir+file for file in files if re.match(regex, file)]
print(NODES)
avg, dev = parse_logs(different_runs)
print(avg)
print(dev)
row.append(avg)
row.append(dev)
row_list.append(row)
row = ['aggregators']
# Calculate for aggregators
for NODES in NODE_NUM:
str_regex = DEFAULT_VALUE + "_" + DEFAULT_VALUE + "_" + NODES + "_\\d.log"
regex = re.compile(str_regex)
different_runs = [input_file_dir+file for file in files if re.match(regex, file)]
print(NODES)
avg, dev = parse_logs(different_runs)
print(avg)
print(dev)
row.append(avg)
row.append(dev)
row_list.append(row)
print(row_list)
results_df = pd.DataFrame(row_list,columns=columns)
results_df.to_csv('eval_vrf.csv',index=False)
|
import sys
import pytest
import unittest
from django_message_broker.server.utils import IntegerSequence, WeakPeriodicCallback, PeriodicCallback, MethodRegistry
class IntegerSequenceTests(unittest.TestCase):
def test_next(self):
integer_sequence = IntegerSequence().new_iterator()
self.assertEqual(next(integer_sequence), 0)
self.assertEqual(next(integer_sequence), 1)
def test_next_with_none_zero_start(self):
integer_sequence = IntegerSequence().new_iterator(start=10)
self.assertEqual(next(integer_sequence), 10)
self.assertEqual(next(integer_sequence), 11)
class PeriodicCallbackCase:
def start(self):
self.pc = PeriodicCallback(self._callback, callback_time=100)
self.pc.start()
def stop(self):
self.pc.stop()
del self.pc
def _callback(self):
pass
class WeakPeriodicCallbackCase:
def start(self):
self.pc = WeakPeriodicCallback(self._callback, callback_time=100)
self.pc.start()
def stop(self):
self.pc.stop()
del self.pc
def _callback(self):
pass
class WeakPeriodCallbackTests(unittest.TestCase):
def test_strong_ref(self):
self.periodic_callback = PeriodicCallbackCase()
self.assertEqual(sys.getrefcount(self.periodic_callback), 2)
# When we create the strong periodic callback the reference count
# on the class will increase because the periodic callback references
# the class.
self.periodic_callback.start()
self.assertEqual(sys.getrefcount(self.periodic_callback), 3)
# When we stop the callback the reference count should reduce again.
self.periodic_callback.stop()
self.assertEqual(sys.getrefcount(self.periodic_callback), 2)
def test_weak_ref(self):
self.weak_periodic_callback = WeakPeriodicCallbackCase()
self.assertEqual(sys.getrefcount(self.weak_periodic_callback), 2)
# A weak referenced periodic callback does not increase the reference
# count on the class.
self.weak_periodic_callback.start()
self.assertEqual(sys.getrefcount(self.weak_periodic_callback), 2)
# When the weak referenced periodic callback stops the reference count
# does not change.
self.weak_periodic_callback.stop()
self.assertEqual(sys.getrefcount(self.weak_periodic_callback), 2)
class MethodRegistryExceptionTests(unittest.TestCase):
def test_no_command_exception(self):
with pytest.raises(Exception):
class NoCommand:
class Registry(MethodRegistry):
pass
@Registry.register()
def f1(self):
pass
def test_duplicate_commands_exception(self):
with pytest.raises(Exception):
class DuplicateCommands:
class Registry(MethodRegistry):
pass
@Registry.register(command=b"one")
def f1(self):
pass
@Registry.register(command=b"one")
def f2(self):
pass
class MathsByName:
# Create a registry of math functions
class MathFunctions(MethodRegistry):
pass
def __init__(self):
# Bind methods to instance
self.maths = MathsByName.MathFunctions.get_bound_callables(self)
@MathFunctions.register(command=b"plusOne")
def f1(self, a):
return a + 1
@MathFunctions.register(command=b"sumTwo")
def f2(self, a, b):
return a + b
class MethodRegistryTest(unittest.TestCase):
def test_functions(self):
myMaths = MathsByName()
self.assertEqual(myMaths.maths[b"plusOne"](1), 2)
self.assertEqual(myMaths.maths[b"sumTwo"](1, 2), 3)
|
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the formalisation pipeline."""
import contextlib
import functools
import os
from typing import Generator, Mapping, Optional, Tuple
from absl import flags
from absl import logging
import chex
from distribution_shift_framework.core import checkpointing
from distribution_shift_framework.core.datasets import data_utils
from distribution_shift_framework.core.metrics import metrics
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import utils
import ml_collections
import numpy as np
import optax
from six.moves import cPickle as pickle
import tensorflow as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
def get_per_device_batch_size(total_batch_size: int) -> int:
num_devices = jax.device_count()
per_device_batch_size, ragged = divmod(total_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {total_batch_size} must be divisible by the '
f'total number of devices {num_devices}')
return per_device_batch_size
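# Example (sketch, assuming 8 devices in total): a global batch size of 1024 yields a
# per-device batch size of 128, while a global batch size of 100 raises a ValueError
# because it is not divisible by the device count.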
class Experiment(experiment.AbstractExperiment):
"""Formalisation experiment."""
CHECKPOINT_ATTRS = {
'_params': 'params',
'_state': 'state',
'_opt_state': 'opt_state',
'_d_params': 'd_params',
'_d_state': 'd_state',
'_d_opt_state': 'd_opt_state',
'_adapt_params': 'adapt_params',
'_adapt_state': 'adapt_state'
}
def __init__(self, mode: str, init_rng: chex.PRNGKey,
config: ml_collections.ConfigDict):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode, init_rng=init_rng)
self.mode = mode
self.config = config
self.init_rng = init_rng
# Set up discriminator parameters.
self._d_params = None
self._d_state = None
self._d_opt_state = None
# Double transpose trick to improve performance on TPUs.
self._should_transpose_images = (
config.enable_double_transpose and
jax.local_devices()[0].platform == 'tpu')
self._params = None # params
self._state = None # network state for stats like batchnorm
self._opt_state = None # optimizer state
self._adapt_params = None
self._adapt_state = None
self._label = config.data.label
with utils.log_activity('transform functions'):
self.forward = hk.transform_with_state(self._forward_fn)
self.eval_batch = jax.pmap(self._eval_batch, axis_name='i')
self.learner_fn = hk.transform_with_state(self._learner_fn)
self.adversarial_fn = hk.transform_with_state(self._adversarial_fn)
self.adapt_fn = self._adapt_fn
self.adaptor = None
self._update_func = jax.pmap(
self._update_func, axis_name='i', donate_argnums=(0, 1, 2))
if mode == 'train':
with utils.log_activity('initialize training'):
self._init_train(init_rng)
if getattr(self.config.training.learn_adapt, 'fn', None):
learner_adapt_fn = self.config.training.learn_adapt.fn
learner_adapt_kwargs = self.config.training.learn_adapt.kwargs
self._train_adapter = learner_adapt_fn(**learner_adapt_kwargs)
if self._adapt_params is None:
self._adapt_params = self._params
self._adapt_state = self._state
self._train_adapter.set(self._adapt_params, self._adapt_state)
else:
self._train_adapter = None
def optimizer(self) -> optax.GradientTransformation:
optimizer_fn = getattr(optax, self.config.optimizer.name)
return optimizer_fn(**self.config.optimizer.kwargs)
def _maybe_undo_transpose_images(self, images: chex.Array) -> chex.Array:
if self._should_transpose_images:
return jnp.transpose(images, (1, 2, 3, 0)) # NHWC -> HWCN.
return images
def _maybe_transpose_images(self, images: chex.Array) -> chex.Array:
if self._should_transpose_images:
# We use the double transpose trick to improve performance for TPUs.
# Note that there is a matching NHWC->HWCN transpose in the data pipeline.
# Here we reset back to NHWC like our model expects. The compiler cannot
# make this optimization for us since our data pipeline and model are
# compiled separately.
images = jnp.transpose(images, (3, 0, 1, 2)) # HWCN -> NHWC.
return images
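  # Shape illustration (sketch): with a per-device batch of 8 RGB images of size
  # 150x150, the data pipeline emits HWCN tensors of shape (150, 150, 3, 8) and the
  # transpose above restores the NHWC shape (8, 150, 150, 3) that the model expects.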
def _postprocess_fn(
self,
inputs: data_utils.Batch,
rng: chex.PRNGKey
) -> data_utils.Batch:
if not hasattr(self.config, 'postprocessing'):
return inputs
postprocessing = getattr(self.config.postprocessing, 'fn', None)
if postprocessing is None:
return inputs
postprocess_fn = functools.partial(postprocessing,
**self.config.postprocessing.kwargs)
images = inputs['image']
labels = inputs['one_hot_label']
postprocessed_images, postprocessed_labels = postprocess_fn(
images, labels, rng=rng)
postprocessed_inputs = dict(**inputs)
postprocessed_inputs['image'] = postprocessed_images
postprocessed_inputs['one_hot_label'] = postprocessed_labels
return postprocessed_inputs
def _learner_fn(self, inputs: data_utils.Batch,
reduction='mean') -> Tuple[data_utils.ScalarDict, chex.Array]:
logits = self._forward_fn(inputs, is_training=True)
if getattr(self.config.data, 'label_property', '') in inputs.keys():
property_vs = inputs[self.config.data.label_property]
property_onehot = hk.one_hot(property_vs, self.config.data.n_properties)
else:
property_onehot = None
algorithm_fn = self.config.training.algorithm.fn
kwargs = self.config.training.algorithm.kwargs
scalars, logits = algorithm_fn(**kwargs)(
logits, inputs['one_hot_label'], property_vs=property_onehot,
reduction=reduction)
predicted_label = jnp.argmax(logits, axis=-1)
top1_acc = jnp.equal(predicted_label,
inputs[self._label]).astype(jnp.float32)
scalars['top1_acc'] = top1_acc.mean()
return scalars, logits
def learner_adapt_weights_fn(
self, params: optax.Params, state: optax.OptState,
old_params: optax.Params, old_state: optax.OptState,
inputs: data_utils.Batch, rng: chex.PRNGKey,
global_step: chex.Array
) -> Tuple[Tuple[data_utils.ScalarDict, chex.Array], optax.OptState]:
(scalars, logits), g_state = self._train_adapter(
fn=functools.partial(self.learner_fn.apply, reduction=None),
params=params, state=state, inputs=inputs, global_step=global_step,
rng=rng, old_params=old_params, old_state=old_state)
return (scalars, logits), g_state
def _adversarial_fn(self, logits: chex.Array,
inputs: data_utils.Batch) -> data_utils.ScalarDict:
if getattr(self.config.data, 'label_property', '') in inputs.keys():
property_vs = inputs[self.config.data.label_property]
property_onehot = hk.one_hot(property_vs, self.config.data.n_properties)
else:
property_onehot = None
one_hot_labels = inputs['one_hot_label']
algorithm_fn = self.config.training.algorithm.fn
kwargs = self.config.training.algorithm.kwargs
return algorithm_fn(**kwargs).adversary(
logits, property_vs=property_onehot, reduction='mean',
targets=one_hot_labels)
def _adapt_fn(self, params: optax.Params, state: optax.OptState,
rng: chex.PRNGKey, is_final_eval: bool = False):
adapt_fn = getattr(self.config.adapter, 'fn')
adapt_kwargs = getattr(self.config.adapter, 'kwargs')
forward_fn = functools.partial(self.forward.apply, is_training=True,
test_local_stats=False)
self.adaptor = adapt_fn(init_params=params,
init_state=state,
forward=jax.pmap(forward_fn, axis_name='i'),
**adapt_kwargs)
per_device_batch_size = get_per_device_batch_size(
self.config.training.batch_size)
ds = self._load_data(per_device_batch_size=per_device_batch_size,
is_training=False,
data_kwargs=self.config.data.test_kwargs)
for step, batch in enumerate(ds, 1):
logging.info('Updating using an adaptor function.')
self.adaptor.update(batch, batch[self.config.data.label_property], rng)
if (not is_final_eval and
step > getattr(self.config.adapter, 'num_adaptation_steps')):
break
def _forward_fn(self,
inputs: data_utils.Batch,
is_training: bool,
test_local_stats: bool = False) -> chex.Array:
model_constructor = self.config.model.constructor
model_instance = model_constructor(**self.config.model.kwargs.to_dict())
images = inputs['image']
images = self._maybe_transpose_images(images)
images = self.config.model.preprocess(images)
if isinstance(model_instance, hk.nets.MLP):
return model_instance(images)
return model_instance(images, is_training=is_training)
def _d_loss_fn(
self, d_params: optax.Params, d_state: optax.OptState, inputs: chex.Array,
logits: chex.Array,
rng: chex.PRNGKey
) -> Tuple[chex.Array, Tuple[data_utils.ScalarDict, optax.OptState]]:
d_scalars, d_state = self.adversarial_fn.apply(d_params, d_state, rng,
logits, inputs)
if not d_scalars:
# No adversary.
return 0., (d_scalars, d_state)
scaled_loss = d_scalars['loss'] / jax.device_count()
d_scalars = {f'adv_{k}': v for k, v in d_scalars.items()}
return scaled_loss, (d_scalars, d_state)
def _run_postprocess_fn(self,
rng: chex.PRNGKey,
inputs: data_utils.Batch) -> data_utils.Batch:
inputs = self._postprocess_fn(inputs, rng)
return inputs
def _loss_fn(
self, g_params: optax.Params,
g_state: optax.OptState,
d_params: optax.Params,
d_state: optax.OptState,
inputs: chex.Array,
rng: chex.PRNGKey,
global_step: chex.Array,
old_g_params: Optional[optax.Params] = None,
old_g_state: Optional[optax.OptState] = None
) -> Tuple[chex.Array, Tuple[
data_utils.ScalarDict, chex.Array, data_utils.Batch, optax.OptState]]:
# Find the loss according to the generator.
if getattr(self.config.training.learn_adapt, 'fn', None):
# Use generator loss computed by a training adaptation algorithm.
(scalars, logits), g_state = self.learner_adapt_weights_fn(
params=g_params,
state=g_state,
old_params=old_g_params,
old_state=old_g_state,
rng=rng,
inputs=inputs,
global_step=global_step)
else:
(scalars, logits), g_state = self.learner_fn.apply(g_params, g_state, rng,
inputs)
d_scalars, _ = self.adversarial_fn.apply(d_params, d_state, rng, logits,
inputs)
# If there is an adversary:
if 'loss' in d_scalars.keys():
# Want to minimize the loss, so negate it.
adv_weight = self.config.training.adversarial_weight
scalars['loss'] = scalars['loss'] - d_scalars['loss'] * adv_weight
scalars.update({f'gen_adv_{k}': v for k, v in d_scalars.items()})
scaled_loss = scalars['loss'] / jax.device_count()
return scaled_loss, (scalars, logits, inputs, g_state)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def _prepare_train_batch(self, rng: chex.PRNGKey,
batch: data_utils.Batch) -> data_utils.Batch:
noise_threshold = self.config.training.label_noise
if noise_threshold > 0:
random_labels = jax.random.randint(
rng[0],
shape=batch[self._label].shape,
dtype=batch[self._label].dtype,
minval=0,
maxval=self.config.data.n_classes)
mask = jax.random.uniform(rng[0],
batch[self._label].shape) < noise_threshold
batch[self._label] = (random_labels * mask +
batch[self._label] * (1 - mask))
batch['one_hot_label'] = hk.one_hot(
batch[self._label], self.config.data.n_classes)
return batch
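  # Illustration of the label-noise masking above (sketch): with
  # config.training.label_noise = 0.1, roughly 10% of the entries in
  # batch[self._label] are replaced by uniformly drawn class indices before the
  # one-hot labels are built; with label_noise = 0 the branch is skipped and the
  # labels pass through unchanged.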
def _init_train(self, rng: chex.PRNGKey):
self._train_input = utils.py_prefetch(self._build_train_input)
if self._params is None:
logging.info('Initializing parameters randomly rather than restoring'
' from checkpoint.')
batch = next(self._train_input)
batch['one_hot_label'] = hk.one_hot(batch[self._label],
self.config.data.n_classes)
# Initialize generator.
self._params, self._state = self._init_params(rng, batch)
opt_init, _ = self.optimizer()
self._opt_state = jax.pmap(opt_init)(self._params)
# Initialize discriminator.
bcast_rng = utils.bcast_local_devices(rng)
(_, dummy_logits), _ = jax.pmap(self.learner_fn.apply)(self._params,
self._state,
bcast_rng, batch)
self._d_params, self._d_state = self._init_d_params(
rng, dummy_logits, batch)
opt_init, _ = self.optimizer()
if self._d_params:
self._d_opt_state = jax.pmap(opt_init)(self._d_params)
else:
# Is empty.
self._d_opt_state = None
def _init_params(
self, rng: chex.PRNGKey,
batch: data_utils.Batch) -> Tuple[optax.Params, optax.OptState]:
init_net = jax.pmap(self.learner_fn.init)
rng = utils.bcast_local_devices(rng)
params, state = init_net(rng, batch)
if not self.config.pretrained_checkpoint:
return params, state
ckpt_data = checkpointing.load_model(
self.config.pretrained_checkpoint)
ckpt_params, ckpt_state = ckpt_data['params'], ckpt_data['state']
ckpt_params = utils.bcast_local_devices(ckpt_params)
ckpt_state = utils.bcast_local_devices(ckpt_state)
def use_pretrained_if_shapes_match(params, ckpt_params):
if params.shape == ckpt_params.shape:
return ckpt_params
logging.warning('Shape mismatch! Initialized parameter: %s, '
'Pretrained parameter: %s.',
params.shape, ckpt_params.shape)
return params
params = jax.tree_multimap(
use_pretrained_if_shapes_match, params, ckpt_params)
return params, ckpt_state
def _init_d_params(
self, rng: chex.PRNGKey, logits: chex.Array,
batch: data_utils.Batch) -> Tuple[optax.Params, optax.OptState]:
init_net = jax.pmap(self.adversarial_fn.init)
rng = utils.bcast_local_devices(rng)
return init_net(rng, logits, batch)
def _write_images(self, writer, global_step: chex.Array,
images: Mapping[str, chex.Array]):
global_step = np.array(utils.get_first(global_step))
images_to_write = {
k: self._maybe_transpose_images(utils.get_first(v))
for k, v in images.items()}
writer.write_images(global_step, images_to_write)
def _load_data(self,
per_device_batch_size: int,
is_training: bool,
data_kwargs: ml_collections.ConfigDict
) -> Generator[data_utils.Batch, None, None]:
with contextlib.ExitStack() as stack:
if self.config.use_fake_data:
stack.enter_context(tfds.testing.mock_data(num_examples=128))
ds = data_utils.load_dataset(
is_training=is_training,
batch_dims=[jax.local_device_count(), per_device_batch_size],
transpose=self._should_transpose_images,
data_kwargs=data_kwargs)
return ds
def _build_train_input(self) -> Generator[data_utils.Batch, None, None]:
per_device_batch_size = get_per_device_batch_size(
self.config.training.batch_size)
return self._load_data(per_device_batch_size=per_device_batch_size,
is_training=True,
data_kwargs=self.config.data.train_kwargs)
def _update_func(
self,
params: optax.Params,
state: optax.OptState,
opt_state: optax.OptState,
global_step: chex.Array,
batch: data_utils.Batch,
rng: chex.PRNGKey,
old_g_params: Optional[optax.Params] = None,
old_g_state: Optional[optax.OptState] = None
) -> Tuple[Tuple[optax.Params, optax.Params], Tuple[
optax.OptState, optax.OptState], Tuple[optax.OptState, optax.OptState],
data_utils.ScalarDict, data_utils.Batch]:
"""Updates parameters ."""
# Obtain the parameters and discriminators.
(g_params, d_params) = params
(g_state, d_state) = state
(g_opt_state, d_opt_state) = opt_state
################
# Generator.
################
# Compute the loss for the generator.
inputs = self._run_postprocess_fn(rng, batch)
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
scaled_grads, (g_scalars, logits, preprocessed_inputs,
g_state) = grad_loss_fn(g_params, g_state, d_params, d_state,
inputs, rng, global_step,
old_g_params=old_g_params,
old_g_state=old_g_state)
# Update the generator.
grads = jax.lax.psum(scaled_grads, axis_name='i')
_, opt_apply = self.optimizer()
updates, g_opt_state = opt_apply(grads, g_opt_state, g_params)
g_params = optax.apply_updates(g_params, updates)
################
# Discriminator.
################
if not self._d_opt_state:
# No discriminator.
scalars = dict(global_step=global_step, **g_scalars)
return ((g_params, d_params), (g_state, d_state),
(g_opt_state, d_opt_state), scalars, preprocessed_inputs)
# Compute the loss for the discriminator.
grad_loss_fn = jax.grad(self._d_loss_fn, has_aux=True)
scaled_grads, (d_scalars, d_state) = grad_loss_fn(d_params, d_state, batch,
logits, rng)
# Update the discriminator.
grads = jax.lax.psum(scaled_grads, axis_name='i')
_, opt_apply = self.optimizer()
updates, d_opt_state = opt_apply(grads, d_opt_state, d_params)
d_params = optax.apply_updates(d_params, updates)
# For logging while training.
scalars = dict(
global_step=global_step,
**g_scalars,
**d_scalars)
return ((g_params, d_params), (g_state, d_state),
(g_opt_state, d_opt_state), scalars, preprocessed_inputs)
def step(self, global_step: chex.Array, rng: chex.PRNGKey, writer,
**unused_kwargs) -> chex.Array:
"""Perform one step of the model."""
batch = next(self._train_input)
batch = self._prepare_train_batch(rng, batch)
params, state, opt_state, scalars, preprocessed_batch = (
self._update_func(
params=(self._params, self._d_params),
state=(self._state, self._d_state),
opt_state=(self._opt_state, self._d_opt_state),
global_step=global_step,
batch=batch,
rng=rng,
old_g_params=self._adapt_params,
old_g_state=self._adapt_state))
(self._params, self._d_params) = params
(self._state, self._d_state) = state
(self._opt_state, self._d_opt_state) = opt_state
if self._train_adapter:
self._adapt_params, self._adapt_state = self._train_adapter.update(
self._params, self._state, utils.get_first(global_step))
images = batch['image']
preprocessed_images = preprocessed_batch['image']
if self.config.training.save_images:
self._write_images(writer, global_step,
{'images': images,
'preprocessed_images': preprocessed_images})
# Just return the tracking metrics on the first device for logging.
return utils.get_first(scalars)
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def _load_eval_data(
self,
per_device_batch_size: int) -> Generator[data_utils.Batch, None, None]:
return self._load_data(per_device_batch_size=per_device_batch_size,
is_training=False,
data_kwargs=self.config.data.test_kwargs)
def _full_eval(self, rng: chex.PRNGKey, scalars: data_utils.ScalarDict,
checkpoint_path: Optional[str] = None
) -> data_utils.ScalarDict:
if checkpoint_path:
ckpt_data = checkpointing.load_model(checkpoint_path)
params, state = ckpt_data['params'], ckpt_data['state']
params = utils.bcast_local_devices(params)
state = utils.bcast_local_devices(state)
else:
params, state = self._params, self._state
# Iterate over all the test sets.
original_subset = self.config.data.test_kwargs.load_kwargs.subset
for test_subset in getattr(self.config.data, 'test_sets', ('test',)):
self.config.data.test_kwargs.load_kwargs.subset = test_subset
test_scalars = jax.device_get(
self._eval_top1_accuracy(params, state, rng, is_final=True))
scalars.update(
{f'{test_subset}_{k}': v for k, v in test_scalars.items()})
self.config.data.test_kwargs.load_kwargs.subset = original_subset
return scalars
def evaluate(self, global_step: chex.Array, rng: chex.PRNGKey, writer,
**unused_args) -> data_utils.ScalarDict:
"""See base class."""
# Need to set these so `on_new_best_model` can do a full eval.
self._writer = writer
self._rng = rng
global_step = np.array(utils.get_first(global_step))
scalars = jax.device_get(
self._eval_top1_accuracy(self._params, self._state, rng))
if FLAGS.config.eval_specific_checkpoint_dir:
scalars = self._full_eval(rng, scalars,
FLAGS.config.eval_specific_checkpoint_dir)
logging.info('[Step %d] Eval scalars: %s', global_step, scalars)
return scalars
def on_new_best_model(self, best_state: ml_collections.ConfigDict):
scalars = self._full_eval(self._rng, {})
if self._writer is not None:
self._writer.write_scalars(best_state.global_step, scalars)
ckpt_data = {}
for self_key, ckpt_key in self.CHECKPOINT_ATTRS.items():
ckpt_data[ckpt_key] = getattr(self, self_key)
checkpoint_path = checkpointing.get_checkpoint_dir(FLAGS.config)
checkpointing.save_model(os.path.join(checkpoint_path, 'best.pkl'),
ckpt_data)
def _eval_top1_accuracy(self, params: optax.Params, state: optax.OptState,
rng: chex.PRNGKey, is_final: bool = False
) -> data_utils.ScalarDict:
"""Evaluates an epoch."""
total_batch_size = self.config.evaluation.batch_size
per_device_batch_size = total_batch_size
eval_data = self._load_eval_data(per_device_batch_size)
# If using an adaptive method.
if getattr(self.config.adapter, 'fn', None):
self.adapt_fn(params, state, rng, is_final_eval=is_final)
self.adaptor.set_up_eval()
# Accuracies for each set of corruptions.
labels = []
predicted_labels = []
features = []
for batch in eval_data:
if self.adaptor is not None:
logging.info('Running adaptation algorithm for evaluation.')
property_label = batch[self.config.data.label_property]
predicted_label, _ = self.adaptor.run(
self.eval_batch, property_label, inputs=batch, rng=rng)
else:
predicted_label, _ = self.eval_batch(params, state, batch, rng)
label = batch[self._label]
feature = batch[self.config.data.label_property]
# Concatenate along the pmapped direction.
labels.append(jnp.concatenate(label))
features.append(jnp.concatenate(feature))
predicted_labels.append(jnp.concatenate(predicted_label))
# And finally concatenate along the first dimension.
labels = jnp.concatenate(labels)
features = jnp.concatenate(features)
predicted_labels = jnp.concatenate(predicted_labels)
# Compute the metrics.
results = {}
for metric in self.config.evaluation.metrics:
logging.info('Evaluating metric %s.', str(metric))
metric_fn = getattr(metrics, metric, None)
results[metric] = metric_fn(labels, features, predicted_labels, None)
# Dump all the results by saving pickled results to disk.
out_dir = checkpointing.get_checkpoint_dir(FLAGS.config)
dataset = self.config.data.test_kwargs.load_kwargs.subset
results_path = os.path.join(out_dir, f'results_{dataset}')
if not tf.io.gfile.exists(results_path):
tf.io.gfile.makedirs(results_path)
# Save numpy arrays.
with tf.io.gfile.GFile(
os.path.join(results_path, 'results.pkl'), 'wb') as f:
# Using protocol 4 as it's the default from Python 3.8 on.
pickle.dump({'all_labels': labels, 'all_features': features,
'all_predictions': predicted_labels}, f, protocol=4)
return results
def _eval_batch(self, params: optax.Params, state: optax.OptState,
inputs: data_utils.Batch,
rng: chex.PRNGKey
) -> Tuple[data_utils.ScalarDict, chex.Array]:
"""Evaluates a batch."""
logits, _ = self.forward.apply(
params, state, rng, inputs, is_training=False)
inputs['one_hot_label'] = hk.one_hot(
inputs[self._label], self.config.data.n_classes)
(_, logits), _ = self.learner_fn.apply(params, state, rng, inputs)
softmax_predictions = jax.nn.softmax(logits, axis=-1)
predicted_label = jnp.argmax(softmax_predictions, axis=-1)
return predicted_label, logits
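# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the experiment class above): the update in
# `_update_func` follows the usual JAX/optax pattern of grad -> cross-device
# psum -> optimizer update -> optax.apply_updates. The toy loss and parameters
# below are hypothetical and only demonstrate that pattern on a single device.
# ---------------------------------------------------------------------------
def _optax_update_sketch():
    import jax
    import jax.numpy as jnp
    import optax

    def toy_loss(params, x):
        # A simple quadratic loss so jax.grad has something to differentiate.
        return jnp.mean((params['w'] * x - 1.0) ** 2)

    params = {'w': jnp.ones(())}
    optimizer = optax.sgd(learning_rate=0.1)
    opt_state = optimizer.init(params)
    grads = jax.grad(toy_loss)(params, jnp.array([1.0, 2.0, 3.0]))
    # Inside a pmapped update the gradients would first be summed across
    # devices with jax.lax.psum(grads, axis_name='i').
    updates, opt_state = optimizer.update(grads, opt_state, params)
    return optax.apply_updates(params, updates)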
|
"""
This is the definition of a spot light.
A spot light emits light in one direction, within some angle around it.
"""
import numpy as np
from .light import Light
from ..math import Vec3
from ..math import Ray
from ..renderer.rgbimage import *
class SpotLight(Light):
"""Class describing a spot light source"""
def __init__(self, position: Vec3, direction: Vec3, p: int, a = 0.02, b = 0.1, c = 0) -> None:
"""
Constructor:
param position: position of the light
param direction: direction of the light
param p: controls how much the spotlight is focussed
params a, b, c: quadratic equation coefficients for ad^2 + bd + c
"""
Light.__init__(self, "SpotLight")
self.position = position
self.direction = direction
self.p = p
self.dir_ray = Ray(position, self.direction - self.position)
self.a = a
self.b = b
self.c = c
def intensity(self, shadowray):
"""
Point intensity calculation:
param shadowray: ray from light to hitrecord point
"""
d = shadowray.direction.length()
f_att = np.clip(1 / (self.c + self.b * d + self.a * d * d), 0, 1)
cos_angle = self.direction.dot(-shadowray.direction) / (self.direction.length() * shadowray.direction.length())
return f_att * (cos_angle ** self.p)
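# ---------------------------------------------------------------------------
# Minimal numeric sketch of the intensity model above, using plain numpy
# arrays in place of the package's Vec3/Ray types (an assumption made purely
# for illustration). Attenuation is 1 / (c + b*d + a*d^2), clipped to [0, 1],
# and the spot falloff is cos(angle)**p between the spotlight direction and
# the (negated) shadow-ray direction.
# ---------------------------------------------------------------------------
def _spotlight_intensity_sketch():
    a, b, c, p = 0.02, 0.1, 0.0, 8
    spot_dir = np.array([0.0, -1.0, 0.0])    # direction the spotlight faces
    shadow_dir = np.array([0.1, 1.0, 0.0])   # direction of the shadow ray
    d = np.linalg.norm(shadow_dir)
    f_att = np.clip(1.0 / (c + b * d + a * d * d), 0.0, 1.0)
    cos_angle = spot_dir.dot(-shadow_dir) / (np.linalg.norm(spot_dir) * d)
    return f_att * (cos_angle ** p)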
|
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Author: Mike Danielczuk
"""
import logging
import numpy as np
class State(object):
""" Abstract class for states """
pass
class ObjectState(State):
""" The state of an object
Attributes
----------
key : str
string identifying the object
mesh : Trimesh
stores geometry of the object
pose : RigidTransform
describes the pose of the object in the world
sim_id : int
id for the object in sim
"""
def __init__(self,
key,
mesh,
pose=None,
sim_id=-1):
self.key = key
self.mesh = mesh
self.pose = pose
self.sim_id = sim_id
@property
def center_of_mass(self):
return self.mesh.center_mass
@property
def density(self):
return self.mesh.density
class HeapState(State):
""" State of a set of objects in a heap.
Attributes
----------
obj_states : list of ObjectState
state of all objects in a heap
"""
    def __init__(self, workspace_states, obj_states, metadata=None):
        self.workspace_states = workspace_states
        self.obj_states = obj_states
        self.metadata = metadata if metadata is not None else {}
@property
def workspace_keys(self):
return [s.key for s in self.workspace_states]
@property
def workspace_meshes(self):
return [s.mesh for s in self.workspace_states]
@property
def workspace_sim_ids(self):
return [s.sim_id for s in self.workspace_states]
@property
def obj_keys(self):
return [s.key for s in self.obj_states]
@property
def obj_meshes(self):
return [s.mesh for s in self.obj_states]
@property
def obj_sim_ids(self):
return [s.sim_id for s in self.obj_states]
@property
def num_objs(self):
return len(self.obj_keys)
def __getitem__(self, key):
return self.state(key)
    def state(self, key):
        try:
            return self.obj_states[self.obj_keys.index(key)]
        except ValueError:
            try:
                return self.workspace_states[self.workspace_keys.index(key)]
            except ValueError:
                logging.warning('Object %s not in pile!', key)
                return None
class CameraState(State):
""" State of a camera.
Attributes
----------
mesh : Trimesh
triangular mesh representation of object geometry
pose : RigidTransform
pose of camera with respect to the world
intrinsics : CameraIntrinsics
intrinsics of the camera in the perspective projection model.
"""
def __init__(self,
frame,
pose,
intrinsics):
self.frame = frame
self.pose = pose
self.intrinsics = intrinsics
@property
def height(self):
return self.intrinsics.height
@property
def width(self):
return self.intrinsics.width
@property
def aspect_ratio(self):
return self.width / float(self.height)
@property
def yfov(self):
return 2.0 * np.arctan(self.height / (2.0 * self.intrinsics.fy))
class HeapAndCameraState(object):
""" State of a heap and camera. """
def __init__(self, heap_state, cam_state):
self.heap = heap_state
self.camera = cam_state
@property
def obj_keys(self):
return self.heap.obj_keys
@property
def num_objs(self):
return self.heap.num_objs
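# ---------------------------------------------------------------------------
# Numeric sketch of the formula used by CameraState.yfov: for a pinhole
# camera the vertical field of view is 2 * arctan(height / (2 * fy)). The
# intrinsics below are hypothetical and chosen only to show the computation.
# ---------------------------------------------------------------------------
def _yfov_sketch():
    height, fy = 480, 600.0
    return 2.0 * np.arctan(height / (2.0 * fy))   # ~0.761 rad (~43.6 degrees)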
|
from distutils.core import setup
setup(
name='mgprint',
packages=['mgprint'],
version='0.1',
license='MIT',
description='Magic printer for Python CLI',
author='Fernando Olivera',
author_email='ferxxel@gmail.com',
url='https://github.com/fergeek/mgprint',
download_url='https://github.com/fergeek/mgprint/archive/v0.1.tar.gz',
python_requires='>=2.7',
keywords=[
'mgprint', 'cprint',
'print', 'magicprint',
'magic', '8bits',
'256bits', '256color',
'16bits', 'cli', 'hex',
'clicolor', 'rgb', 'html',
'console', 'terminal',
'easy', 'stable', 'linux',
'win', 'cmd', 'macos',
'clicolor', 'cprint',
'color', 'ascii'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'],
)
|
import writer_main.writer_pkg as pkg
def init():
pkg.init()
|
import Algorithmia
import spacy
"""
This package set comes preinstalled with every available small language package provided by spacy.
Pick your language from the following list: 'en', 'es', 'pt', 'fr', 'it', 'de', and 'nl'.
You may change the language at runtime, but bear in mind that reloading the model incurs a performance cost.
Example Input:
"I like New York in Autumn, the trees are beautiful."
Expected Output:
{
"entities found": [
{"label": "GPE", "text": "New York"},
{"label": "DATE", "text": "Autumn"}
]
}
"""
LANG = "en"
def load_spacy_lang(language):
lang_model = spacy.load(language)
return lang_model
def apply(input):
"""
This algorithm performs "Named Entity Recognition" on the sample input document.
It expects the input to be an escaped string.
:param input: An escaped string, in the language defined by $LANG.
:return: a list of detected entities.
"""
document = nlp(input)
named_entities = []
for ent in document.ents:
entity = {"label": ent.label_, "text": ent.text}
named_entities.append(entity)
output = {"entities found": named_entities}
return output
nlp = load_spacy_lang(LANG)
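# ---------------------------------------------------------------------------
# Usage sketch: with the small English model installed (e.g. via
# `python -m spacy download en` for spaCy 2.x), calling apply() on the
# sample sentence from the module docstring should yield the entities
# listed there.
#
#     apply("I like New York in Autumn, the trees are beautiful.")
#     # -> {"entities found": [{"label": "GPE", "text": "New York"},
#     #                        {"label": "DATE", "text": "Autumn"}]}
# ---------------------------------------------------------------------------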
|
import numpy as np
import pandas as pd
import scipy
import timeit
import time
import warnings
import multiprocessing
from copy import deepcopy, copy
from joblib import Parallel, delayed
from scipy import interp
from scipy.stats import norm
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.utils import resample
from tqdm import tqdm
from bokeh.plotting import ColumnDataSource, figure
from bokeh.models import Band, HoverTool, Range1d, LabelSet, Label
from ..utils import binary_metrics, dict_median, smooth, binary_evaluation
def roc(Y, stat, test=None, bootnum=100, legend=True, grid_line=False, label_font_size="10pt", xlabel="1 - Specificity", ylabel="Sensitivity", width=320, height=315, method='BCA', plot='data', legend_basic=False):
# Set positive
auc_check = roc_auc_score(Y, stat)
if auc_check > 0.5:
pos = 1
else:
pos = 0
# Set Linspace for FPR
fpr_linspace = np.linspace(0, 1, 1000) # Make it 1000
# Calculate for STAT
fpr_stat, tpr_stat, _ = metrics.roc_curve(Y, stat, pos_label=pos, drop_intermediate=False)
auc_stat = metrics.auc(fpr_stat, tpr_stat)
    # Interpolate the ROC curve onto the common FPR grid (vertical averaging)
tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
tpr_list = tpr_stat
# tpr0_stat = tpr_stat[fpr_stat == 0][-1]
# tpr_stat = np.concatenate([[tpr0_stat], tpr_stat[fpr_stat > 0]])
# fpr_stat = np.concatenate([[0], fpr_stat[fpr_stat > 0]])
# # Vertical averaging
# idx = [np.abs(i - fpr_stat).argmin() for i in fpr_linspace]
# tpr_list = np.array(tpr_stat[idx])
binary_stats_train_dict = binary_evaluation(Y, stat)
binary_stats_train = []
for key, value in binary_stats_train_dict.items():
binary_stats_train.append(value)
binary_stats_train = np.array(binary_stats_train)
binary_stats_train_boot = []
tpr_bootstat = []
if bootnum > 1:
for i in range(bootnum):
bootidx = resample(list(range(len(Y))), stratify=Y) # Default stratified
# Get Yscore and Y for each bootstrap and calculate
Yscore_boot = stat[bootidx]
Ytrue_boot = Y[bootidx]
fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate=False)
auc_boot = metrics.auc(fpr_boot, tpr_boot)
if auc_boot < 0.5:
fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=abs(1 - pos), drop_intermediate=False)
bstat_loop = binary_evaluation(Ytrue_boot, Yscore_boot)
bstat_list = []
for key, value in bstat_loop.items():
bstat_list.append(value)
binary_stats_train_boot.append(bstat_list)
# Drop intermediates when fpr = 0
tpr0_boot = tpr_boot[fpr_boot == 0][-1]
tpr_boot = np.concatenate([[tpr0_boot], tpr_boot[fpr_boot > 0]])
fpr_boot = np.concatenate([[0], fpr_boot[fpr_boot > 0]])
# Vertical averaging
idx = [np.abs(i - fpr_boot).argmin() for i in fpr_linspace]
tpr_bootstat.append(np.array(tpr_boot[idx]))
binary_stats_train_boot = np.array(binary_stats_train_boot)
if bootnum > 1:
if method == 'BCA':
binary_stats_jack_boot = []
jackidx = []
base = np.arange(0, len(Y))
for i in base:
jack_delete = np.delete(base, i)
jackidx.append(jack_delete)
tpr_jackstat = []
for i in jackidx:
# Get Yscore and Y for each bootstrap and calculate
Yscore_jack = stat[i]
Ytrue_jack = Y[i]
fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate=False)
auc_jack = metrics.auc(fpr_jack, tpr_jack)
                if auc_jack < 0.5:
fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=abs(1 - pos), drop_intermediate=False)
jstat_loop = binary_evaluation(Ytrue_jack, Yscore_jack)
jstat_list = []
for key, value in jstat_loop.items():
jstat_list.append(value)
binary_stats_jack_boot.append(jstat_list)
# Drop intermediates when fpr = 0
                tpr0_jack = tpr_jack[fpr_jack == 0][-1]
tpr_jack = np.concatenate([[tpr0_jack], tpr_jack[fpr_jack > 0]])
fpr_jack = np.concatenate([[0], fpr_jack[fpr_jack > 0]])
# Vertical averaging
idx = [np.abs(i - fpr_jack).argmin() for i in fpr_linspace]
tpr_jackstat.append(np.array(tpr_jack[idx]))
binary_stats_jack_boot = np.array(binary_stats_jack_boot)
if bootnum > 1:
if method == 'BCA':
tpr_ib = bca_method(tpr_bootstat, tpr_list, tpr_jackstat)
tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
stat_ib = bca_method(binary_stats_train_boot, binary_stats_train, binary_stats_jack_boot)
elif method == 'Per':
tpr_ib = per_method(tpr_bootstat, tpr_list)
tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
stat_ib = per_method(binary_stats_train_boot, binary_stats_train)
stat_ib = list(stat_ib)
elif method == 'CPer':
tpr_ib = cper_method(tpr_bootstat, tpr_list)
tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
stat_ib = cper_method(binary_stats_train_boot, binary_stats_train)
stat_ib = list(stat_ib)
else:
            raise ValueError("method has to be 'BCA', 'Per', or 'CPer'.")
        tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0)  # Add end 1 so lengths match fpr_linspace below
        #stat_ib = np.array(stat_ib).T
        #print(stat_ib)
# ROC up
# for i in range(len(tpr_ib.T)):
# for j in range(1, len(tpr_ib)):
# if tpr_ib[j, i] < tpr_ib[j - 1, i]:
# tpr_ib[j, i] = tpr_ib[j - 1, i]
# Get tpr mid
if method != 'Per':
tpr_ib[:, 2] = (tpr_ib[:, 0] + tpr_ib[:, 1]) / 2
for i in range(len(stat_ib)):
stat_ib[i][2] = binary_stats_train[i]
else:
tpr_ib = []
tpr_ib.append(tpr_list)
tpr_ib.append(tpr_list)
tpr_ib.append(tpr_list)
tpr_ib = np.array(tpr_ib)
tpr_ib = tpr_ib.T
tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0) # Add end 1
binary_stats_train_dict = binary_evaluation(Y, stat)
binary_stats_train = []
for key, value in binary_stats_train_dict.items():
binary_stats_train.append(value)
stat_ib = []
stat_ib.append(binary_stats_train)
stat_ib.append(binary_stats_train)
stat_ib.append(binary_stats_train)
# Test if available
if test is not None:
test_y = test[0]
test_ypred = test[1]
fpr_test, tpr_test, _ = metrics.roc_curve(test_y, test_ypred, pos_label=pos, drop_intermediate=False)
auc_test = metrics.auc(fpr_test, tpr_test)
binary_stats_test_dict = binary_evaluation(test_y, test_ypred)
binary_stats_test = []
for key, value in binary_stats_test_dict.items():
binary_stats_test.append(value)
stat_ib.append(binary_stats_test)
        # Interpolate the test ROC curve onto the common FPR grid
tpr_test = interp(fpr_linspace, fpr_test, tpr_test)
tpr_test = np.insert(tpr_test, 0, 0) # Add starting 0
tpr_test = np.concatenate([tpr_test, [1]])
# Drop intermediates when fpr = 0
# tpr0_test = tpr_test[fpr_test == 0][-1]
# tpr_test = np.concatenate([[tpr0_test], tpr_test[fpr_test > 0]])
# fpr_test = np.concatenate([[0], fpr_test[fpr_test > 0]])
# # Vertical averaging
# idx_test = [np.abs(i - fpr_test).argmin() for i in fpr_linspace]
# tpr_test = tpr_test[idx_test]
# tpr_test = np.insert(tpr_test, 0, 0) # Add starting 0
fpr_linspace = np.insert(fpr_linspace, 0, 0) # Add starting 0
fpr_linspace = np.concatenate((fpr_linspace, [1])) # Add end 1
# if 'data' plot original data instead of median
if plot == 'data':
tpr_list_linspace = np.concatenate([[0], tpr_list]) # Add starting 0
tpr_list_linspace = np.concatenate([tpr_list_linspace, [1]]) # Add starting 0
tpr_ib[:, 2] = tpr_list_linspace
elif plot == 'median':
pass
else:
raise ValueError("plot must be 'data' or 'median'")
# Check upper limit / lower limit
for i in tpr_ib:
if i[0] > i[2]:
i[0] = i[2]
if i[1] < i[2]:
i[1] = i[2]
# Calculate AUC
auc_ib_low = metrics.auc(fpr_linspace, tpr_ib[:, 0])
auc_ib_upp = metrics.auc(fpr_linspace, tpr_ib[:, 1])
auc_ib_mid = metrics.auc(fpr_linspace, tpr_ib[:, 2])
auc_ib = np.array([auc_ib_low, auc_ib_upp, auc_ib_mid])
# Plot
spec = 1 - fpr_linspace
ci_ib = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
ci_oob = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
fig = figure(title="",
plot_width=width,
plot_height=height,
x_axis_label=xlabel,
y_axis_label=ylabel,
x_range=(-0.06, 1.06),
y_range=(-0.06, 1.06))
fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1) # Equal Distribution Line
# Plot IB
data_ib = {"x": fpr_linspace,
"y": tpr_ib[:, 2],
"lowci": tpr_ib[:, 0],
"uppci": tpr_ib[:, 1],
"spec": spec,
"ci": ci_ib}
source_ib = ColumnDataSource(data=data_ib)
# Line IB
if bootnum > 1:
if legend_basic == True:
legend_ib = "Train"
else:
legend_ib = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
figline_ib = fig.line("x",
"y",
color="green",
line_width=2.5,
alpha=0.7,
legend=legend_ib,
source=source_ib)
fig.add_tools(HoverTool(renderers=[figline_ib],
tooltips=[("Specificity", "@spec{1.111}"),
("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
# CI Band IB
figband_ib = Band(base="x",
lower="lowci",
upper="uppci",
level="underlay",
fill_alpha=0.1,
line_width=0.5,
line_color="black",
fill_color="green",
source=source_ib)
fig.add_layout(figband_ib)
else:
if legend_basic == True:
legend_ib = "Train"
else:
legend_ib = "Train (AUC = {:.2f})".format(auc_ib[2])
figline_ib = fig.line("x",
"y",
color="green",
line_width=2.5,
alpha=0.7,
legend=legend_ib,
source=source_ib)
fig.add_tools(HoverTool(renderers=[figline_ib],
tooltips=[("Specificity", "@spec{1.111}"),
("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
# Line Test
if test is not None:
if legend_basic == True:
legend_oob = "Test"
else:
legend_oob = "Test (AUC = {:.2f})".format(auc_test)
# Plot IB
data_test = {"x": fpr_linspace,
"y": tpr_test,
"spec": spec}
source_test = ColumnDataSource(data=data_test)
figline_test = fig.line("x",
"y",
color="orange",
line_width=2.5,
alpha=0.7,
legend=legend_oob,
source=source_test)
fig.add_tools(HoverTool(renderers=[figline_test],
tooltips=[("Specificity", "@spec{1.111}"),
("Sensitivity", "@y{1.111}"), ]))
if grid_line == False:
fig.xgrid.visible = False
fig.ygrid.visible = False
fig.legend.visible = False
if legend == True:
if legend_basic == True:
fig.legend.visible = True
fig.legend.location = "bottom_right"
else:
if test is None:
oob_text = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text_add = Label(x=0.38, y=0.02,
text=oob_text, render_mode='css', text_font_size= '9pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.12, bottom=0, left=0.30, right=1, color='white', alpha=1,line_color='black')
fig.circle(0.34,0.06,color='green',size=8)
else:
ib_text = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text = "Test (AUC = {:.2f})".format(auc_test)
ib_text_add = Label(x=0.38, y=0.10,
text=ib_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.38, y=0.02,
text=oob_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.20, bottom=0, left=0.30, right=1, color='white', alpha=1,line_color='black')
fig.circle(0.34,0.14,color='green',size=8)
                fig.circle(0.34,0.06,color='orange',size=8)
if legend_basic == True:
return fig, stat_ib
else:
return fig
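# ---------------------------------------------------------------------------
# Illustrative sketch of the "vertical averaging" used throughout this module:
# each bootstrap ROC curve is interpolated onto a common FPR grid so that
# percentile confidence bands can be taken pointwise over TPR. The random data
# below is hypothetical and only demonstrates the mechanics.
# ---------------------------------------------------------------------------
def _vertical_averaging_sketch(n_boot=50, seed=0):
    rng = np.random.RandomState(seed)
    y = rng.randint(0, 2, 200)
    score = y + rng.normal(scale=0.8, size=200)
    fpr_grid = np.linspace(0, 1, 101)
    tpr_boot = []
    for _ in range(n_boot):
        idx = resample(np.arange(len(y)), stratify=y, random_state=rng)
        fpr, tpr, _ = metrics.roc_curve(y[idx], score[idx])
        tpr_boot.append(np.interp(fpr_grid, fpr, tpr))  # vertical averaging
    tpr_boot = np.array(tpr_boot)
    low, mid, upp = np.percentile(tpr_boot, [2.5, 50, 97.5], axis=0)
    return fpr_grid, low, mid, upp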
def roc_boot(Y,
stat,
bootstat,
bootstat_oob,
bootidx,
bootidx_oob,
method,
smoothval=0,
jackstat=None,
jackidx=None,
xlabel="1 - Specificity",
ylabel="Sensitivity",
width=320,
height=315,
label_font_size="10pt",
legend=True,
grid_line=False,
plot_num=0,
plot='data',
test=None,
legend_basic=False,
train=None,
ci_only=False):
# Set positive
auc_check = roc_auc_score(Y, stat)
if auc_check > 0.5:
pos = 1
else:
pos = 0
# Set Linspace for FPR
fpr_linspace = np.linspace(0, 1, 1000) # Make it 1000
# Calculate for STAT
fpr_stat, tpr_stat, _ = metrics.roc_curve(Y, stat, pos_label=pos, drop_intermediate=False)
auc_stat = metrics.auc(fpr_stat, tpr_stat)
tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
tpr_list = tpr_stat
# Calculate for BOOTSTAT (IB)
pos_loop = []
tpr_bootstat = []
for i in range(len(bootidx)):
# Get Yscore and Y for each bootstrap and calculate
Yscore_boot = bootstat[i]
Ytrue_boot = Y[bootidx[i]]
fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate=False)
auc_boot = metrics.auc(fpr_boot, tpr_boot)
if auc_boot > 0.5:
pos_loop.append(pos)
else:
fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=abs(1 - pos), drop_intermediate=False)
pos_loop.append(abs(1 - pos))
# Drop intermediates when fpr = 0
tpr0_boot = tpr_boot[fpr_boot == 0][-1]
tpr_boot = np.concatenate([[tpr0_boot], tpr_boot[fpr_boot > 0]])
fpr_boot = np.concatenate([[0], fpr_boot[fpr_boot > 0]])
# Vertical averaging
idx = [np.abs(i - fpr_boot).argmin() for i in fpr_linspace]
tpr_bootstat.append(np.array(tpr_boot[idx]))
# tpr_boot = interp(fpr_linspace, fpr_boot, tpr_boot)
# tpr_bootstat.append(tpr_boot)
if method == 'BCA':
tpr_jackstat = []
for i in range(len(jackidx)):
# Get Yscore and Y for each bootstrap and calculate
Yscore_jack = jackstat[i]
Ytrue_jack = Y[jackidx[i]]
fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate=False)
auc_jack = metrics.auc(fpr_jack, tpr_jack)
# if auc_jack < 0.5:
# fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=abs(1 - pos), drop_intermediate=False)
# Drop intermediates when fpr = 0
tpr0_jack = tpr_jack[fpr_jack == 0][-1]
tpr_jack = np.concatenate([[tpr0_jack], tpr_jack[fpr_jack > 0]])
fpr_jack = np.concatenate([[0], fpr_jack[fpr_jack > 0]])
# Vertical averaging
idx = [np.abs(i - fpr_jack).argmin() for i in fpr_linspace]
tpr_jackstat.append(np.array(tpr_jack[idx]))
#save_stat = [tpr_bootstat, tpr_list, tpr_jackstat, fpr_linspace]
if method == 'BCA':
tpr_ib = bca_method(tpr_bootstat, tpr_list, tpr_jackstat)
if method == 'Per':
tpr_ib = per_method(tpr_bootstat, tpr_list)
if method == 'CPer':
tpr_ib = cper_method(tpr_bootstat, tpr_list)
tpr_ib = np.array(tpr_ib)
# ROC up
if method != 'Per':
for i in range(len(tpr_ib.T)):
for j in range(1, len(tpr_ib)):
if tpr_ib[j, i] < tpr_ib[j - 1, i]:
tpr_ib[j, i] = tpr_ib[j - 1, i]
# # Check upper limit / lower limit
if method != 'Per':
for i in range(len(tpr_ib)):
if tpr_ib[i][0] > tpr_list[i]:
tpr_ib[i][0] = tpr_list[i]
if tpr_ib[i][1] < tpr_list[i]:
tpr_ib[i][1] = tpr_list[i]
tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0) # Add end 1
# Get tpr mid
if method != 'Per':
tpr_ib[:, 2] = (tpr_ib[:, 0] + tpr_ib[:, 1]) / 2
#print('testing.')
# Calculate for OOB
auc_bootstat_oob = []
tpr_bootstat_oob = []
for i in range(len(bootidx_oob)):
# Get Yscore and Y for each bootstrap oob and calculate
Yscore_boot_oob = bootstat_oob[i]
Ytrue_boot_oob = Y[bootidx_oob[i]]
fpr_boot_oob, tpr_boot_oob, _ = metrics.roc_curve(Ytrue_boot_oob, Yscore_boot_oob, pos_label=pos, drop_intermediate=False)
auc_boot_oob = metrics.auc(fpr_boot_oob, tpr_boot_oob)
# if auc_boot_oob < 0.5:
# fpr_boot_oob, tpr_boot_oob, _ = metrics.roc_curve(Ytrue_boot_oob, Yscore_boot_oob, pos_label=abs(1-pos_loop[i]), drop_intermediate=False)
auc_boot_oob = metrics.auc(fpr_boot_oob, tpr_boot_oob)
auc_bootstat_oob.append(auc_boot_oob)
# Drop intermediates when fpr = 0
tpr0_boot_oob = tpr_boot_oob[fpr_boot_oob == 0][-1]
tpr_boot_oob = np.concatenate([[tpr0_boot_oob], tpr_boot_oob[fpr_boot_oob > 0]])
fpr_boot_oob = np.concatenate([[0], fpr_boot_oob[fpr_boot_oob > 0]])
# Vertical averaging
idx_oob = [np.abs(i - fpr_boot_oob).argmin() for i in fpr_linspace]
tpr_bootstat_oob.append(np.array(tpr_boot_oob[idx_oob]))
#tpr_boot_oob = interp(fpr_linspace, fpr_boot_oob, tpr_boot_oob)
#tpr_bootstat_oob.append(tpr_boot_oob)
# Get CI for tpr
tpr_oob_lowci = np.percentile(tpr_bootstat_oob, 2.5, axis=0)
tpr_oob_medci = np.percentile(tpr_bootstat_oob, 50, axis=0)
tpr_oob_uppci = np.percentile(tpr_bootstat_oob, 97.5, axis=0)
tpr_oob = np.array([tpr_oob_lowci, tpr_oob_uppci, tpr_oob_medci]).T
#tpr_oob = per_method(tpr_bootstat_oob, tpr_list)
auc_oob = per_method(auc_bootstat_oob, auc_stat)
tpr_oob = np.concatenate((np.zeros((1, 3)), tpr_oob), axis=0) # Add starting 0
tpr_oob = np.concatenate((tpr_oob, np.ones((1, 3))), axis=0) # Add end 1
# ROC up
if method != 'Per':
for i in range(len(tpr_oob.T)):
for j in range(1, len(tpr_oob)):
if tpr_oob[j, i] < tpr_oob[j - 1, i]:
tpr_oob[j, i] = tpr_oob[j - 1, i]
# Test if available
if test is not None:
test_y = test[0]
test_ypred = test[1]
fpr_test, tpr_test, _ = metrics.roc_curve(test_y, test_ypred, pos_label=pos, drop_intermediate=False)
auc_test = metrics.auc(fpr_test, tpr_test)
# Drop intermediates when fpr = 0
# tpr0_test= tpr_test[fpr_test == 0][-1]
# tpr_test = np.concatenate([[tpr0_test], tpr_test[fpr_test > 0]])
# fpr_test = np.concatenate([[0], fpr_test[fpr_test > 0]])
# # Vertical averaging
# idx_test = [np.abs(i - fpr_test).argmin() for i in fpr_linspace]
# tpr_test = tpr_test[idx_test]
tpr_test = interp(fpr_linspace, fpr_test, tpr_test)
tpr_test = np.insert(tpr_test, 0, 0) # Add starting 0
tpr_test = np.concatenate((tpr_test,[1]))
tpr_oob[:, 2] = tpr_test
# if 'data' plot original data instead of median
if train is not None:
fpr_stat, tpr_stat, _ = metrics.roc_curve(train[0], train[1], pos_label=pos, drop_intermediate=False)
tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
tpr_list = tpr_stat
if plot == 'data':
tpr_list_linspace = np.concatenate([[0], tpr_list]) # Add starting 0
tpr_list_linspace = np.concatenate([tpr_list_linspace,[1]]) # Add starting 0
tpr_ib[:,2] = tpr_list_linspace
elif plot == 'median':
pass
else:
pass
# else:
# raise ValueError("plot must be 'data' or 'median'")
fpr_linspace = np.insert(fpr_linspace, 0, 0) # Add starting 0
fpr_linspace = np.concatenate((fpr_linspace, [1])) # Add end 1
# Calculate AUC
auc_ib_low = metrics.auc(fpr_linspace, tpr_ib[:, 0])
auc_ib_upp = metrics.auc(fpr_linspace, tpr_ib[:, 1])
auc_ib_mid = metrics.auc(fpr_linspace, tpr_ib[:, 2])
auc_ib = np.array([auc_ib_low, auc_ib_upp, auc_ib_mid])
auc_oob_low = metrics.auc(fpr_linspace, tpr_oob[:, 0])
auc_oob_upp = metrics.auc(fpr_linspace, tpr_oob[:, 1])
auc_oob_mid = metrics.auc(fpr_linspace, tpr_oob[:, 2])
auc_oob = np.array([auc_oob_low, auc_oob_upp, auc_oob_mid])
# print(auc_ib)
# print(auc_oob)
# print("AUC IB {} ({},{})".format(auc_ib[2], auc_ib[0], auc_ib[1]))
# print("AUC OOB {} ({},{})".format(auc_oob[2], auc_oob[0], auc_oob[1]))
# Smooth if set
if smoothval > 1:
tpr_ib[:, 0] = smooth(tpr_ib[:, 0], smoothval)
tpr_ib[:, 1] = smooth(tpr_ib[:, 1], smoothval)
tpr_ib[:, 2] = smooth(tpr_ib[:, 2], smoothval)
tpr_oob[:, 0] = smooth(tpr_oob[:, 0], smoothval)
tpr_oob[:, 1] = smooth(tpr_oob[:, 1], smoothval)
tpr_oob[:, 2] = smooth(tpr_oob[:, 2], smoothval)
        if test is not None:
            tpr_test = smooth(tpr_test, smoothval)
# Plot
spec = 1 - fpr_linspace
ci_ib = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
    ci_oob = (tpr_oob[:, 1] - tpr_oob[:, 0]) / 2
fig = figure(title="",
plot_width=width,
plot_height=height,
x_axis_label=xlabel,
y_axis_label=ylabel,
x_range=(-0.06, 1.06),
y_range=(-0.06, 1.06))
fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1)
# Plot IB
data_ib = {"x": fpr_linspace,
"y": tpr_ib[:, 2],
"lowci": tpr_ib[:, 0],
"uppci": tpr_ib[:, 1],
"spec": spec,
"ci": ci_ib}
source_ib = ColumnDataSource(data=data_ib)
# Line IB
if plot_num in [0, 1, 2, 4]:
if legend_basic == True:
legend_text = "Train"
else:
legend_text = "IB (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
if ci_only == False:
figline_ib = fig.line("x",
"y",
color="green",
line_width=2.5,
alpha=0.7,
legend=legend_text,
source=source_ib)
fig.add_tools(HoverTool(renderers=[figline_ib],
tooltips=[("Specificity", "@spec{1.111}"),
("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
# CI Band IB
figband_ib = Band(base="x",
lower="lowci",
upper="uppci",
level="underlay",
fill_alpha=0.1,
line_width=0.5,
line_color="black",
fill_color="green",
source=source_ib)
fig.add_layout(figband_ib)
figlegend_ib = fig.rect([10],[20],[5],[5], color="green", fill_alpha=0.1, line_width=0.5, line_color="grey", legend="IB (95% CI)")
# Plot OOB
data_oob = {"x": fpr_linspace,
"y": tpr_oob[:, 2],
"lowci": tpr_oob[:, 0],
"uppci": tpr_oob[:, 1],
"spec": spec,
"ci": ci_oob}
source_oob = ColumnDataSource(data=data_oob)
# Line OOB
if plot_num in [0, 1, 3, 4]:
if legend_basic == True:
legend_text = "Test"
else:
legend_text = "OOB (AUC = {:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
if ci_only == False:
figline = fig.line("x",
"y",
color="orange",
line_width=2.5,
alpha=0.7,
legend=legend_text,
source=source_oob)
fig.add_tools(HoverTool(renderers=[figline],
tooltips=[("Specificity", "@spec{1.111}"),
("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
# CI Band OOB
figband_oob = Band(base="x",
lower="lowci",
upper="uppci",
level="underlay",
fill_alpha=0.1,
line_width=0.5,
line_color="black",
fill_color="orange",
source=source_oob)
fig.add_layout(figband_oob)
        figlegend_oob = fig.rect([10],[20],[5],[5], color="orange", fill_alpha=0.1, line_width=0.5, line_color="grey", legend="OOB (95% CI)")
# Line Test
# if test is not None:
# if legend_basic == True:
# legend_text = "Test"
# else:
# legend_text = "Test (AUC = {:.2f})".format(auc_test)
# # Plot IB
# data_test = {"x": fpr_linspace,
# "y": tpr_test,
# "spec": spec}
# source_test = ColumnDataSource(data=data_test)
# figline_test = fig.line("x",
# "y",
# color="purple",
# line_width=2.5,
# alpha=0.8,
# legend=legend_text,
# line_dash="dashed",
# source=source_test)
# fig.add_tools(HoverTool(renderers=[figline_test],
# tooltips=[("Specificity", "@spec{1.111}"),
# ("Sensitivity", "@y{1.111}"), ]))
if grid_line == False:
fig.xgrid.visible = False
fig.ygrid.visible = False
# Legend Manually because of bokeh issue
ib_text = "IB (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text = "OOB (AUC = {:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0])/2)
fig.legend.visible = False
if legend_basic == True:
fig.legend.location = "bottom_right"
fig.legend.visible = True
else:
if test is not None:
if legend == True:
ib_text_add = Label(x=0.38, y=0.18,
text=ib_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.38, y=0.10,
text=oob_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(oob_text_add)
test_text = "Test (AUC = {:.2f})".format(auc_test)
test_text_add = Label(x=0.38, y=0.02,
text=test_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(test_text_add)
fig.quad(top=0.28, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.34,0.22,color='green',size=8)
fig.circle(0.34,0.14,color='orange',size=8)
fig.circle(0.34,0.06,color='purple',size=8)
else:
if legend == True:
if plot_num in [0,1,4]:
if width == 320:
ib_text_add = Label(x=0.38, y=0.10,
text=ib_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.38, y=0.02,
text=oob_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.20, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.34,0.14,color='green',size=8)
fig.circle(0.34,0.06,color='orange',size=8)
elif width == 475:
ib_text_add = Label(x=0.52, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.52, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.25, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.47,0.17,color='green',size=8)
fig.circle(0.47,0.07,color='orange',size=8)
elif width == 316:
ib_text_add = Label(x=0.22, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.22, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.17,0.18,color='green',size=8)
fig.circle(0.17,0.08,color='orange',size=8)
elif width == 237:
ib_text_1 = "IB (AUC = {:.2f}".format(auc_ib[2])
ib_text_2 = "+/- {:.2f})".format((auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.38, y=0.28,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.38, y=0.19,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.38, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.38, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.4, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.27,0.30,color='green',size=8)
fig.circle(0.27,0.10,color='orange',size=8)
elif width == 190:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 158:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 135:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
elif plot_num == 2:
if width == 475:
ib_text_add = Label(x=0.52, y=0.03,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.47,0.05,color='green',size=8)
elif width == 316:
ib_text_add = Label(x=0.30, y=0.02,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
fig.quad(top=0.10, bottom=0, left=0.20, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.25,0.05,color='green',size=8)
elif width == 237:
ib_text_1 = "IB (AUC = {:.2f}".format(auc_ib[2])
ib_text_2 = "+/- {:.2f})".format((auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.38, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.38, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.2, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.27,0.10,color='green',size=8)
elif width == 190:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
elif width == 158:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f}+/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
elif width == 135:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
elif plot_num == 3:
if width == 475:
oob_text_add = Label(x=0.52, y=0.03,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.47,0.05,color='orange',size=8)
# fig.circle(0.47,0.07,color='orange',size=8)
elif width == 316:
oob_text_add = Label(x=0.22, y=0.02,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.10, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.17,0.05,color='orange',size=8)
elif width == 237:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f}+/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.38, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.38, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.2, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.27,0.10,color='orange',size=8)
elif width == 190:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 158:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 135:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
    return fig, auc_ib, auc_oob
def roc_cv(Y_predfull, Y_predcv, Ytrue, width=450, height=350, xlabel="1-Specificity", ylabel="Sensitivity", legend=True, label_font_size="13pt", show_title=True, title_font_size="13pt", title="", plot_num=0, grid_line=False):
auc_check = roc_auc_score(Ytrue, Y_predfull)
if auc_check > 0.5:
pos = 1
else:
pos = 0
fprf, tprf, thresholdf = metrics.roc_curve(Ytrue, Y_predfull, pos_label=pos, drop_intermediate=False)
specf = 1 - fprf
auc_full = metrics.auc(fprf, tprf)
auc_full_hover = [auc_full] * len(tprf)
# Figure
data = {"x": fprf, "y": tprf, "spec": specf, "aucfull": auc_full_hover}
source = ColumnDataSource(data=data)
fig = figure(title=title, plot_width=width, plot_height=height, x_axis_label=xlabel, y_axis_label=ylabel, x_range=(-0.06, 1.06), y_range=(-0.06, 1.06))
# Figure: add line
# fig.line([0, 1], [0, 1], color="black", line_dash="dashed", line_width=2.5, legend="Equal Distribution Line")
fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1)
if plot_num in [0, 1, 2, 4]:
figline = fig.line("x", "y", color="green", line_width=2.5, alpha=0.8, legend="FULL (AUC = {:.2f})".format(auc_full), source=source)
fig.add_tools(HoverTool(renderers=[figline], tooltips=[("Specificity", "@spec{1.111}"), ("Sensitivity", "@y{1.111}")]))
else:
pass
# ADD CV
# bootstrap using vertical averaging
# fpr, tpr with drop_intermediates for fpr = 0 (useful for plot... since we plot specificity on x-axis, we don't need intermediates when fpr=0)
fpr = fprf
tpr = tprf
tpr0 = tpr[fpr == 0][-1]
tpr = np.concatenate([[tpr0], tpr[fpr > 0]])
fpr = np.concatenate([[0], fpr[fpr > 0]])
tpr_boot = []
boot_stats = []
auc_cv = []
for i in range(len(Y_predcv)):
# Resample and get tpr, fpr
Yscore_res = Y_predcv[i]
fpr_res, tpr_res, threshold_res = metrics.roc_curve(Ytrue, Yscore_res, pos_label=pos, drop_intermediate=False)
auc_cv.append(metrics.auc(fpr_res, tpr_res))
# Drop intermediates when fpr=0
tpr0_res = tpr_res[fpr_res == 0][-1]
tpr_res = np.concatenate([[tpr0_res], tpr_res[fpr_res > 0]])
fpr_res = np.concatenate([[0], fpr_res[fpr_res > 0]])
# Vertical averaging... use closest fpr_res to fpr, and append the corresponding tpr
idx = [np.abs(i - fpr_res).argmin() for i in fpr]
tpr_list = tpr_res[idx]
tpr_boot.append(tpr_list)
# Get CI for tpr
tpr_lowci = np.percentile(tpr_boot, 2.5, axis=0)
tpr_uppci = np.percentile(tpr_boot, 97.5, axis=0)
tpr_medci = np.percentile(tpr_boot, 50, axis=0)
# Add the starting 0
tpr = np.insert(tpr, 0, 0)
fpr = np.insert(fpr, 0, 0)
tpr_lowci = np.insert(tpr_lowci, 0, 0)
tpr_uppci = np.insert(tpr_uppci, 0, 0)
tpr_medci = np.insert(tpr_medci, 0, 0)
# Get CI for cv
auc_lowci = np.percentile(auc_cv, 2.5, axis=0)
auc_uppci = np.percentile(auc_cv, 97.5, axis=0)
auc_medci = np.percentile(auc_cv, 50, axis=0)
auc_ci = (auc_uppci - auc_lowci) / 2
auc_ci_hover = [auc_ci] * len(tpr_medci)
auc_med_hover = [auc_medci] * len(tpr_medci)
# Concatenate tpr_ci
tpr_ci = np.array([tpr_lowci, tpr_uppci, tpr_medci])
# specificity and ci-interval for HoverTool
spec2 = 1 - fpr
ci2 = (tpr_uppci - tpr_lowci) / 2
data2 = {"x": fpr, "y": tpr_medci, "lowci": tpr_lowci, "uppci": tpr_uppci, "spec": spec2, "ci": ci2}
source2 = ColumnDataSource(data=data2)
if plot_num in [0, 1, 3, 4]:
figline = fig.line("x", "y", color="orange", line_width=2.5, alpha=0.8, legend="CV (AUC = {:.2f} +/- {:.2f})".format(auc_medci, auc_ci,), source=source2)
fig.add_tools(HoverTool(renderers=[figline], tooltips=[("Specificity", "@spec{1.111}"), ("Sensitivity", "@y{1.111} (+/- @ci{1.111})")]))
# Figure: add 95CI band
figband = Band(base="x", lower="lowci", upper="uppci", level="underlay", fill_alpha=0.1, line_width=0.5, line_color="black", fill_color="orange", source=source2)
fig.add_layout(figband)
else:
pass
# Change font size
if show_title is True:
fig.title.text = "AUC FULL ({}) & AUC CV ({} +/- {})".format(np.round(auc_full, 2), np.round(auc_medci, 2), np.round(auc_ci, 2))
fig.title.text_font_size = title_font_size
fig.xaxis.axis_label_text_font_size = label_font_size
fig.yaxis.axis_label_text_font_size = label_font_size
# Extra padding
fig.min_border_left = 20
fig.min_border_right = 20
fig.min_border_top = 20
fig.min_border_bottom = 20
# Edit legend
fig.legend.location = "bottom_right"
# fig.legend.label_text_font_size = "1pt"
# fig.legend.label_text_font = "1pt"
# if legend is False:
# fig.legend.visible = False
if grid_line == False:
fig.xgrid.visible = False
fig.ygrid.visible = False
# Legend Manually because of bokeh issue
auc_full = np.round(auc_full, 2)
auc_cv1 = np.round(auc_medci, 2)
auc_cv2 = np.round(auc_ci, 2)
ib_text = "FULL (AUC = {:.2f})".format(auc_full)
oob_text = "CV (AUC = {:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
fig.legend.visible = False
if legend == True:
if plot_num in [0,1,4]:
if width == 475:
ib_text_add = Label(x=0.52, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.52, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.25, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
fig.circle(0.47,0.17,color='green',size=8)
fig.circle(0.47,0.07,color='orange',size=8)
elif width == 316:
ib_text_add = Label(x=0.30, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.30, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.25, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.25,0.18,color='green',size=8)
fig.circle(0.25,0.08,color='orange',size=8)
elif width == 237:
ib_text_add = Label(x=0.30, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '6.4pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.30, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '6.4pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.25, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.25,0.18,color='green',size=8)
fig.circle(0.25,0.08,color='orange',size=8)
elif width == 190:
ib_text_1 = "FULL (AUC ="
ib_text_2 = "{:.2f})".format(auc_full)
oob_text_1 = "CV (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 158:
ib_text_1 = "FULL (AUC ="
ib_text_2 = "{:.2f})".format(auc_full)
oob_text_1 = "CV (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 135:
ib_text_1 = "FULL (AUC ="
ib_text_2 = "{:.2f})".format(auc_full)
oob_text_1 = "CV (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
elif plot_num == 2:
if width == 475:
ib_text_add = Label(x=0.52, y=0.03,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
fig.circle(0.47,0.05,color='green',size=8)
elif width == 316:
ib_text_add = Label(x=0.40, y=0.02,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
fig.quad(top=0.12, bottom=0, left=0.30, right=1, color='white', alpha=0.4,line_color='lightgrey')
fig.circle(0.35,0.05, color='green',size=8)
elif width == 237:
ib_text_1 = "FULL (AUC ="
ib_text_2 = "{:.2f})".format(auc_full)
ib_text_add_1 = Label(x=0.38, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.38, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.21, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.27,0.10,color='green',size=8)
elif width == 190:
ib_text_1 = "FULL (AUC ="
ib_text_2 = "{:.2f})".format(auc_full)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
elif width == 158:
ib_text_1 = "FULL (AUC ="
ib_text_2 = "{:.2f})".format(auc_full)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
elif width == 135:
ib_text_1 = "FULL (AUC ="
ib_text_2 = "{:.2f})".format(auc_full)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
elif plot_num == 3:
if width == 475:
oob_text_add = Label(x=0.52, y=0.03,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
fig.circle(0.47,0.05,color='orange',size=8)
# fig.circle(0.47,0.07,color='orange',size=8)
elif width == 316:
oob_text_add = Label(x=0.27, y=0.02,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.11, bottom=0, left=0.17, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.22,0.05,color='orange',size=8)
elif width == 237:
oob_text_1 = "CV (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
oob_text_add_1 = Label(x=0.38, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.38, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.21, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.27,0.10,color='orange',size=8)
elif width == 190:
oob_text_1 = "CV (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 158:
oob_text_1 = "CV (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 135:
oob_text_1 = "CV (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
return fig
def per_method(bootstat, stat):
"""Calculates bootstrap confidence intervals using the percentile bootstrap interval."""
if stat.ndim == 1:
boot_ci = []
# Calculate bootci for each component (peak), and append it to bootci
for i in range(len(bootstat[0])):
bootstat_i = [item[i] for item in bootstat]
lower_ci = np.percentile(bootstat_i, 2.5)
upper_ci = np.percentile(bootstat_i, 97.5)
mid_ci = np.percentile(bootstat_i, 50)
boot_ci.append([lower_ci, upper_ci, mid_ci])
boot_ci = np.array(boot_ci)
elif stat.ndim == 0:
lower_ci = np.percentile(bootstat, 2.5)
upper_ci = np.percentile(bootstat, 97.5)
mid_ci = np.percentile(bootstat, 50)
boot_ci = [lower_ci, upper_ci, mid_ci]
boot_ci = np.array(boot_ci)
# Recursive case: reduce each component slice to ndim == 1 and append its intervals
else:
ncomp = stat.shape[1]
boot_ci = []
for k in range(ncomp):
bootstat_k = []
for j in range(len(bootstat)):
bootstat_k.append(bootstat[j][:, k])
boot_ci_k = per_method(bootstat_k, stat[:, k])
boot_ci.append(boot_ci_k)
boot_ci = np.array(boot_ci)
return boot_ci
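# Illustrative usage sketch (not part of the original module). The shapes below are
# assumptions about how callers pass the arguments: `bootstat` is a list of per-resample
# statistic vectors and `stat` is the observed statistic; numpy is assumed to already be
# imported as np earlier in this file.
#
#   rng = np.random.default_rng(0)
#   stat = np.array([0.5, 1.2, -0.3])                                 # observed statistic (ndim == 1)
#   bootstat = [stat + rng.normal(0, 0.1, size=3) for _ in range(500)]
#   ci = per_method(bootstat, stat)                                   # shape (3, 3): [lower, upper, mid]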
def cper_method(bootstat, stat):
"""Calculates bootstrap confidence intervals using the bias-corrected bootstrap interval."""
if stat.ndim == 1:
nboot = len(bootstat)
zalpha = norm.ppf(0.05 / 2)
obs = stat # Observed mean
meansum = np.zeros((1, len(obs))).flatten()
for i in range(len(obs)):
for j in range(len(bootstat)):
if bootstat[j][i] >= obs[i]:
meansum[i] = meansum[i] + 1
prop = meansum / nboot # Proportion of times boot mean > obs mean
z0 = -norm.ppf(prop)
# new alpha
pct1 = 100 * norm.cdf((2 * z0 + zalpha))
pct2 = 100 * norm.cdf((2 * z0 - zalpha))
pct3 = 100 * norm.cdf((2 * z0))
boot_ci = []
for i in range(len(pct1)):
bootstat_i = [item[i] for item in bootstat]
append_low = np.percentile(bootstat_i, pct1[i])
append_mid = np.percentile(bootstat_i, pct3[i])
append_upp = np.percentile(bootstat_i, pct2[i])
boot_ci.append([append_low, append_upp, append_mid])
boot_ci = np.array(boot_ci)
# Recursive case: reduce each component slice to ndim == 1 and append its intervals
else:
ncomp = stat.shape[1]
boot_ci = []
for k in range(ncomp):
bootstat_k = []
for j in range(len(bootstat)):
bootstat_k.append(bootstat[j][:, k])
boot_ci_k = cper_method(bootstat_k, stat[:, k])
boot_ci.append(boot_ci_k)
boot_ci = np.array(boot_ci)
return boot_ci
def bca_method(bootstat, stat, jackstat):
"""Calculates bootstrap confidence intervals using the bias-corrected and accelerated bootstrap interval."""
if stat.ndim == 1:
nboot = len(bootstat)
zalpha = norm.ppf(0.05 / 2)
obs = stat # Observed mean
meansum = np.zeros((1, len(obs))).flatten()
for i in range(len(obs)):
for j in range(len(bootstat)):
if bootstat[j][i] >= obs[i]:
meansum[i] = meansum[i] + 1
prop = meansum / nboot # Proportion of times boot mean > obs mean
z0 = -norm.ppf(prop, loc=0, scale=1)
# new alpha
jmean = np.mean(jackstat, axis=0)
num = np.sum((jmean - jackstat) ** 3, axis=0)
den = np.sum((jmean - jackstat) ** 2, axis=0)
ahat = num / (6 * den ** (3 / 2))
# Ignore warnings; invalid values are dealt with in the try/except below
with warnings.catch_warnings():
warnings.simplefilter("ignore")
zL = z0 + norm.ppf(0.05 / 2, loc=0, scale=1)
pct1 = 100 * norm.cdf((z0 + zL / (1 - ahat * zL)))
zU = z0 + norm.ppf((1 - 0.05 / 2), loc=0, scale=1)
pct2 = 100 * norm.cdf((z0 + zU / (1 - ahat * zU)))
zM = z0 + norm.ppf((0.5), loc=0, scale=1)
pct3 = 100 * norm.cdf((z0 + zM / (1 - ahat * zM)))
# pct3 = (pct1 + pct2) / 2
# for i in range(len(pct3)):
# if np.isnan(pct3[i]) == True:
# pct3[i] = (pct2[i] + pct1[i]) / 2
boot_ci = []
for i in range(len(pct1)):
bootstat_i = [item[i] for item in bootstat]
try:
append_low = np.percentile(bootstat_i, pct1[i])
append_upp = np.percentile(bootstat_i, pct2[i])
append_mid = np.percentile(bootstat_i, pct3[i])
except ValueError:
# Fall back to the bias-corrected (BC / CPerc) percentiles when the
# accelerated percentiles are invalid (i.e. no skewness)
pct1 = 100 * norm.cdf((2 * z0 + zalpha))
pct2 = 100 * norm.cdf((2 * z0 - zalpha))
pct3 = 100 * norm.cdf((2 * z0))
append_low = np.percentile(bootstat_i, pct1[i])
append_upp = np.percentile(bootstat_i, pct2[i])
append_mid = np.percentile(bootstat_i, pct3[i])
boot_ci.append([append_low, append_upp, append_mid])
# Recursive case: reduce each component slice to ndim == 1 and append its intervals
else:
ncomp = stat.shape[1]
boot_ci = []
for k in range(ncomp):
var = []
var_jstat = []
for j in range(len(bootstat)):
var.append(bootstat[j][:, k])
for m in range(len(jackstat)):
var_jstat.append(jackstat[m][:, k])
var_boot = bca_method(var, stat[:, k], var_jstat)
boot_ci.append(var_boot)
boot_ci = np.array(boot_ci)
return boot_ci
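# Illustrative usage sketch (not part of the original module). In addition to the bootstrap
# replicates, bca_method needs jackknife replicates (`jackstat`) to estimate the acceleration
# term; the toy shapes below are assumptions.
#
#   rng = np.random.default_rng(1)
#   stat = np.array([0.5, 1.2])                                       # observed statistic (ndim == 1)
#   bootstat = [stat + rng.normal(0, 0.10, size=2) for _ in range(500)]
#   jackstat = [stat + rng.normal(0, 0.05, size=2) for _ in range(50)]
#   ci = bca_method(bootstat, stat, jackstat)                         # [lower, upper, mid] per component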
def get_sens_spec(Ytrue, Yscore, cuttoff_val):
"""Get sensitivity and specificity from cutoff value."""
Yscore_round = np.where(np.array(Yscore) > cuttoff_val, 1, 0)
tn, fp, fn, tp = metrics.confusion_matrix(Ytrue, Yscore_round).ravel()
sensitivity = tp / (tp + fn)
specificity = tn / (tn + fp)
return sensitivity, specificity
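# Illustrative usage sketch (hypothetical labels/scores, not from the original source;
# sklearn.metrics is assumed to be imported as `metrics` earlier in this file):
#
#   Ytrue = [0, 0, 1, 1]
#   Yscore = [0.1, 0.6, 0.4, 0.9]
#   sens, spec = get_sens_spec(Ytrue, Yscore, cuttoff_val=0.5)        # both 0.5 for this toy data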
def get_sens_cuttoff(Ytrue, Yscore, specificity_val):
"""Get sensitivity and cuttoff value from specificity."""
fpr0 = 1 - specificity_val
fpr, sensitivity, thresholds = metrics.roc_curve(Ytrue, Yscore, pos_label=1, drop_intermediate=False)
idx = np.abs(fpr - fpr0).argmin() # this finds the closest value in fpr to fpr0
# Check that this is not a perfect roc curve
# If it is perfect, allow sensitivity = 1, rather than 0
if specificity_val == 1 and sensitivity[idx] == 0:
for i in range(len(fpr)):
if fpr[i] == 1 and sensitivity[i] == 1:
return 1, 0.5
return sensitivity[idx], thresholds[idx]
def get_spec_sens_cuttoff(Ytrue, Yscore, metric, val):
"""Return specificity, sensitivity, cutoff value provided the metric and value used."""
if metric == "specificity":
specificity = val
sensitivity, threshold = get_sens_cuttoff(Ytrue, Yscore, val)
elif metric == "cutoffscore":
threshold = val
sensitivity, specificity = get_sens_spec(Ytrue, Yscore, val)
return specificity, sensitivity, threshold
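# Illustrative usage sketch (hypothetical values). The wrapper dispatches on the metric
# name, so the same call covers both supported paths:
#
#   spec, sens, thr = get_spec_sens_cuttoff(Ytrue, Yscore, metric="specificity", val=0.9)
#   spec, sens, thr = get_spec_sens_cuttoff(Ytrue, Yscore, metric="cutoffscore", val=0.5)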
def get_stats(Ytrue, Yscore, specificity, parametric):
"""Calculates binary metrics given the specificity."""
sensitivity, cutoffscore = get_sens_cuttoff(Ytrue, Yscore, specificity)
stats = binary_metrics(Ytrue, Yscore, cut_off=cutoffscore, parametric=parametric)
return stats
|
# encoding: utf-8
"""Utilities for cr.cube tests."""
import os
def load_expectation(expectation_file_name, strip=True): # pragma: no cover
"""Return (unicode) str containing text in *expectation_file_name*.
Expectation file path is rooted at tests/expectations.
"""
thisdir = os.path.dirname(__file__)
expectation_file_path = os.path.abspath(
os.path.join(thisdir, "expectations", "%s.txt" % expectation_file_name)
)
with open(expectation_file_path, "rb") as f:
expectation_bytes = f.read()
if strip:
return expectation_bytes.decode("utf-8").strip()
return expectation_bytes.decode("utf-8")
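# Illustrative usage sketch (the expectation file name below is hypothetical): compare a
# computed text rendering against the stored file tests/expectations/<name>.txt.
#
#   expected_text = load_expectation("cat-x-cat-counts")
#   assert actual_text == expected_text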
def load_python_expression(expression_file_name):
"""Return a Python object (list, dict) formed by parsing `expression_file_name`.
Expectation file path is rooted at tests/expectations.
"""
thisdir = os.path.dirname(__file__)
expression_file_path = os.path.abspath(
os.path.join(thisdir, "expectations", "%s.py" % expression_file_name)
)
with open(expression_file_path) as f:
expression_bytes = f.read()
return eval(expression_bytes)
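# Illustrative usage sketch (hypothetical file name): the .py expectation file is expected
# to contain a single Python literal (list/dict), which eval() turns back into an object.
#
#   expected_rows = load_python_expression("cat-x-cat-pruned-rows")
#   assert actual_rows == expected_rows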
|
# Generated by Django 2.2.7 on 2020-09-27 05:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0014_auto_20200926_2116'),
]
operations = [
migrations.CreateModel(
name='Tabs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fontsize', models.IntegerField(default=15, verbose_name='Fontsize')),
('autosave', models.IntegerField(default=0, verbose_name='autosave')),
],
),
migrations.AlterField(
model_name='portable',
name='sex',
field=models.IntegerField(choices=[(1, 'Male'), (0, 'Female')], default=0, verbose_name='Sex'),
),
migrations.AlterField(
model_name='submenu',
name='status',
field=models.IntegerField(choices=[(2, 'Page'), (1, 'Address'), (0, 'Category')], default=0, verbose_name='Status'),
),
migrations.AlterField(
model_name='task_manegar',
name='status',
field=models.IntegerField(choices=[(1, 'Completing'), (0, 'Incomplete!'), (3, 'Complete!')], default=0, verbose_name='Status'),
),
]
|
# Author: Matthew Mills
# Copyright 2016
# The purpose of this class is to provide the pricing oracle required to feed market data
# We write this in a similar fashion to how it would operate in Ethereum
# Hence the constructor initiates the oracle, which then refreshes when told
# ---
# This is the oracle to interface with Medici
# Imports for JSON
import json
# Imports for MongoDB
from pymongo import MongoClient
class PriceOracle:
'The pricing oracle class used to pull live market data'
def __init__(self, client, port):
try:
self.client = MongoClient(client + ":" + port)
print("MongoDB client connected")
except Exception as e:
print(str(e))
def updateClient(self, client, port):
try:
self.client = MongoClient(client + ":" + port)
print("MongoDB client updated")
except Exception as e:
print(str(e))
def getDocumentsByDate(self, db, collection, date):
db = self.client[db]
collection = db[collection]
trades = collection.find(
{
'date': {'$regex' : str(date) + ".*"}
}
)
return trades
def getOneDocumentByDate(self, db, collection, date):
db = self.client[db]
collection = db[collection]
trades = collection.find_one(
{
'date': {'$regex' : str(date) + ".*"}
}
)
return trades
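# Illustrative usage sketch (host, port, database and collection names are hypothetical):
#
#   oracle = PriceOracle("mongodb://localhost", "27017")
#   trades = oracle.getDocumentsByDate("medici", "trades", "2016-01-01")
#   for trade in trades:
#       print(trade)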
|
#
# Software distributed under MIT License (MIT)
#
# Copyright (c) 2020 Flexpool
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import requests
from typing import Dict, List
from datetime import datetime
from . import exceptions
class Block:
def __init__(
self,
block_hash: str,
number: int,
block_type: str,
miner: str,
difficulty: int,
timestamp: int,
confirmed: bool,
round_time: int,
luck: float,
region: str,
static_block_reward: int,
tx_fee_reward: int,
mev_reward: int,
reward: int,
):
self.number = number
self.hash = block_hash
self.type = block_type
self.miner = miner
self.difficulty = difficulty
self.time = datetime.fromtimestamp(timestamp)
self.timestamp = timestamp
self.confirmed = confirmed
self.round_time = round_time
self.luck = luck
self.region = region
self.static_block_reward = static_block_reward
self.tx_fee_reward = tx_fee_reward
self.mev_reward = mev_reward
self.reward = reward
def __repr__(self):
return (
"<flexpoolapi.shared.Block object "
f"({self.type.capitalize()} {self.hash[:5 + 2] + '…' + self.hash[-5:]})>"
)
class PageResponse:
def __init__(self, contents: List, total_items: int, total_pages: int):
self.contents = contents
self.total_items = total_items
self.total_pages = total_pages
def __getitem__(self, index):
return self.contents[index]
def __len__(self):
return len(self.contents)
def __repr__(self):
return f"<flexpoolapi.shared.PageResponse object {str(self.contents)}>"
def __str__(self):
return str(self.contents)
def check_response(request):
if request.status_code not in [200, 201, 400]:
raise (
exceptions.UnexpectedStatusCode(
f"API Returned unexpected status code: {request.status_code} "
f"{request.reason} (Request URL: {request.url})"
)
)
if request.text:
error = (
"error" in request.json()
and request.json()["error"]
or "message" in request.json()
and request.json()["message"]
)
if error:
raise (
exceptions.APIError(
f"API Returned error: {error} (Request URL: {request.url})"
)
)
def get(endpoint: str, params: List = []) -> Dict:
api_request = requests.get(endpoint, params=params)
check_response(api_request)
return api_request.json()["result"]
def post(endpoint: str, params: List):
api_request = requests.put(endpoint, params=params)
check_response(api_request)
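# Illustrative usage sketch (the endpoint URL below is hypothetical, not taken from this
# module): `get` wraps requests.get plus the status/error checks above and returns the
# decoded "result" field of the JSON response.
#
#   blocks = get("https://api.example.com/v2/pool/blocks", params={"page": 0})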
|
from dotenv import load_dotenv
load_dotenv()
from .impl import Bot
__all__ = ("Bot",)
|
"""A simple config manager."""
# TODO: add real config handling
config = {'tag_db_path': 'taglist.xml'}
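# Illustrative usage sketch (the importing module path is hypothetical):
#
#   from config import config
#   tag_db_path = config['tag_db_path']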
|
import sys
import numpy as np
from scipy import special
from pymoo.util.misc import find_duplicates, cdist
# =========================================================================================================
# Model
# =========================================================================================================
class ReferenceDirectionFactory:
def __init__(self, n_dim, scaling=None, lexsort=True, verbose=False, seed=None, **kwargs) -> None:
super().__init__()
self.n_dim = n_dim
self.scaling = scaling
self.lexsort = lexsort
self.verbose = verbose
self.seed = seed
def do(self):
# set the random seed if it is provided
if self.seed is not None:
np.random.seed(self.seed)
if self.n_dim == 1:
return np.array([[1.0]])
else:
val = self._do()
if isinstance(val, tuple):
ref_dirs, other = val[0], val[1:]
else:
ref_dirs = val
if self.scaling is not None:
ref_dirs = scale_reference_directions(ref_dirs, self.scaling)
# lexicographically sort ref_dirs if desired
if self.lexsort:
I = np.lexsort([ref_dirs[:, j] for j in range(ref_dirs.shape[1])][::-1])
ref_dirs = ref_dirs[I]
return ref_dirs
def _do(self):
return None
# =========================================================================================================
# Das Dennis Reference Directions (Uniform)
# =========================================================================================================
def get_number_of_uniform_points(n_partitions, n_dim):
"""
Returns the number of uniformly distributed points that can be created for the given number of partitions and dimensions.
"""
return int(special.binom(n_dim + n_partitions - 1, n_partitions))
def get_partition_closest_to_points(n_points, n_dim):
"""
Returns the partition number that creates at most the desired number of points.
"""
if n_dim == 1:
return 0
n_partitions = 1
_n_points = get_number_of_uniform_points(n_partitions, n_dim)
while _n_points <= n_points:
n_partitions += 1
_n_points = get_number_of_uniform_points(n_partitions, n_dim)
return n_partitions - 1
def das_dennis(n_partitions, n_dim):
if n_partitions == 0:
return np.full((1, n_dim), 1 / n_dim)
else:
ref_dirs = []
ref_dir = np.full(n_dim, np.nan)
das_dennis_recursion(ref_dirs, ref_dir, n_partitions, n_partitions, 0)
return np.concatenate(ref_dirs, axis=0)
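# Illustrative sketch (not part of the original module): for n_dim dimensions and
# n_partitions partitions, das_dennis generates binom(n_dim + n_partitions - 1, n_partitions)
# points on the unit simplex, which is exactly what get_number_of_uniform_points reports.
#
#   ref_dirs = das_dennis(12, 3)
#   assert len(ref_dirs) == get_number_of_uniform_points(12, 3)      # 91 points, rows sum to 1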
def das_dennis_recursion(ref_dirs, ref_dir, n_partitions, beta, depth):
if depth == len(ref_dir) - 1:
ref_dir[depth] = beta / (1.0 * n_partitions)
ref_dirs.append(ref_dir[None, :])
else:
for i in range(beta + 1):
ref_dir[depth] = 1.0 * i / (1.0 * n_partitions)
das_dennis_recursion(ref_dirs, np.copy(ref_dir), n_partitions, beta - i, depth + 1)
class UniformReferenceDirectionFactory(ReferenceDirectionFactory):
def __init__(self, n_dim, scaling=None, n_points=None, n_partitions=None, **kwargs) -> None:
super().__init__(n_dim, scaling=scaling, **kwargs)
if n_points is not None:
n_partitions = get_partition_closest_to_points(n_points, n_dim)
results_in = get_number_of_uniform_points(n_partitions, n_dim)
# the requested number of points does not match any partition number
if results_in != n_points:
results_in_next = get_number_of_uniform_points(n_partitions + 1, n_dim)
raise Exception("The number of points (n_points = %s) can not be created uniformly.\n"
"Either choose n_points = %s (n_partitions = %s) or "
"n_points = %s (n_partitions = %s)." %
(n_points, results_in, n_partitions, results_in_next, n_partitions + 1))
self.n_partitions = n_partitions
elif n_partitions is not None:
self.n_partitions = n_partitions
else:
raise Exception("Either provide number of partitions or number of points.")
def _do(self):
return das_dennis(self.n_partitions, self.n_dim)
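# Illustrative usage sketch (not part of the original module): either a partition count or a
# compatible number of points can be requested.
#
#   ref_dirs = UniformReferenceDirectionFactory(n_dim=3, n_points=91).do()   # 91 == binom(14, 12)
#   ref_dirs.shape                                                           # (91, 3)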
# =========================================================================================================
# Multi Layer
# =========================================================================================================
class MultiLayerReferenceDirectionFactory:
def __init__(self, *args) -> None:
self.layers = []
self.layers.extend(args)
def add_layer(self, *args):
self.layers.extend(args)
def do(self):
ref_dirs = []
for factory in self.layers:
ref_dirs.append(factory.do())  # evaluate each layer's factory
ref_dirs = np.concatenate(ref_dirs, axis=0)
is_duplicate = find_duplicates(ref_dirs)
return ref_dirs[np.logical_not(is_duplicate)]
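# Illustrative usage sketch (not part of the original module): combine an outer layer with a
# scaled inner layer of uniform directions; duplicates are dropped by do().
#
#   multi = MultiLayerReferenceDirectionFactory(
#       UniformReferenceDirectionFactory(3, n_partitions=12, scaling=1.0),
#       UniformReferenceDirectionFactory(3, n_partitions=12, scaling=0.5),
#   )
#   ref_dirs = multi.do()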
# =========================================================================================================
# Util
# =========================================================================================================
def sample_on_unit_simplex(n_points, n_dim, unit_simplex_mapping="kraemer"):
if unit_simplex_mapping == "sum":
rnd = map_onto_unit_simplex(np.random.random((n_points, n_dim)), "sum")
elif unit_simplex_mapping == "kraemer":
rnd = map_onto_unit_simplex(np.random.random((n_points, n_dim)), "kraemer")
elif unit_simplex_mapping == "das-dennis":
n_partitions = get_partition_closest_to_points(n_points, n_dim)
rnd = UniformReferenceDirectionFactory(n_dim, n_partitions=n_partitions).do()
else:
raise Exception("Please define a valid sampling on unit simplex strategy!")
return rnd
def map_onto_unit_simplex(rnd, method):
n_points, n_dim = rnd.shape
if method == "sum":
ret = rnd / rnd.sum(axis=1)[:, None]
elif method == "kraemer":
M = sys.maxsize
rnd *= M
rnd = rnd[:, :n_dim - 1]
rnd = np.column_stack([np.zeros(n_points), rnd, np.full(n_points, M)])
rnd = np.sort(rnd, axis=1)
ret = np.full((n_points, n_dim), np.nan)
for i in range(1, n_dim + 1):
ret[:, i - 1] = rnd[:, i] - rnd[:, i - 1]
ret /= M
else:
raise Exception("Invalid unit simplex mapping!")
return ret
def scale_reference_directions(ref_dirs, scaling):
return ref_dirs * scaling + ((1 - scaling) / ref_dirs.shape[1])
def select_points_with_maximum_distance(X, n_select, selected=[]):
n_points, n_dim = X.shape
# calculate the distance matrix
D = cdist(X, X)
# if no selection provided pick randomly in the beginning
if len(selected) == 0:
selected = [np.random.randint(len(X))]
# create variables to store what is selected and what is not
not_selected = [i for i in range(n_points) if i not in selected]
# distance from each point to its closest already-selected point
dist_to_closest_selected = D[:, selected].min(axis=1)
# now select the points until sufficient ones are found
while len(selected) < n_select:
# find point that has the maximum distance to all others
index_in_not_selected = dist_to_closest_selected[not_selected].argmax()
I = not_selected[index_in_not_selected]
# update each point's distance to its closest selected point
is_closer = D[I] < dist_to_closest_selected
dist_to_closest_selected[is_closer] = D[I][is_closer]
# add it to the selected and remove from not selected
selected.append(I)
not_selected = np.delete(not_selected, index_in_not_selected)
return selected
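# Illustrative usage sketch (not part of the original module): greedily pick a well-spread
# subset of reference directions by maximising the distance to the points already chosen.
#
#   ref_dirs = UniformReferenceDirectionFactory(3, n_partitions=12).do()
#   I = select_points_with_maximum_distance(ref_dirs, n_select=10)
#   subset = ref_dirs[I]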
|