content stringlengths 5 1.05M |
|---|
from typing import List
class TreeNode:
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        # Children start empty; callers wire the tree up after construction.
        self.val = x
        self.left = self.right = None
class Solution:
    """Collect every root-to-leaf path whose node values add up to a target."""

    def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
        """Return all root-to-leaf paths summing to `sum` ([] for empty tree)."""
        found = []
        if root is not None:
            self.__dfs(root, sum, [], found)
        return found

    def __dfs(self, node, remaining, prefix, found):
        # Depth-first walk; `prefix` is shared and unwound on the way back up.
        if node is None:
            return
        prefix.append(node.val)
        at_leaf = node.left is None and node.right is None
        if at_leaf and node.val == remaining:
            found.append(list(prefix))
        for child in (node.left, node.right):
            if child:
                self.__dfs(child, remaining - node.val, prefix, found)
        prefix.pop()
if __name__ == '__main__':
    # Quick demo that list.copy() makes a shallow, independent copy:
    # mutating l2 leaves l untouched, so this prints [1, 2, 3, 5].
    l = [1, 2, 3, 5]
    l2 = l.copy()
    l2[1] = 100
    print(l)
|
from .fontconfig import has_fontconfig, query_fontconfig_database
__all__ = ['find_font', 'query_fontconfig_database']

# Fail fast at import time: every helper in this module delegates to
# fontconfig, so the module is unusable without it.
if not has_fontconfig():
    raise OSError(
        'This module currently only supports systems using "fontconfig"'
    )
def find_font(name, **kwargs):
    """Return the file path of the first font matching *name*.

    Extra keyword arguments are forwarded to ``query_fontconfig_database``
    as fontconfig property filters.

    Raises:
        OSError: if no font matches the name (and properties).
    """
    try:
        return query_fontconfig_database(name, **kwargs)[0]['file']
    except IndexError:
        # An empty result list is the expected "not found" case; suppress the
        # IndexError context (`from None`) so callers see a clean OSError
        # instead of a confusing "During handling of the above exception..."
        # traceback.
        query = ','.join(f'{k}={v}' for k, v in kwargs.items())
        if query:
            raise OSError(
                f'Font "{name}" with properties {query} not found'
            ) from None
        raise OSError(f'Font "{name}" not found') from None
|
import os
from dotenv import load_dotenv
import openai
# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")

# Pick the most recently uploaded "answers.jsonl" file on this account.
remote_files = openai.File.list()["data"]
training_files = filter(lambda f: "answers.jsonl" in f["filename"], remote_files)
latest_file = max(training_files, key=lambda x: x["created_at"])

# Card-game questions to answer against the uploaded document file.
questions = [
    "What destiny is 'Luke Skywalker, Jedi Knight'?",
    "What planet is Luke Skywalker from?",
    "Which Dark Jedi Master has destiny 6?",
    "Which Dark Jedi is power 4 and destiny 6?",
    "How many vehicles have a maintenace icon?",
    "Which starship has a maintenace icon?",
    "What class of Star Destroyer is Conquest?",
    "Is Grand Moff Tarkin a leader?",
    "Is Grand Moff Tarkin a smuggler?",
]

for question in questions:
    # Answers API: "ada" ranks file passages, "curie" writes the answer;
    # the examples define the expected answer style (few-shot).
    answer = openai.Answer.create(
        search_model="ada",
        model="curie",
        question=question,
        file=latest_file["id"],
        examples_context="Captain Jean-Luc Picard is a Light Side character card. Captain Jean-Luc Picard is a Federation human. Captain Jean-Luc Picard has a power of 5. Will Riker is a humam. Will Riker has a power of 6. Data is an android. Data has a power of 10.",
        examples=[
            ["What Power is Jean-Luc Picard?", "Captain Jean Luc Picard is Power 5"],
            ["Which side of the Force is Picard?", "Picard is a Light Side card."],
            ["What race is Captain Jean-Luc Picard?", "Captain Jean-Luc Picard is human."],
            ["Is Jean-Luc Picard a Federation human?", "Yes"],
            ["Is Jean-Luc Picard a Dominion Changeling?", "No"],
            ["Which human has the highest power?", "Captain Jean-Luc Picard"],
            ["Which character has power 5?", "Captain Jean-Luc Picard"],
            ["Which card has the highest power?", "Data"],
            ["Which Federation character has the highest power?", "Data"]
        ],
        max_rerank=50,
        max_tokens=20,
        stop=["\n", "<|endoftext|>"]
    )
    print(question)
    print(f'> {answer["answers"][0]}')
from graphene import ID, String, ObjectType, List, NonNull
from .company import Company
from btb.api.schema.resolvers import companies_by_principal
class User(ObjectType):
    """GraphQL user type; fields resolve from a dict-shaped root object."""

    id = ID(required=True)
    external_id = ID(required=True)
    email = String(required=True)
    # Deprecated: prefer first_name / last_name. (The original file had a bare
    # string expression `"""Deprecated"""` here, which is a no-op statement and
    # documents nothing — converted to a real comment.)
    name = String(required=True)
    first_name = String(required=True)
    last_name = String(required=True)
    picture_url = String(required=False)
    companies = List(NonNull(Company), resolver=companies_by_principal)

    def resolve_name(root, info):
        # The deprecated `name` is synthesized from its two parts.
        return "{} {}".format(root["first_name"], root["last_name"])
|
#!/usr/bin/env python3
import json
import os
import shutil
import zipfile
from os.path import join
import init
from utils import file_utils
from utils import process_utils
VERSION_FILE = 'version.txt'
BUILD_FOLDER = 'build'
class BuildInfo():
    """Accumulates the set of files to package via glob include/exclude rules."""

    def __init__(self):
        self.files = set()

    def include(self, path):
        """Add every file matching the (recursive) glob `path`."""
        matching_files = file_utils.search_glob(path, recursive=True)
        for file in matching_files:
            self.files.add(file)

    def exclude(self, path):
        """Remove matches of `path`; for matched directories, also drop their contents.

        The original compared os.path.commonprefix(...) against the directory
        path; commonprefix is a plain *string* prefix, so excluding
        "src/tests" would also wrongly drop a sibling like "src/tests2/x".
        Compare against the directory path plus a separator instead.
        """
        matching_files = file_utils.search_glob(path, recursive=True)
        for excluded in matching_files:
            self.files.discard(excluded)
            if os.path.isdir(excluded):
                prefix = excluded.rstrip(os.sep) + os.sep
                self.files = {f for f in self.files if not f.startswith(prefix)}

    def get_files(self):
        """Return the (mutable) set of selected files."""
        return self.files
def get_npm_version():
    """Read web-src/package.json and return its "version" as [major, minor, patch]."""
    package_path = join('web-src', 'package.json')
    package_json = json.loads(file_utils.read_file(package_path))
    if 'version' not in package_json:
        raise Exception('Failed to find version parameter in package.json')
    return parse_semver_str(package_json['version'])
def parse_semver_str(version_string):
    """Split a dotted version string ("1.2.3") into a list of ints ([1, 2, 3])."""
    return list(map(int, version_string.split('.')))
def create_version_file():
    """Derive the release version string and write it to VERSION_FILE.

    On the 'stable' branch: if the latest git tag shares major.minor with
    package.json, bump that tag's patch number; otherwise use the npm
    version as-is. On any other branch: produce a prerelease-style string
    "<major>.<minor+1>.0-<branch>@<short-hash>".
    """
    current_branch = process_utils.invoke('git rev-parse --abbrev-ref HEAD').strip()
    npm_version = get_npm_version()
    if current_branch == 'stable':
        last_tag = process_utils.invoke('git describe --abbrev=0 --tags').strip()
        last_tag_version = parse_semver_str(last_tag)
        # Same major.minor as package.json -> this is a patch release.
        if (last_tag_version[0] == npm_version[0]) and (last_tag_version[1] == npm_version[1]):
            new_version = [last_tag_version[0], last_tag_version[1], last_tag_version[2] + 1]
        else:
            new_version = npm_version
        new_version = '.'.join([str(v) for v in new_version])
    else:
        git_hash = process_utils.invoke('git rev-parse --short HEAD').strip()
        new_version = str(npm_version[0])
        new_version += '.' + str(npm_version[1] + 1)
        new_version += '.0-'
        new_version += current_branch + '@' + git_hash
    file_utils.write_file(VERSION_FILE, new_version)
# Start from a clean build folder.
if os.path.exists(BUILD_FOLDER):
    shutil.rmtree(BUILD_FOLDER)
os.mkdir(BUILD_FOLDER)

init.prepare_project('', prod=True)
create_version_file()

# Select the files that make up the distribution archive.
build_info = BuildInfo()
build_info.include('launcher.py')
build_info.include('requirements.txt')
build_info.include(VERSION_FILE)
build_info.include(os.path.join('src', '**', '*.py'))
build_info.include(os.path.join('conf', 'logging.json'))
build_info.include(os.path.join('web', '**'))
build_info.include(os.path.join('conf', 'runners'))
build_info.exclude(os.path.join('src', 'tests'))
build_info.exclude('tools')
build_info.exclude('samples')
build_info.exclude(BUILD_FOLDER)

# Use a context manager so the archive is flushed/closed even on error
# (the original never closed it), and avoid shadowing the builtin `zip`.
with zipfile.ZipFile(os.path.join(BUILD_FOLDER, 'script-server.zip'), 'w', zipfile.ZIP_DEFLATED) as archive:
    for file in build_info.get_files():
        archive.write(file)

os.remove(VERSION_FILE)
|
""" train.py """
import argparse
from utils.load import load_yaml
from model import get_model
def parser():
    """Parse CLI arguments: --configfile (YAML path) and --eval (flag)."""
    arg_parser = argparse.ArgumentParser('Classification Argument')
    arg_parser.add_argument('--configfile', type=str, default='./configs/default.yml', help='config file')
    arg_parser.add_argument('--eval', action='store_true', help='eval mode')
    return arg_parser.parse_args()
def run(args):
    """Builds model, loads data, trains and evaluates"""
    model = get_model(load_yaml(args.configfile))
    model.load_data(args.eval)
    model.build()
    # Dispatch on the requested mode.
    action = model.evaluate if args.eval else model.train
    action()
if __name__ == '__main__':
args = parser()
run(args) |
from django.http.response import Http404
from django.shortcuts import render,HttpResponse
import pickle
import pandas as pd
import sklearn
# Create your views here.
def home(request):
    """Render the landing page."""
    return render(request, 'home.html')
def predict(request):
    """Handle the price-prediction form.

    Non-POST requests (and unknown seller types) get a 404; POST reads the
    form fields, runs the pickled random-forest model and renders
    prediction.html with the result.
    """
    if request.method != "POST":
        raise Http404()

    # Load the pre-trained model. (Original also ran a hard-coded debug
    # prediction and print on every request — removed.)
    with open("static/india_home_price_prediction.pickle", 'rb') as f:
        rndm_frst = pickle.load(f)

    lat = request.POST.get('lat', '')
    long = request.POST.get('long', '')
    rera = request.POST.get('rera', '0')
    under_const = request.POST.get('under_const', '0')
    resale = request.POST.get('resale', '0')
    ready_to_move = request.POST.get('ready_to_move', '0')
    type_of_seller = request.POST.get('type_of_seller', '')
    owner = request.POST.get('owner', '0')  # read but not used by the model
    sqft = request.POST.get('sqft', '')
    bhk_no = request.POST.get('bhk', '')
    bath_no = request.POST.get('bath', '')

    # One-hot (builder, dealer) flags per seller type, taken from the three
    # hard-coded feature vectors in the original. The original left
    # `prediction_` unbound for any other value (HTTP 500 via
    # UnboundLocalError); reject unknown seller types cleanly instead.
    seller_flags = {
        'Owner': (0, 0),
        'Dealer': (0, 1),
        'Builder': (1, 0),
    }
    if type_of_seller not in seller_flags:
        raise Http404()
    builder_flag, dealer_flag = seller_flags[type_of_seller]

    features = [[under_const, rera, bhk_no, sqft, ready_to_move, resale,
                 long, lat, 1, builder_flag, dealer_flag]]
    prediction_ = rndm_frst.predict(features)[0]
    price_per_sqft = (prediction_ / float(sqft)) * 100

    form_data = {
        'lat': round(float(lat), 4),
        'long': round(float(long), 4),
        'rera': rera,
        'under_const': under_const,
        'ready_to_move': ready_to_move,
        'resale': resale,
        'type_of_seller': type_of_seller,
        'sqft': sqft,
        'bhk_no': bhk_no,
        'bath_no': bath_no,
        'prediction_': prediction_,
        'ppsqft': price_per_sqft,
    }
    return render(request, "prediction.html", form_data)
|
# -*- encoding: utf-8 -*-
# Module iadist
from numpy import *
from ia870.iasecross import iasecross
def iadist(f, Bc=iasecross(), METRIC=None):
    """Distance transform of binary image f via iterated erosion.

    METRIC may be 'euclidean' or 'euc2' (the latter leaves distances
    squared); otherwise a chamfer-style metric derived from Bc is used.
    NOTE(review): the default Bc=iasecross() is evaluated once at definition
    time and shared between calls — presumably fine since it is only read.
    """
    from iagray import iagray
    from iaintersec import iaintersec
    from iaisequal import iaisequal
    from iaero import iaero
    from iasebox import iasebox

    if METRIC is not None: METRIC = METRIC.upper()
    f = iagray(f, 'uint16')
    # y starts as an all-zero image of f's shape; iterate until f stabilizes.
    y = iaintersec(f, 0)
    if (METRIC == 'EUCLIDEAN') or (METRIC == 'EUC2'):
        f = int32(f)
        b = int32(zeros((3, 3)))
        i = 1
        # The erosion weights grow each pass so the accumulated values are
        # squared euclidean distances (odd-number summation).
        while any(f != y):
            a4, a2 = -4*i + 2, -2*i + 1
            b = int32([[a4, a2, a4],
                       [a2,  0, a2],
                       [a4, a2, a4]])
            y = f
            i = i + 1
            f = iaero(f, b)
        if METRIC == 'EUCLIDEAN':
            # Square root with rounding back to uint16.
            y = uint16(sqrt(f) + 0.5)
    else:
        # Chamfer metric: template chosen from the connectivity of Bc.
        if iaisequal(Bc, iasecross()):
            # The huge negative weight effectively disables the diagonal
            # neighbours (4-connectivity).
            b = int32([[-2147483647, -1, -2147483647],
                       [         -1,  0,          -1],
                       [-2147483647, -1, -2147483647]])
        elif iaisequal(Bc, iasebox()):
            b = int32([[-1, -1, -1],
                       [-1,  0, -1],
                       [-1, -1, -1]])
        else:
            b = Bc
        while any(f != y):
            y = f
            f = iaero(f, b)
    return y
|
import logging
import re
from typing import List, Optional
from pydantic import BaseModel, validator
from .meta.pydanticbase import PydanticBase
logger = logging.getLogger(__name__)

# Captures a pence-only price, e.g. "42p" -> group(1) == "42".
CURRENCY_PENCE_REGEX = re.compile(r"(\d{2})[p]")
# Character class stripping £, parentheses, whitespace and '+' from price text.
CURRENCY_WHITESPACE_REGEX = re.compile(r"[£()\s+]")
class ScrapedItem(PydanticBase):
    """A single scraped product listing with normalized price fields."""

    name: Optional[str] = None
    url: Optional[str] = None
    price: Optional[str] = None
    price_per_unit: Optional[str] = None
    image_url: Optional[str] = None

    @validator("price")
    def format_p(cls, v):
        """Transforms varying pricing formats to #.##"""
        if not v:
            return v
        cleaned = CURRENCY_WHITESPACE_REGEX.sub("", v)
        pence = CURRENCY_PENCE_REGEX.match(cleaned)
        if pence:
            return f"0.{pence.group(1)}"
        return cleaned

    @validator("price_per_unit")
    def format_ppu(cls, v):
        # Normalize e.g. "£1.50 per kg" -> "1.50/kg".
        if not v:
            return v
        return CURRENCY_WHITESPACE_REGEX.sub("", v).replace("per", "/")

    @validator("*", pre=True)
    def filter_invalid_values(cls, v):
        # Scrapers sometimes yield a list for a missing field; coerce to None.
        return None if isinstance(v, list) else v
class ShopListings(BaseModel):
    """A shop plus the items scraped from it."""
    # NOTE: `listings` is None (not []) until populated — handle both.
    id: int
    name: str
    listings: Optional[List[ScrapedItem]] = None
|
from bottle import Bottle, route, run, request, response, abort, error, redirect
from rdflib import ConjunctiveGraph, URIRef
from pyld import jsonld
from pyld.jsonld import compact, expand, frame, from_rdf, to_rdf, JsonLdProcessor
import json
import codecs
import sys
import requests
import os
from cromulent.model import factory, Person, Type, InformationObject, \
Appellation, Group, TimeSpan, Place, BeginningOfExistence, EndOfExistence, \
Actor, Creation, Activity, OrderedDict
from cromulent.vocab import WebPage, Nationality, Gender, BiographyStatement, \
PrimaryName, Description, Active
from cromulent.extra import add_rdf_value, add_schema_properties
# Enable the rdf:value and schema.org property shims on the cromulent model
# before any objects are built.
add_rdf_value()
add_schema_properties()

factory.base_url = "http://vocab.getty.edu/ulan/"
factory.base_dir = "data"
baseUrl = "http://vocab.getty.edu/"
class ULAN_CRM_Server(object):
    """Bottle service converting Getty ULAN SKOS records to CIDOC-CRM JSON."""

    def __init__(self):
        # Raw-TTL and framed-JSON caches, keyed by fetched URL.
        self.cache = {}
        self.json_cache = {}
        # When True, also attach bibliographic sources to names/notes.
        self.DO_SOURCES = False
        self.prop_data = {}

    def expand_url(self, url):
        # Expand aat:/ulan:/tgn: CURIE prefixes using the loaded JSON-LD context.
        url = url.replace('aat:', context_js['@context']['aat'])
        url = url.replace('ulan:', context_js['@context']['ulan'])
        url = url.replace('tgn:', context_js['@context']['tgn'])
        return url
    def fetch_graph(self, url, do_frame=True):
        """Fetch `url` as Turtle and return it as (optionally framed) JSON-LD.

        Results are cached twice: raw TTL in self.cache, processed JSON in
        self.json_cache.
        """
        if len(url) < 5:
            raise ValueError(url)
        url = self.expand_url(url)
        # Guard against a CURIE expansion producing a doubled scheme.
        if url[2:].find("http://") > -1:
            raise ValueError(url)
        if not url.endswith('.ttl'):
            url = url + ".ttl"
        if url in self.json_cache:
            return self.json_cache[url]
        elif url in self.cache:
            rdf = self.cache[url]
        else:
            print "Fetching %s" % url
            fh = requests.get(url)
            rdf = fh.text
            fh.close()
            self.cache[url] = rdf
        g = ConjunctiveGraph()
        try:
            g.parse(data=rdf, format="turtle")
        except:
            # just treat like it doesn't exist
            abort(404)
        out = g.serialize(format='json-ld')
        try:
            out = out.decode('utf-8')
        except:
            pass
        atjs = json.loads(out)
        if do_frame:
            # Frame + compact against the module-level ULAN context.
            j2 = {"@context": context_js, "@graph": atjs}
            atjs = frame(j2, frame_js)
            atjs = compact(atjs, context_js)
            try:
                del atjs['@context']
            except:
                pass
        self.json_cache[url] = atjs
        if len(self.json_cache) > 200:
            # NOTE(review): `json_cache` here is an undefined global — this
            # line would raise NameError once the cache exceeds 200 entries;
            # presumably self.json_cache was intended. The eviction itself
            # was never implemented.
            print "JSON CACHE now %s" % (len(json_cache))
            # Trash some out of it
        return atjs
    def clean_json(self, what):
        """Strip administrative/metadata keys from a fetched record in place,
        descending into the label/note sub-objects. Returns `what`."""
        togo = ["changeNote", "ccLicense", "created", "displayOrder", "identifier", \
            "generatedBy", "license", "mappingRelation", "modified", "parentStr", \
            "parentStrAbbr", "scheme", 'note']
        def clean(po):
            # Python 2: .keys() returns a list copy, so deleting while
            # iterating is safe here.
            for p in po.keys():
                if p in togo or p.startswith('broader'):
                    del po[p]
        clean(what)
        descend = ['altLabelObj', 'prefLabelObj', 'scopeNote', 'conceptFor']
        for d in descend:
            if d in what:
                if type(what[d]) == list:
                    for po in what[d]:
                        clean(po)
                else:
                    clean(what[d])
        # Changes are by ref, so what is modified in place, but return it anyway
        return what

    def strip_ids(self, what):
        """Recursively delete 'id' keys so value-equality comparison of two
        serialized objects ignores their URIs."""
        try:
            del what['id']
        except:
            pass
        for v in what.values():
            if not type(v) == list:
                v = [v]
            for vi in v:
                if isinstance(vi, OrderedDict):
                    self.strip_ids(vi)
    def data_exists(self, new, olds):
        """Return the member of `olds` that is value-equal to `new` after
        stripping ids, or False if none matches (used to avoid duplicates)."""
        js = factory.toJSON(new)
        self.strip_ids(js)
        # internally not always a list, only at serialization
        if not type(olds) == list:
            olds = [olds]
        for o in olds:
            njs = factory.toJSON(o)
            self.strip_ids(njs)
            if js == njs:
                return o
        return False
    def process_bio(self, who, bp, pref=True):
        """Map a ULAN biography object `bp` onto `who`: birth/death events,
        gender group membership and a biography statement."""
        birth = bp.get('estStart', {'@value': ''})['@value']
        death = bp.get('estEnd', {'@value': ''})['@value']
        birthplace = bp.get('birthPlace', '')
        deathplace = bp.get('deathPlace', '')
        gender = bp.get('gender', '')
        desc = bp.get('personDescription', '')
        contrib = bp.get('contributor', '')
        bev = BeginningOfExistence()
        if birth:
            # Point event: begin == end == birth date.
            bts = TimeSpan()
            bts.begin_of_the_begin = birth
            bts.end_of_the_end = birth
            bev.timespan = bts
        if birthplace:
            # '-place' URIs are views; fetch the unframed place record for its label.
            bj = self.fetch_graph(birthplace.replace('-place', ''), False)
            p = Place(self.expand_url(birthplace))
            p.label = bj.get('label', bj.get('rdfs:label', bj.get('skos:prefLabel')))
            bev.took_place_at = p
        if (birth or birthplace):
            # Only attach if an equivalent event isn't already recorded.
            if not hasattr(who, 'brought_into_existence_by') or not self.data_exists(bev, who.brought_into_existence_by):
                who.brought_into_existence_by = bev
        eev = EndOfExistence()
        if death:
            ets = TimeSpan()
            ets.begin_of_the_begin = death
            ets.end_of_the_end = death
            eev.timespan = ets
        if deathplace:
            # NOTE(review): unlike the birthplace branch this uses the framed
            # fetch (do_frame defaults to True) — confirm that is intended.
            bj = self.fetch_graph(deathplace.replace('-place', ''))
            p = Place(self.expand_url(deathplace))
            p.label = bj.get('label', bj.get('rdfs:label', bj.get('skos:prefLabel')))
            eev.took_place_at = p
        if (death or deathplace):
            if not hasattr(who, 'taken_out_of_existence_by') or not self.data_exists(eev, who.taken_out_of_existence_by):
                who.taken_out_of_existence_by = eev
        # aat:300400512 — presumably the "undetermined" placeholder; skipped.
        if gender and gender != "aat:300400512":
            g = Gender()
            g.classified_as = Type(gender)
            gj = self.fetch_graph(gender)
            g.label = gj['label']
            if not hasattr(who, 'member_of') or not self.data_exists(g, who.member_of):
                who.member_of = g
        if desc:
            bio = BiographyStatement()
            bio.value = desc
            ex = self.data_exists(bio, who.referred_to_by)
            if ex:
                bio = ex
            else:
                who.referred_to_by = bio
            if contrib:
                # Record who contributed the biography text.
                cre = Creation()
                bio.created_by = cre
                # XXX fetch and check Person / Group
                cre.carried_out_by = Actor(self.expand_url(contrib))
    def process_event(self, who, ep):
        """Map a ULAN life-event object onto `who` as an Activity (or an
        Active professional period) with optional timespan and location."""
        uri = ep['id']
        start = ep.get('estStart', '')
        end = ep.get('estEnd', '')
        comment = ep.get('comment', '')
        where = ep.get('location', '')
        btyp = ep.get('bioType', '')
        xuri = self.expand_url(uri)
        # aat/300393177 gets the carried_out/Active pattern; any other event
        # type is merely something the agent was present_at.
        if xuri == "http://vocab.getty.edu/aat/300393177":
            ad = Active(xuri)
            who.carried_out = ad
        else:
            ad = Activity(xuri)
            ad.classified_as = Type(xuri)
            who.present_at = ad
        if start or end:
            # A missing endpoint collapses the span to the known endpoint.
            ts = TimeSpan()
            ts.begin_of_the_begin = start or end
            ts.end_of_the_end = end or start
            ad.timespan = ts
            if comment:
                ts.label = comment
        if where:
            loc = Place(self.expand_url(ep['location']))
            ad.took_place_at = loc
    def process_term(self, new, old):
        """Copy a ULAN term object's literal value and kind/flag/display
        classifications onto the Appellation `new`."""
        try:
            new.value = old['literalValue']
        except:
            print repr(old)
        if old.get('termKind', '') == "http://vocab.getty.edu/term/kind/Pseudonym":
            new.classified_as = Type("http://vocab.getty.edu/aat/300404657")
        if old.get('flag', '') == "http://vocab.getty.edu/term/flag/Vernacular":
            # No real AAT term for vernacular — local placeholder URI.
            new.classified_as = Type("http://vocab.getty.edu/aat/__vernacular")
        if old.get('display', '') == "http://vocab.getty.edu/term/display/Indexing":
            new.classified_as = Type("http://vocab.getty.edu/aat/300404668")
        if self.DO_SOURCES:
            self.process_source(new, old)
    def process_source(self, new, old):
        """Attach bibliographic sources of `old` to `new`, deduplicating
        across the three source properties by expanded URL."""
        # dedupe multiple properties
        srcs = {}
        for x in ['sourcePref', 'sourceNonPref', 'source']:
            if x in old:
                vals = old[x]
                if type(vals) != list:
                    vals = [vals]
                for s in vals:
                    if type(s) == dict:
                        # part
                        url = self.expand_url(s['id'])
                        if url in srcs:
                            continue
                        part = InformationObject(url)
                        part.label = s['locator']
                        full = InformationObject(self.expand_url(s['partOf']))
                        part.composed_from = full
                        new.composed_from = part
                        srcs[part.id] = 1
                    else:
                        # string uri to source
                        url = self.expand_url(s)
                        if url in srcs:
                            continue
                        full = InformationObject(url)
                        new.composed_from = full
                        srcs[full.id] = 1
                        js = self.fetch_graph(full.id, False)
                        # Look through @graph list for full.id
                        if '@graph' in js:
                            for w in js['@graph']:
                                if self.expand_url(w['id']) == full.id:
                                    # Only valuable thing is title, which is more description :(
                                    # And shortTitle which isn't even a title...
                                    try:
                                        full.label = w['shortTitle']
                                    except KeyError:
                                        pass
                                    try:
                                        full.description = w['title']
                                    except KeyError:
                                        pass
    def build_main(self, fn):
        """Fetch and clean the main ULAN record for `fn`; return the pair
        (cleaned json, empty Person or Group shell)."""
        main = self.fetch_graph(fn)
        self.clean_json(main)
        uri = self.expand_url(main['id'])
        # Now remodel in CRM
        if "gvp:PersonConcept" in main['type']:
            who = Person(uri)
        else:
            who = Group(uri)
        return (main, who)
    def process(self, main, who):
        """Populate `who` (Person/Group) from the cleaned ULAN record `main`:
        names, type groups, exact/close matches, notes, events, nationalities
        and biographies."""
        # xl:prefLabel --> preferred Appellation
        pref = main['prefLabelObj']
        name = PrimaryName()
        if type(pref) != list:
            pref = [pref]
        for p in pref:
            self.process_term(name, p)
        who.identified_by = name
        # Other labels --> Appellation
        alo = main.get('altLabelObj', [])
        if type(alo) != list:
            alo = [alo]
        for o in alo:
            name = Appellation()
            self.process_term(name, o)
            who.identified_by = name
        # agentType --> Group with P2
        ats = main['agentType']
        if type(ats) != list:
            ats = [ats]
        for pref in ats:
            group = Group()
            group.classified_as = Type(pref)
            try:
                gd = self.fetch_graph(pref)
            except:
                raise
            # Collect per-language preferred labels for the type group.
            labels = {}
            plo = gd.get('prefLabelObj', [])
            if type(plo) != list:
                plo = [plo]
            for pl in plo:
                labels[pl['literalValue']['@language']] = pl['literalValue']['@value']
            group.label = labels
            who.member_of = group
        # copy exactMatch, other than self (!!)
        xMatch = main.get('exactMatch', [])
        if type(xMatch) != list:
            xMatch = [xMatch]
        for m in xMatch:
            if m != main['id']:
                who.exact_match = Person(m)
        cMatch = main.get('closeMatch', [])
        if type(cMatch) != list:
            cMatch = [cMatch]
        if cMatch:
            # closeMatch minus anything already recorded as exactMatch.
            cl = set(main['closeMatch']).difference(set(xMatch))
            for c in cl:
                who.close_match = Person(c)
        # scopeNote to Linguistic Object pattern to allow for source
        if 'scopeNote' in main:
            sn = main['scopeNote']
            d = Description(self.expand_url(sn['id']))
            d.value = {sn['value']['@language']: sn['value']['@value']}
            if self.DO_SOURCES:
                self.process_source(d, sn)
        # seeAlso is a webpage
        wp = WebPage(main['seeAlso'])
        who.referred_to_by = wp
        # copy void:inDataset ?
        rels = main.get('related', [])
        if type(rels) != list:
            rels = [rels]
        # Typed gvp:ulanXXX relationships: log them and drop their targets
        # from the generic `related` list so they aren't duplicated.
        for k in main.keys():
            if k.startswith("gvp:ulan"):
                rel = main[k]
                if type(rel) != list:
                    rel = [rel]
                for r in rel:
                    ru = r['id']
                    try:
                        rels.remove(ru)
                    except:
                        pass
                    print "%s: %s" % (k, ru)
                    #if not ru in self.done and not ru in fn:
                    #    fn.append(ru)
        for r in rels:
            # XXX Could be a group :S fetch and check
            who.related = Person(r)
        actor = main['conceptFor']
        # event, eventPref, eventNonPref
        ep = actor.get('eventPref', {})
        if ep:
            self.process_event(who, ep)
        eps = actor.get('eventNonPref', [])
        if type(eps) != list:
            eps = [eps]
        for ep in eps:
            self.process_event(who, ep)
        # nationality, nationalityPref, nationalityNonPref
        # NOTE(review): if nationalityPref is absent, fetch_graph({}) raises
        # ValueError (len < 5 guard) — presumably the field is always present
        # in practice; confirm.
        np = actor.get('nationalityPref', {})
        n = Nationality()
        nj = self.fetch_graph(np)
        n.label = nj['label']
        n.classified_as = Type(self.expand_url(np))
        who.member_of = n
        nnp = actor.get('nationalityNonPref', [])
        if type(nnp) != list:
            nnp = [nnp]
        # Remove "undetermined" as pointless
        try:
            nnp.remove('aat:300379012')
        except:
            pass
        for np in nnp:
            n = Nationality()
            nj = self.fetch_graph(np)
            n.label = nj['label']
            n.classified_as = Type(self.expand_url(np))
            who.member_of = n
        # biography, biographyPref, biographyNonPref
        # estStart, estEnd
        # birthPlace, deathPlace
        # gender
        # personDescription
        bp = actor['biographyPref']
        self.process_bio(who, bp)
        nbp = actor.get('biographyNonPref', [])
        if type(nbp) != list:
            nbp = [nbp]
        for bp in nbp:
            print "calling process_bio"
            self.process_bio(who, bp, False)
    def handle_id(self, ulan):
        """Bottle route handler: GET /<numeric-ulan-id> -> CRM JSON string."""
        if not ulan.isdigit():
            abort(404)
        ulan = "http://vocab.getty.edu/ulan/%s" % ulan
        (main, who) = self.build_main(ulan)
        self.process(main, who)
        ulfn = who.id.replace("ulan:", "")
        #factory.toFile(who, compact=False, filename="data/%s.json" % ulfn)
        # NOTE(review): item access on bottle's response sets a *header*, so
        # this produces a "Content_type" header rather than setting
        # response.content_type — confirm intended.
        response['content_type'] = "application/json"
        response.status = 200
        return factory.toString(who, compact=False)

    def dispatch_views(self):
        # Single route: GET /<ulan>.
        self.app.route('/<ulan>', ["get"], self.handle_id)

    def after_request(self):
        # Add CORS and other static headers
        methods = 'PUT, PATCH, GET, POST, DELETE, OPTIONS, HEAD'
        hdrs = 'ETag, Vary, Accept, Prefer, Content-type, Link, Allow, Content-location, Location'
        response.headers['Access-Control-Allow-Origin'] = '*'
        response.headers['Access-Control-Allow-Methods'] = methods
        response.headers['Access-Control-Allow-Headers'] = hdrs
        response.headers['Access-Control-Expose-Headers'] = hdrs
        response.headers['Allow'] = methods
        response.headers['Vary'] = "Accept, Prefer"

    def get_bottle_app(self):
        """Build the Bottle app with the routes and after-request hook."""
        self.app = Bottle()
        self.dispatch_views()
        #self.app.hook('before_request')(self.before_request)
        self.app.hook('after_request')(self.after_request)
        #self.app.error_handler = self.get_error_handler()
        return self.app
svc = ULAN_CRM_Server()

# Context file lives next to the module when run directly; hard-coded deploy
# path when imported under WSGI.
if __name__ == "__main__":
    fn = "context.json"
else:
    fn = '/home/azaroth/web_services/ulan_crm/context.json'

# Base ULAN context and frame
fh = file(fn)
ctxt = fh.read()
fh.close()
context_js = json.loads(ctxt)

# JSON-LD frame: root on skos:Concept; noisy properties are kept
# reference-only (@embed False) instead of being expanded inline.
frame_js = {"@context": context_js['@context'],
            "type": "skos:Concept",
            "contributor": {"@embed": False},
            "source": {"@embed": False},
            "changeNote": {"@embed": False},
            "note": {"@embed": False},
            "mappingRelation": {"@embed": False},
            "exactMatch": {"@embed": False},
            "closeMatch": {"@embed": False}
            }

if __name__ == "__main__":
    run(host="localhost", port="8888", app=svc.get_bottle_app(), debug=True)
else:
    application = svc.get_bottle_app()
|
from typing import List
import requests, json
from guet.steps.action import Action
class RemoveCommitterAction(Action):
    """Action that removes a committer (looked up by initials) and announces
    the removal through a configured webhook."""

    def __init__(self, committers):
        super().__init__()
        self.committers = committers

    def execute(self, args: List[str]):
        committer = self.committers.by_initials(args[0])
        if not committer:
            print(f'No committer exists with initials {args[0]}')
            return
        # Announce the removal, then apply it.
        message = args[0]+" is removed as a committer"
        payload = json.dumps({"text": message})
        with open('guet/commands/webhook.json', 'r') as f:
            url = json.loads(f.read())
        headers = {'Content-type': 'application/json',}
        requests.post(url["URL"], headers=headers, data=payload)
        self.committers.remove(committer.initials)
|
from __future__ import division
import math
import LLRcalc
class SPRT:
    """
    This class performs a GSPRT for H0:elo=elo0 versus H1:elo=elo1
    See here for a description of the GSPRT as well as theoretical
    (asymptotic) results.
    http://stat.columbia.edu/~jcliu/paper/GSPRT_SQA3.pdf
    In addition we do overshoot correction as in Siegmund - Sequential Analysis.
    To record the outcome of a game pair use the method record(result)
    where "result" is a half integer in the interval [0,2]
    """

    def __init__(
        self,
        alpha=0.05,
        beta=0.05,
        elo0=0,
        elo1=5,
        mode="pentanomial",
        elo_model="logistic",
    ):
        """alpha/beta: type-I/type-II error rates; elo0/elo1: the two
        hypothesized elo values; mode: outcome granularity (pentanomial =
        game-pair results, trinomial = single games); elo_model: elo scale
        used by the LLR computation."""
        self.elo0 = elo0
        self.elo1 = elo1
        assert elo_model in ("logistic", "normalized")
        assert mode in ("trinomial", "pentanomial")
        # Outcome histogram: 5 buckets for game pairs, 3 for single games.
        if mode == "pentanomial":
            self.results_ = 5 * [0]
        else:
            self.results_ = 3 * [0]
        # Wald decision thresholds log(beta/(1-alpha)), log((1-beta)/alpha).
        self.LA = math.log(beta / (1 - alpha))
        self.LB = math.log((1 - beta) / alpha)
        self.status_ = ""
        self.LLR_ = 0.0
        # Running LLR extrema and squared-increment accumulators feeding the
        # Siegmund overshoot corrections o0/o1.
        self.min_LLR = 0.0
        self.max_LLR = 0.0
        self.sq0 = 0.0
        self.sq1 = 0.0
        self.o0 = 0.0
        self.o1 = 0.0
        self.elo_model = elo_model

    def record(self, result):
        """Record one outcome (an index into results_) and update the test."""
        # Once a decision has been reached, further results are ignored.
        if self.status_ != "":
            return
        self.results_[result] += 1
        if self.elo_model == "logistic":
            self.LLR_ = LLRcalc.LLR_logistic(self.elo0, self.elo1, self.results_)
        else:
            self.LLR_ = LLRcalc.LLR_normalized(self.elo0, self.elo1, self.results_)
        # Dynamic overshoot correction using
        # Siegmund - Sequential Analysis - Corollary 8.33.
        if self.LLR_ > self.max_LLR:
            self.sq1 += (self.LLR_ - self.max_LLR) ** 2
            self.max_LLR = self.LLR_
            self.o1 = self.sq1 / self.LLR_ / 2
        if self.LLR_ < self.min_LLR:
            self.sq0 += (self.LLR_ - self.min_LLR) ** 2
            self.min_LLR = self.LLR_
            self.o0 = -self.sq0 / self.LLR_ / 2
        # Accept H1/H0 once the LLR crosses the overshoot-adjusted bound.
        if self.LLR_ > self.LB - self.o1:
            self.status_ = "H1"
        elif self.LLR_ < self.LA + self.o0:
            self.status_ = "H0"

    def status(self):
        # "" while running, "H0" or "H1" once decided.
        return self.status_

    def length(self):
        # Total games: each recorded result covers (len-1)/2 games
        # (1 for trinomial, 2 per pentanomial game pair).
        l = len(self.results_)
        return ((l - 1) / 2) * sum(self.results_)

    def LLR(self):
        return self.LLR_

    def results(self):
        return self.results_
|
def some_dead_code():
    """Fixture: both branches are statically false, so nothing is printed;
    x and y are dead stores. Kept intentionally for analysis tooling."""
    if 5 == 4:
        print("5 = 4")
    elif 4 == 3:
        print("4 = 3")
    x = 4
    y = 5
y = 5
def main():
    """Fixture exercising assignment/branching patterns (dead code, augmented
    and multiple-target assignment, comparison chains) for analysis tooling.
    Reads one integer from stdin."""
    a = 5
    b = 6
    c = 7
    # Dead code
    if False:
        a = b
        print(a)
    # Augmented Assignment
    c += 3
    # Multiple Target Assignment
    d = b = a = c
    some_dead_code()
    """
    another Augmented Assignment
    """
    d *= 2 + 2
    e = int(input())
    # If redundant lines
    if c + e > 14:
        print(c)
        print(e)
        d *= 2 + 2
    elif c + d > 15:
        print(d)
        print(e)
        d *= 2 + 2
    else:
        print(a)
        print(e)
        d *= 2 + 2
    # Multiple Operator Comparison
    if a <= e <= d <= c:
        print(e)
    else:
        print("hello")
    # Outer Not Elimination
    if not ((a and b) or (c or d)):
        print(e)
    # Comparison Unification
    if a < e or b <= c:
        print(d)
# Script entry point.
if __name__ == '__main__':
    main()
|
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.typing import TensorType, TensorShape
tf1, tf, tfv = try_import_tf()
class GRUGate(tf.keras.layers.Layer if tf else object):
    """GRU-style gating layer combining a state tensor h with an input X.

    Falls back to a plain object base when TensorFlow is not installed.
    """

    def __init__(self, init_bias: float = 0.0, **kwargs):
        """init_bias: initial value of the update-gate bias b_z. Since z =
        sigmoid(... - b_z), a larger bias keeps the gate mostly closed
        (output ~ h) early in training."""
        super().__init__(**kwargs)
        self._init_bias = init_bias

    def build(self, input_shape: TensorShape):
        # input_shape is a pair of shapes for (h, X); last axes must match.
        h_shape, x_shape = input_shape
        if x_shape[-1] != h_shape[-1]:
            raise ValueError(
                "Both inputs to GRUGate must have equal size in last axis!"
            )
        dim = int(h_shape[-1])
        # w_* matrices multiply the input X; u_* matrices multiply state h.
        self._w_r = self.add_weight(shape=(dim, dim))
        self._w_z = self.add_weight(shape=(dim, dim))
        self._w_h = self.add_weight(shape=(dim, dim))
        self._u_r = self.add_weight(shape=(dim, dim))
        self._u_z = self.add_weight(shape=(dim, dim))
        self._u_h = self.add_weight(shape=(dim, dim))

        def bias_initializer(shape, dtype):
            # Constant-fill initializer for the update-gate bias.
            return tf.fill(shape, tf.cast(self._init_bias, dtype=dtype))

        self._bias_z = self.add_weight(shape=(dim,), initializer=bias_initializer)

    def call(self, inputs: TensorType, **kwargs) -> TensorType:
        # Pass in internal state first.
        h, X = inputs
        # Reset gate: r = sigmoid(X·w_r + h·u_r).
        r = tf.tensordot(X, self._w_r, axes=1) + tf.tensordot(h, self._u_r, axes=1)
        r = tf.nn.sigmoid(r)
        # Update gate: z = sigmoid(X·w_z + h·u_z - b_z).
        z = (
            tf.tensordot(X, self._w_z, axes=1)
            + tf.tensordot(h, self._u_z, axes=1)
            - self._bias_z
        )
        z = tf.nn.sigmoid(z)
        # Candidate state from the reset-gated h: tanh(X·w_h + (h*r)·u_h).
        h_next = tf.tensordot(X, self._w_h, axes=1) + tf.tensordot(
            (h * r), self._u_h, axes=1
        )
        h_next = tf.nn.tanh(h_next)
        # Convex blend of the old state and the candidate.
        return (1 - z) * h + z * h_next
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
driver = webdriver.Chrome()
driver.get("http://localhost/litecart/en/")

# Credentials shared by registration and the subsequent re-login.
eml = "test8@test.com"
passw = "11111111"
def login():
    """Fill the sidebar login form with the shared test credentials."""
    nav = ".//*[@id='navigation']"
    driver.find_element_by_xpath(nav + "//input[@name='email']").send_keys(eml)
    driver.find_element_by_xpath(nav + "//input[@name='password']").send_keys(passw)
    driver.find_element_by_xpath(nav + "//*[@name='login']").click()
def logout():
    """Click the account-page logout link (waits up to 5s for it to appear)."""
    driver.implicitly_wait(5)
    driver.find_element_by_xpath(".//*[@class='content']//a[contains(@href,'logout')]").click()
def registration():
    """Register a new account, then verify the credentials by logging out,
    logging back in, logging out again, and closing the browser."""
    driver.find_element_by_xpath(".//*[@id='navigation']//*[contains(@href,'account')]").click()
    # Locate all form fields first, then fill them.
    country = Select(driver.find_element_by_xpath(".//td[contains(.,'Country')]//select"))
    taxId = driver.find_element_by_xpath(".//input[contains(@name,'tax_id')]")
    company = driver.find_element_by_xpath(".//input[contains(@name,'company')]")
    first_name = driver.find_element_by_xpath(".//input[contains(@name,'first')]")
    lastName = driver.find_element_by_xpath(".//input[contains(@name,'last')]")
    address_1 = driver.find_element_by_xpath(".//input[contains(@name,'address1')]")
    postcode = driver.find_element_by_xpath(".//input[contains(@name,'postcode')]")
    city = driver.find_element_by_xpath(".//input[contains(@name,'city')]")
    email = driver.find_element_by_xpath(".//input[contains(@name,'email')]")
    phone = driver.find_element_by_xpath(".//input[contains(@name,'phone')]")
    password = driver.find_element_by_xpath(".//input[contains(@name,'passw')]")
    conf_password = driver.find_element_by_xpath(".//input[contains(@name,'confirmed_password')]")
    subm_button = driver.find_element_by_xpath(".//*[contains(@type,'submit')]")
    country.select_by_value("US")
    taxId.send_keys("Test taxId")
    company.send_keys("my Company")
    first_name.send_keys("Sergey")
    lastName.send_keys("Lastname")
    address_1.send_keys("Street")
    postcode.send_keys("12345")
    city.send_keys("Odessa")
    email.send_keys(eml)
    phone.clear()
    phone.send_keys("+380931478523")
    password.send_keys(passw)
    conf_password.send_keys(passw)
    subm_button.click()
    # Registration leaves us signed in: sign out, prove the fresh account
    # works by logging back in, then sign out and quit the browser.
    # NOTE(review): nesting reconstructed from a whitespace-mangled source —
    # confirm these four calls belong inside registration().
    logout()
    login()
    logout()
    driver.close()
registration() |
"""КР, Ларькин Владимир, М8О-303Б-18"""
import time
import numpy as np
import fire # CLI
import multiprocessing as mp
from typing import List
from tqdm import tqdm # прогресс-бары
import matplotlib.pyplot as plt
from utilities import parse_matrix # парсинг матрицы из файла
from sem1.lab1_1.gauss import lu_solve
def lu_decomposition(matrix: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray):
    """LU decomposition with partial (row) pivoting: P @ matrix == L @ U.

    Because rows are swapped during elimination, the permutation matrix P
    is returned as well.

    :param matrix: square input matrix (any numeric dtype; work is in float)
    :return: tuple of matrices P, L, U
    """
    # matrices must be square arrays of dimension 2
    assert matrix.shape[0] == matrix.shape[1] and len(matrix.shape) == 2
    n = matrix.shape[0]
    # Work in float so integer inputs don't truncate the multipliers
    # (np.zeros_like(matrix) kept the input dtype in the original).
    u = np.array(matrix, dtype=float)
    l = np.zeros_like(u)
    p = np.identity(n)
    for j in range(n - 1):
        # Pivot: bring the largest |u[j:, j]| of the *current* (partially
        # eliminated) matrix to row j, mirroring the swap in p and l.
        m = np.abs(u[j:, j]).argmax() + j
        p[[j, m]] = p[[m, j]]
        l[[j, m]] = l[[m, j]]
        u[[j, m]] = u[[m, j]]
        # Compute the column-j multipliers and eliminate immediately. The
        # original computed all multipliers (and chose pivots) from the
        # un-eliminated matrix in one pass and applied updates in a second
        # pass, which yields a non-triangular U for n >= 3; the fused loop
        # matches the correct update used in lu_decomposition_parallel.
        for i in range(j + 1, n):
            l[i, j] = u[i, j] / u[j, j]
            u[i, :] -= u[j, :] * l[i, j]
    l[np.diag_indices(n)] = 1
    return p, l, u
def split(a: np.ndarray, num: int) -> List[np.ndarray]:
    """Split array `a` into `num` chunks: num - 1 equal parts plus one part
    that also absorbs the remainder (falls back to one-element chunks when
    num exceeds len(a))."""
    if num > len(a):
        return np.split(a, len(a))
    bound = len(a) // num * num
    chunks = np.split(a[:bound], num)
    leftover = len(a) - bound
    if leftover > 0:
        chunks[-1] = np.concatenate([chunks[-1], a[-leftover:]])
    return chunks
def divide(data):
    """Elementwise b / c for a packed (b, c) pair — multiprocessing map helper."""
    numerator, denominator = data
    return numerator / denominator
def subtract(data):
    """a - outer(b, c) for a packed (a, b, c) triple — multiprocessing map helper."""
    a, b, c = data
    return a - np.outer(b, c)
def lu_decomposition_parallel(matrix: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray):
    """Parallel LU decomposition of a matrix with partial (row) pivoting.

    Because rows are swapped during the decomposition, the permutation
    matrix P is returned as well.

    :param matrix: input square matrix
    :return: tuple of matrices P, L, U
    :raises ValueError: if the input is not a square 2-D array
    """
    if matrix.shape[0] != matrix.shape[1] or len(matrix.shape) != 2:
        raise ValueError("Матрицы обязаны быть квадратными массивами размерности 2")
    n = matrix.shape[0]
    l = np.zeros_like(matrix)
    u = np.copy(matrix)
    p = np.identity(n)
    proc_count = mp.cpu_count()
    # Context manager guarantees the worker processes are terminated even if
    # an exception is raised mid-decomposition; the original created the pool
    # bare, leaked it on error, and never joined it.
    with mp.Pool(proc_count) as pool:
        for j in range(n - 1):
            # pivot: row with the largest absolute value in column j
            m = np.abs(u[j:, j]).argmax() + j
            p[[j, m]] = p[[m, j]]
            l[[j, m]] = l[[m, j]]
            u[[j, m]] = u[[m, j]]
            # same as l[j + 1:, j] = u[j + 1:, j] / u[j, j], but in parallel
            b = split(u[j + 1:, j], proc_count)
            c = [u[j, j]] * len(b)
            data_out = pool.map(divide, zip(b, c))
            l[j + 1:, j] = np.hstack(data_out)
            # Eliminate column j below the diagonal with a single outer
            # product.  A parallel map for this step was tried and measured
            # slower than the plain vectorized expression (chunking overhead
            # dominates), so it stays serial.
            u[j + 1:, :] = u[j + 1:, :] - l[j + 1:, j].reshape(-1, 1) @ u[j, :].reshape(1, -1)
    l[np.diag_indices(n)] = 1
    return p, l, u
def main(src=None, test=False, shape=50, it=500):
    """Solve a linear system by Gaussian elimination using the LU decomposition.

    :param src: path to a text file containing the augmented matrix [A|b]
    :param test: flag that triggers benchmarking
    :param shape: size of the matrices generated during benchmarking
    :param it: number of benchmark repetitions
    """
    np.random.seed(42)  # fixed seed: reproducible benchmark matrices
    if src is not None:
        # read the augmented matrix from the file
        with open(src, "r") as file:
            s = file.readlines()
        matrix = parse_matrix(s)
        a = matrix[:, :-1]
        b = matrix[:, -1]
        print("A:", a, sep="\n")
        print("b:", b)
        p, l, u = lu_decomposition_parallel(a)
        # sanity check: P^T L U must reproduce A
        print(f"PLU:\n{p.T @ l @ u}")
        print(f"PLU == A: {np.allclose(p.T @ l @ u, a)}")
        print(f"P:\n{p}\nL:\n{l}\nU:\n{u}")
        # b must be permuted the same way as the rows of A were
        x = lu_solve(l, u, p @ b)
        print(f"Решение системы: {x}")
    # benchmarks on randomly generated matrices
    if test:
        run_test(shape, it)
def run_test(shape: int, it: int):
    """Benchmark the serial vs. parallel LU decomposition + linear solve.

    :param shape: upper bound on the matrix sizes (tested in steps of 10)
    :param it: number of repetitions per size
    """
    print("\nТест решения СЛАУ:")
    times_my = {}   # size -> list of serial timings (seconds)
    times_par = {}  # size -> list of parallel timings (seconds)
    shapes = list(range(10, shape, 10))
    for shape in tqdm(shapes):
        times_my[shape] = []
        times_par[shape] = []
        for _ in tqdm(range(it)):
            a = np.random.rand(shape, shape) * 100
            b = np.random.rand(shape) * 100
            prev = time.time_ns()
            p, l, u = lu_decomposition(a)
            _ = lu_solve(l, u, p @ b)
            times_my[shape].append((time.time_ns() - prev) / 1e9)
            # sanity check: the factors must reproduce the input matrix
            if not np.allclose(p.T @ l @ u, a):
                print("Обычная")
                print(a)
                print(l)
                print(u)
                break
            prev = time.time_ns()
            pp, lp, up = lu_decomposition_parallel(a)
            _ = lu_solve(lp, up, pp @ b)
            times_par[shape].append((time.time_ns() - prev) / 1e9)
            if not np.allclose(pp.T @ lp @ up, a):
                print("Параллельная")
                print("L:\n", l)
                print("Lp:\n", lp)
                print("A:\n", a)
                break
    # average the timings per size and plot both curves
    means = {}
    for name, d in zip(["Параллельная", "Последовательная"], [times_par, times_my]):
        means[name] = []
        for key in d:
            means[name].append(np.average(d[key]))
    for key in means:
        plt.plot(shapes, means[key], label=key)
    plt.legend()
    plt.grid(True)
    plt.title("Зависимость времени выполнения от размерности матрицы")
    plt.xlabel("Размерность матрицы")
    plt.ylabel("Время в секундах")
    plt.savefig("plot.jpg", dpi=300)
    plt.show()
if __name__ == "__main__":
    # expose main() as a command-line interface via python-fire
    fire.Fire(main)
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tools for unittests that require storing video files on disk.
#
import os
import shutil
import tempfile
import unittest
import encoder
def InitWorkDir():
  """Create a scratch directory and point CODEC_WORKDIR at it."""
  workdir = tempfile.mkdtemp(prefix='codec-unittest-workdir')
  # mkdtemp already creates the directory; belt-and-braces check kept
  # from the original implementation.
  if not os.path.isdir(workdir):
    os.mkdir(workdir)
  os.environ['CODEC_WORKDIR'] = workdir
  return workdir
def MakeYuvFileWithOneBlankFrame(name):
  """ Make an YUV file with one black frame.
  The size of the frame is encoded in the filename."""
  videofile = encoder.Videofile('%s/%s' % (os.getenv('CODEC_WORKDIR'),
                                           name))
  # Frame size in an YUV 4:2:0 file is 1.5 bytes per pixel.
  # Integer division: the original's true division produced a float under
  # Python 3, which breaks sequence repetition below.
  framesize = videofile.width * videofile.height * 3 // 2
  # Binary mode so the NUL bytes are written verbatim, and 'with' so the
  # handle is closed even if the write fails.
  with open(videofile.filename, 'wb') as real_file:
    real_file.write(b'\0' * framesize)
  return videofile
def FinishWorkDir(dirname):
  """Remove the unittest work directory after verifying it is the active one."""
  # Verification of validity
  active = os.environ['CODEC_WORKDIR']
  if active != dirname:
    raise encoder.Error('Dirname was wrong in FinishWorkDir')
  shutil.rmtree(dirname)
class FileUsingCodecTest(unittest.TestCase):
  """Base class for tests that need video files on disk.

  Creates a shared scratch directory (exported as CODEC_WORKDIR) before the
  test class runs and deletes it afterwards."""
  @classmethod
  def setUpClass(cls):
    cls._workdir = InitWorkDir()
  @classmethod
  def tearDownClass(cls):
    FinishWorkDir(cls._workdir)
|
from flask import(
Flask,
render_template,
flash,
redirect,
url_for,
session,
request,
logging
)
from . import mensagens as bp
from .. import mysql
from ..formularios import MessageForm
##############################################################
##############################################################
# MENSAGENS
##############################################################
##############################################################
@bp.route('/conversa/<string:id>', methods=['GET', 'POST'])
def conversa(id):
    """Open (and post a message to) the conversation with user *id*.

    Requires a logged-in session ('usuario_id'); otherwise redirects to the
    login page.  On success stores the partner's name and id in the session
    for the chat views.
    """
    if 'usuario_id' in session:
        form = MessageForm(request.form)
        # Create cursor
        db = mysql.connection.cursor()
        # look up the conversation partner by id
        get_result = db.execute("SELECT * FROM usuarios WHERE id=%s", [id])
        l_data = db.fetchone()
        if get_result > 0:
            # remember the partner's name and id ('lid') for the chat views
            session['nome'] = l_data['nome']
            usuario_id = session['usuario_id']
            session['lid'] = id
            if request.method == 'POST' and form.validate():
                txt_body = form.conteudo.data
                # Create cursor
                db = mysql.connection.cursor()
                # NOTE(review): 'autor' is bound to the partner's id and
                # 'destinatario' to the logged-in user — this looks swapped
                # (the logged-in user writes the message); confirm against
                # the mensagens schema and the conversas query before use.
                db.execute("INSERT INTO mensagens(conteudo, autor, destinatario) VALUES(%s, %s, %s)",
                           (txt_body, id, usuario_id))
                # Commit cursor
                mysql.connection.commit()
            # Get usuarios
            db.execute("SELECT * FROM usuarios")
            usuarios = db.fetchall()
            # Close Connection
            db.close()
            return render_template('conversa.html', usuarios=usuarios, form=form)
        else:
            flash('Sem permissão!', 'danger')
            return redirect(url_for('inicio.inicio'))
    else:
        return redirect(url_for('autorizar.entrar'))
@bp.route('/conversas', methods=['GET', 'POST'])
def conversas():
    """Render the message history with the currently selected partner.

    Uses session['lid'] (set by conversa()) to identify the partner;
    redirects to login if no conversation is selected.
    """
    if 'lid' in session:
        id = session['lid']
        usuario_id = session['usuario_id']
        # Create cursor
        db = mysql.connection.cursor()
        # Get messages in both directions, oldest first
        db.execute("SELECT * FROM mensagens WHERE (autor=%s AND destinatario=%s) OR (autor=%s AND destinatario=%s) "
                   "ORDER BY id ASC", (id, usuario_id, usuario_id, id))
        conversas = db.fetchall()
        # Close Connection
        db.close()
        return render_template('conversas.html', conversas=conversas, )
    return redirect(url_for('autorizar.entrar'))
|
import os
import time
from collections import Counter
import numpy as np
from garageofcode.kaggle.word2vec import get_words as get_words_list
def get_top_1000_words():
    """Return the list of words from the 1-1000 word-list file."""
    data_dir = "/home/jdw/garageofcode/data/"
    fn = os.path.join(data_dir, "1-1000.txt")
    # 'with' guarantees the handle is closed (the original leaked it), and
    # split() already yields the word list — no comprehension needed
    with open(fn, "r") as f:
        return f.read().split()
def get_sentences():
    """Return all 15-word sentences from the corpus, punctuation normalized."""
    fn = "/home/jdw/garageofcode/data/compression/big.txt"
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(fn, "r") as f:
        text = f.read()
    # single C-level pass instead of two join-comprehensions:
    # separators -> space, strong terminators -> period
    table = str.maketrans("\n;,:-\"!?", "      ..")
    text = text.translate(table)
    sentences = [sentence.strip() for sentence in text.split(".") if len(sentence.strip().split(" ")) == 15]
    return sentences
def test_category(get_items):
    """Memory drill: flash a stimulus for 5 seconds, then ask the user to
    reproduce it.

    Runs until the user presses Ctrl-C.  The answer is compared as a
    multiset, so element order does not matter.

    :param get_items: zero-argument callable returning a list of strings
    """
    while True:
        elems = get_items()
        s = "".join(["%s " %elem for elem in elems])
        line = ">> " + s
        # show the stimulus, wait, then overwrite it with blanks
        print(line, end="\r")
        time.sleep(5)
        print(" "*100, end="\r")
        try:
            ans = input(">> ").split(" ")
        except KeyboardInterrupt:
            print()
            break
        # compare as multisets: order is ignored
        c_elems = Counter(elems)
        c_ans = Counter(ans)
        if c_elems == c_ans:
            print(" " + s)
            print("Correct")
        else:
            # wrong answer: show the stimulus and wait for Enter
            print(" " + s)
            try:
                input()
            except KeyboardInterrupt:
                print()
                break
        print()
def main():
    """Run a memory-training session; currently drills sentence recall."""
    # alternative stimulus generators (digits, letters, common words) are
    # built here so they can be swapped into test_category below
    get_ints = lambda: [str(i) for i in np.random.randint(0, 10, size=[7])]
    get_letters = lambda: [chr(i) for i in np.random.randint(97, 123, size=[7])]
    #words_list = get_words_list()
    #get_words = lambda: np.random.choice(words_list, size=[5])
    words_list_1000 = get_top_1000_words()
    get_words_1000 = lambda: np.random.choice(words_list_1000, size=[7])
    sentences = get_sentences()
    get_sentence = lambda: [np.random.choice(sentences)]
    test_category(get_sentence)
    #for i in range(10):
    #    print("line %d" %i, end="\r")
    #    time.sleep(0.5)
# run the memory-training loop when executed as a script
if __name__ == '__main__':
    main()
|
import os, sys; sys.path.insert(0, os.path.join("..", ".."))
import re
from pattern.web import Google, URL
from pattern.web import Document, plaintext
# An interesting experiment on how to use the Google API
# and http://amplicate.com for opinion mining.
# (let's hope we get a real Amplicate API soon!)
query = "smurf"
# An example result, containing all the information we need:
# URL: http://amplicate.com/love/george-w-bush
# Title: <b>George</b> W <b>Bush</b> Hate - 64% People Agree (803 opinions)
for r in Google().search(query+" site:amplicate.com"):
    print r.title
    u = URL(r.url)
    # only keep amplicate "love"/"hate" opinion pages
    if "love" in u.path \
    or "hate" in u.path:
        b = True
        p = u.page.lower().replace("-", "")
        # the page slug must start with the first query word and contain all of them
        for i, w in enumerate(query.lower().replace("-", " ").split()):
            if i == 0 and not p.startswith(w):
                b=False; break
            if w not in p:
                b=False; break
        if b:
            love = "love" in u.path
            # parse the agreement fraction and opinion count out of the title
            f = int(re.search("- ([0-9]{1,3})%", r.title).group(1)) * 0.01
            n = int(re.search("\(([0-9]+) opinions", r.title).group(1))
            print r.title
            print r.url
            # f is the fraction agreeing with the page's own sentiment, so
            # invert it for the opposite sentiment
            print "love:", love and f or (1-f)
            print "hate:", love and (1-f) or f
            print "opinions:", int(round(n / f))
            print
# Of course we can dig in deeper by following the link to r.url,
# but that would classify as screen-scraping.
#dom = Document(u.download())
#for p in dom.by_tag("p.comment-body"):
#    print plaintext(p.content)
#    print
#break
|
"""Compile embedding evaluation stats and generate plots as presented in Van Paridon & Thompson (2020)."""
import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
import argparse
path = os.path.dirname(__file__)
df_corpus = pd.read_csv(os.path.join(path, 'paper_results', 'table_data.tsv'), sep='\t')
sns.set(context='paper', style='whitegrid', font_scale=1.0, rc={'grid.color': '.9', 'grid.linewidth': '.5'})
sns.set_palette('Set2') # use MPI for Psycholinguistics style color palette
def gather_similarities(folder):
    """Compile all semantic similarities evaluation results.

    :param folder: directory where the results are located
    :return: pandas DataFrame containing all the similarities results
    """
    # Seed the list with an empty frame so an empty folder still yields a
    # DataFrame with the expected columns.  pd.concat replaces the
    # deprecated DataFrame.append (removed in pandas 2.0) and avoids the
    # O(n^2) copy-per-file behaviour of append-in-a-loop.
    frames = [pd.DataFrame(columns=['rank r', 'adjusted rank r', 'vecs', 'source', 'lang'])]
    for fname in sorted(os.listdir(folder)):
        if not fname.startswith('cc'):
            df_temp = pd.read_csv(os.path.join(folder, fname), sep='\t')
            df_temp['source'] = df_temp['source'].apply(lambda x: ' '.join(x.split("-")[1:]))
            df_temp['source'] = df_temp['source'].str.replace('.tsv', '')
            df_temp['vecs'] = fname.split('.')[0]
            df_temp['vecs'] = df_temp['vecs'].str.replace('cc', 'fasttext').replace('wiki-subs', 'wiki+subs')
            df_temp['lang'] = fname.split('.')[1]
            frames.append(df_temp)
    df = pd.concat(frames, ignore_index=True)
    # drop relatedness-only and '-no' variants; keep similarity subsets
    df = df.loc[df['source'].apply(lambda x: False if x.endswith('rel') else True)]
    df = df.loc[df['source'].apply(lambda x: False if x.endswith('no') else True)]
    # for English, keep only the similarity subset of wordsim353
    df = df.loc[(df['lang'] != 'en') | (df['source'] != 'wordsim353 all')]
    df['source'] = df['source'].str.replace(' sim', '')
    df['source'] = df['source'].str.replace(' all', '')
    df = df.merge(df_corpus[['lang', 'vecs', 'language']], how='left', on=['lang', 'vecs'])
    df['label'] = df.apply(lambda x: f'{x["language"]}: {x["source"]}'.lower(), axis=1)
    df = df.sort_values(['label', 'vecs'])
    return df
def gather_analogies(folder):
    """Compile all analogies evaluation results.

    :param folder: directory where the results are located
    :return: pandas DataFrame containing all the analogies results
    """
    # Seed with an empty frame so an empty folder still yields the expected
    # columns; pd.concat replaces the deprecated DataFrame.append (removed
    # in pandas 2.0) and avoids O(n^2) append-in-a-loop copies.
    frames = [pd.DataFrame(columns=['score', 'adjusted score', 'vecs', 'source', 'lang'])]
    for fname in sorted(os.listdir(folder)):
        if not fname.startswith('cc'):
            df_temp = pd.read_csv(os.path.join(folder, fname), sep='\t')
            df_temp['source'] = df_temp['source'].apply(lambda x: ' '.join(x.split("-")[1:]))
            df_temp['source'] = df_temp['source'].str.replace('.tsv', '')
            df_temp['source'] = df_temp['source'].str.replace('_nocountries', ' (no geo)')
            df_temp['source'] = df_temp['source'].str.replace('_no_countries', ' (no geo)')
            df_temp['source'] = df_temp['source'].str.replace(' google', '')
            df_temp['vecs'] = fname.split('.')[0]
            df_temp['vecs'] = df_temp['vecs'].str.replace('cc', 'fasttext').replace('wiki-subs', 'wiki+subs')
            df_temp['lang'] = fname.split('.')[1]
            frames.append(df_temp)
    df = pd.concat(frames, ignore_index=True)
    df = df.merge(df_corpus[['lang', 'vecs', 'language']], how='left', on=['lang', 'vecs'])
    df['label'] = df.apply(lambda x: f'{x["language"]}: {x["source"]}'.lower(), axis=1)
    # drop the semrel/bless datasets
    df = df.loc[df['source'].apply(lambda x: False if x.endswith('semrel') else True)]
    df = df.loc[df['source'].apply(lambda x: False if x.endswith('bless') else True)]
    df = df.sort_values(['label', 'vecs'])
    return df
def gather_norms(folder):
    """Compile all lexical norms evaluation results.

    :param folder: directory where the results are located
    :return: tuple of pandas DataFrames (general norms, Binder norms)
    """
    # Seed with an empty frame so an empty folder still yields the expected
    # columns; pd.concat replaces the deprecated DataFrame.append (removed
    # in pandas 2.0) and avoids O(n^2) append-in-a-loop copies.
    frames = [pd.DataFrame(columns=['norm', 'adjusted r', 'adjusted r-squared', 'r-squared', 'vecs', 'source', 'lang'])]
    for fname in sorted(os.listdir(folder)):
        if not fname.startswith('cc'):
            df_temp = pd.read_csv(os.path.join(folder, fname), sep='\t')
            df_temp['source'] = df_temp['source'].str.replace('.tsv', '')
            df_temp['source'] = df_temp['source'].apply(lambda x: f'{"-".join(x.split("-")[1:-1])} ({x.split("-")[-1]})')
            df_temp['vecs'] = fname.split('.')[0]
            df_temp['vecs'] = df_temp['vecs'].str.replace('cc', 'fasttext').replace('wiki-subs', 'wiki+subs')
            df_temp['lang'] = fname.split('.')[1]
            frames.append(df_temp)
    df = pd.concat(frames, ignore_index=True)
    # split off the Binder (2016) norms, which get their own plot
    df_norms = df.loc[df['source'] != 'binder (2016)'].copy()
    df_norms = df_norms.merge(df_corpus[['lang', 'vecs', 'language']], how='left', on=['lang', 'vecs'])
    df_norms['label'] = df_norms.apply(lambda x: f'{x["language"]}: {x["source"]} {x["norm"]}'.lower(), axis=1)
    df_norms = df_norms.sort_values(['label', 'vecs'])
    df_binder = df.loc[df['source'] == 'binder (2016)'].copy()
    df_binder = df_binder.loc[(df_binder['norm'] != 'mean r') & (df_binder['norm'] != 'word length')]
    df_binder['label'] = df_binder['norm']
    df_binder = df_binder.sort_values(['label', 'vecs'])
    return df_norms, df_binder
def _plot_scores(df, xlabel, aspect=.5):
    """Draw a horizontal bar plot of scores per label, one bar per vector set.

    :param df: DataFrame with columns 'label', 'vecs' and the score column
    :param xlabel: name of the score column to plot on the x-axis
    :param aspect: width/height aspect ratio of the plot
    :return: the seaborn FacetGrid
    """
    g = sns.catplot(x=xlabel, y='label', kind='bar', data=df, legend=False,
                    hue='vecs', hue_order=['wiki+subs', 'subs', 'wiki'],
                    height=len(df) / 12, aspect=aspect
                    )
    g.set(xticks=(0, .2, .4, .6, .8, 1))
    # labels on the right edge, bars growing leftwards (reversed xlim below)
    g.ax.yaxis.tick_right()
    g.despine(left=True, right=False)
    g.set(xlim=(1.1, 0), ylabel=None)
    g.ax.legend(loc='upper left', bbox_to_anchor=(1.1, 0), frameon=False)
    return g
def _plot_wordcounts(df):
    """Scatter-plot wordcount-adjusted scores: subtitle vs. wikipedia vectors.

    :param df: DataFrame with columns 'lang', 'vecs', 'kind', 'score', 'words'
    :return: the seaborn FacetGrid
    """
    # average scores per language/vector-set/task kind
    df_means = df.groupby(['lang', 'vecs', 'kind'], as_index=False).mean()
    # normalize score by (log10) training corpus size
    df_means['log10 wordcount'] = np.log10(df_means['words'])
    df_means['wordcount-adjusted score'] = df_means['score'] / df_means['log10 wordcount']
    # pair up subs and wiki rows so each point has both coordinates
    df_subs = df_means.loc[df_means['vecs'] == 'subs'].rename(columns={'wordcount-adjusted score': 'wordcount-adjusted score for subtitle vectors'}).reset_index()
    df_wiki = df_means.loc[df_means['vecs'] == 'wiki'].rename(columns={'wordcount-adjusted score': 'wordcount-adjusted score for wikipedia vectors'}).reset_index()
    df_means = df_subs
    df_means['wordcount-adjusted score for wikipedia vectors'] = df_wiki['wordcount-adjusted score for wikipedia vectors']
    g = sns.relplot(kind='scatter', data=df_means, hue='kind',
                    x='wordcount-adjusted score for wikipedia vectors', y='wordcount-adjusted score for subtitle vectors',
                    height=4, aspect=.8,
                    )
    g._legend.remove()
    g.ax.legend(loc='lower right', frameon=False)
    g.ax.legend_.texts[0].set_text('')
    # identity line: points above favour subtitles, below favour wikipedia
    g.ax.plot([0, .11], [0, .11], linestyle='--', color='lightgray')
    g.set(xlim=(0, .11), ylim=(0, .11))
    return g
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='generate plots from the subs2vec paper')
    argparser.add_argument('--unadjusted', action='store_true', help='generate plots from scores not adjusted for missing data')
    args = argparser.parse_args()
    # adjusted-for-missing-data score columns carry an 'adjusted ' prefix
    if args.unadjusted:
        prefix = ''
    else:
        prefix = 'adjusted '
    # create dataframes
    df_analogies = gather_analogies(os.path.join(path, 'paper_results', 'analogies'))
    df_analogies.to_csv('analogies.tsv', sep='\t', index=False)
    df_similarities = gather_similarities(os.path.join(path, 'paper_results', 'similarities'))
    df_similarities.to_csv('similarities.tsv', sep='\t', index=False)
    df_norms, df_binder = gather_norms(os.path.join(path, 'paper_results', 'norms'))
    df_norms.to_csv('norms.tsv', sep='\t', index=False)
    df_binder.to_csv('binder.tsv', sep='\t', index=False)
    # cut norms and binder dataframes up so the plots will fit on a page
    chunk = int(len(df_norms) / 12)
    df_norms1 = df_norms.iloc[range(chunk * 3)]
    df_norms2 = df_norms.iloc[range(chunk * 3, chunk * 6)]
    df_norms3 = df_norms.iloc[range(chunk * 6, chunk * 9)]
    df_norms4 = df_norms.iloc[range(chunk * 9, len(df_norms))]
    df_binder1 = df_binder.iloc[range(int(int(len(df_binder) / 3) / 2) * 3)]
    df_binder2 = df_binder.iloc[range(int(int(len(df_binder) / 3) / 2) * 3, len(df_binder))]
    # draw barplots (each figure is saved as pdf + png, then cleared)
    g_analogies = _plot_scores(df_analogies, f'{prefix}score', .7)
    plt.tight_layout()
    plt.savefig('analogies.pdf')
    plt.savefig('analogies.png', dpi=600)
    plt.clf()
    g_similarities = _plot_scores(df_similarities, f'{prefix}rank r', .5)
    plt.tight_layout()
    plt.savefig('similarities.pdf')
    plt.savefig('similarities.png', dpi=600)
    plt.clf()
    g_norms1 = _plot_scores(df_norms1, f'{prefix}r', .5)
    plt.tight_layout()
    plt.savefig('norms1.pdf')
    plt.savefig('norms1.png', dpi=600)
    plt.clf()
    g_norms2 = _plot_scores(df_norms2, f'{prefix}r', .5)
    plt.tight_layout()
    plt.savefig('norms2.pdf')
    plt.savefig('norms2.png', dpi=600)
    plt.clf()
    g_norms3 = _plot_scores(df_norms3, f'{prefix}r', .5)
    plt.tight_layout()
    plt.savefig('norms3.pdf')
    plt.savefig('norms3.png', dpi=600)
    plt.clf()
    g_norms4 = _plot_scores(df_norms4, f'{prefix}r', .5)
    plt.tight_layout()
    plt.savefig('norms4.pdf')
    plt.savefig('norms4.png', dpi=600)
    plt.clf()
    g_binder1 = _plot_scores(df_binder1, f'{prefix}r', .5)
    plt.tight_layout()
    plt.savefig('binder1.pdf')
    plt.savefig('binder1.png', dpi=600)
    plt.clf()
    g_binder2 = _plot_scores(df_binder2, f'{prefix}r', .5)
    plt.tight_layout()
    plt.savefig('binder2.pdf')
    plt.savefig('binder2.png', dpi=600)
    plt.clf()
    if not args.unadjusted:
        # draw scatterplot (only meaningful for adjusted scores)
        df_a = df_analogies[['lang', 'source', 'vecs', 'adjusted score']].rename(columns={'adjusted score': 'score'})
        df_s = df_similarities[['lang', 'source', 'vecs', 'adjusted rank r']].rename(columns={'adjusted rank r': 'score'})
        df_n = df_norms[['lang', 'source', 'vecs', 'adjusted r']].rename(columns={'adjusted r': 'score'})
        df_a['kind'] = 'analogies'
        df_s['kind'] = 'similarities'
        df_n['kind'] = 'norms'
        df_wordcounts = pd.concat([df_a, df_s, df_n])
        df_wordcounts = df_wordcounts.merge(df_corpus[['lang', 'vecs', 'words']], how='inner', on=['lang', 'vecs'])
        df_wordcounts.to_csv('model_data.tsv', sep='\t', index=False)
        sns.set_palette(sns.color_palette('Set2')[3:])  # skip the first three colors, because we use those to label training corpus
        g_wordcounts = _plot_wordcounts(df_wordcounts.dropna())
        plt.tight_layout()
        plt.savefig('wordcounts.pdf')
        plt.savefig('wordcounts.png', dpi=600)
        plt.clf()
|
from unittest import TestCase
import numpy as np
import torch
from strimadec.models.modules import RNN
class testRNN(TestCase):
    """Unit tests for the strimadec RNN module."""

    BATCH_SIZE = 5
    SEED = 7

    def setUp(self):
        # Fixed seed: build_random_config draws from np.random, so the
        # config is deterministic per test.  NOTE: the draw ORDER inside
        # build_random_config is part of that determinism — do not reorder.
        np.random.seed(testRNN.SEED)
        # (deterministic) random setup of RNN
        self.random_config = testRNN.build_random_config()
        return

    def test_RNN_forward_works_as_expected(self):
        """Forward pass: output shapes, bias initialization, and reuse of h_i."""
        batch_size = testRNN.BATCH_SIZE
        img_channels, img_dim = self.random_config["img_channels"], self.random_config["img_dim"]
        latent_space_dim = self.random_config["latent_space_dim"]
        hidden_state_dim = self.random_config["hidden_state_dim"]
        output_size = self.random_config["output_size"]
        rnn = RNN(self.random_config)
        # generate fake data
        fake_img = 10 * torch.randn([batch_size, img_channels, img_dim, img_dim])
        fake_z_im1 = torch.randn([batch_size, latent_space_dim])
        fake_h_im1 = torch.randn([batch_size, hidden_state_dim])
        # run forward
        omega_i, h_i = rnn(fake_img, fake_z_im1, fake_h_im1)
        # check shapes
        self.assertTrue(omega_i.shape == torch.Size([batch_size, output_size]))
        self.assertTrue(h_i.shape == torch.Size([batch_size, hidden_state_dim]))
        # check initialization worked: freshly initialized, the output should
        # equal the configured output bias (first entry squashed by sigmoid)
        expected_omega_i = torch.tensor(self.random_config["output_bias_init"]).repeat(
            batch_size, 1
        )
        expected_omega_i[:, 0] = torch.sigmoid(expected_omega_i[:, 0])
        self.assertTrue((expected_omega_i - omega_i).pow(2).sum() < 1e-12)
        # check that we can rerun using h_i
        fake_h_im1 = h_i
        # run forward
        omega_i, h_i = rnn(fake_img, fake_z_im1, fake_h_im1)
        # check shapes
        self.assertTrue(omega_i.shape == torch.Size([batch_size, output_size]))
        self.assertTrue(h_i.shape == torch.Size([batch_size, hidden_state_dim]))
        # check initialization worked
        expected_omega_i = torch.tensor(self.random_config["output_bias_init"]).repeat(
            batch_size, 1
        )
        expected_omega_i[:, 0] = torch.sigmoid(expected_omega_i[:, 0])
        self.assertTrue((expected_omega_i - omega_i).pow(2).sum() < 1e-12)
        return

    @staticmethod
    def build_random_config():
        """Build a randomized (but seed-deterministic) RNN config dict."""
        # latent space: presence flag + where (3) + what (random size)
        z_pres_dim, z_where_dim, z_what_dim = 1, 3, np.random.randint(10, 20)
        p_pres_init = np.random.rand(1, 1)
        mean_z_where_init = np.random.randint(1, 5, size=(1, 3))
        log_var_z_where_init = np.random.randint(1, 5, size=(1, 3))
        output_bias_init = np.concatenate((p_pres_init, mean_z_where_init, log_var_z_where_init), 1)
        config = {
            "baseline_net": False,
            "img_channels": np.random.randint(1, 5),
            "img_dim": 8 * np.random.randint(4, 8),
            "hidden_state_dim": np.random.randint(25, 100),
            "latent_space_dim": z_pres_dim + z_where_dim + z_what_dim,
            "FC_hidden_dims": np.random.randint(1, 100, size=(np.random.randint(1, 4))),
            "output_size": z_pres_dim + 2 * z_where_dim,
            "output_bias_init": output_bias_init,
        }
        return config
import copy
import itertools
from graph_generator import generate_complete_graph
from models import LinearLayout, Graph
from view import show_linear_layouts
def observation_1(show_layouts=True):
    """
    Generate all possible 2-stack 1-queue layouts of a complete graphs with
    8 vertices. Except that edges (i, i + 1) are always assigned to a stack
    pages which skips minor variations and reduces the total number to 32
    layouts.

    :param show_layouts: if True, display each layout found
    :return: list of valid LinearLayout objects
    """
    num_vertices = 8
    stacks = 2
    queues = 1
    graph = generate_complete_graph(num_vertices)
    mll = LinearLayout(graph=graph,
                       order=list(range(1, num_vertices + 1)),
                       stacks=stacks, queues=queues)
    # Add edges that can always be on the stack without crossings
    remaining_edges = graph.edges
    if stacks:
        for i in range(1, num_vertices):
            mll.stacks[0].append((i, i + 1))
        mll.stacks[0].append((1, num_vertices))
        remaining_edges = list(set(graph.edges) - set(mll.stacks[0]))
    def search(mll, edges):
        # Backtracking: try the next edge on each stack/queue page, recurse,
        # and undo the placement.  Every completed assignment is recorded in
        # the enclosing mlls list (mutated in place).
        edges = list(edges)
        if not edges:
            mlls.append(copy.deepcopy(mll))
            return None
        edge = edges.pop()
        for stack in mll.stacks:
            stack.append(edge)
            if mll.is_stack_valid(stack):
                if search(mll, edges):
                    return mll
            stack.remove(edge)
        for queue in mll.queues:
            queue.append(edge)
            if mll.is_queue_valid(queue):
                if search(mll, edges):
                    return mll
            queue.remove(edge)
    mlls = []
    search(mll, remaining_edges)
    for mll in mlls:
        if not mll.is_valid():
            raise Exception('MLL is not valid!')
        if show_layouts:
            show_linear_layouts([mll])
    return mlls
def observation_2():
    """
    Show that given two complete graphs with 8 vertices on a 2-stack 1-queue
    layout the vertices can only interleave once.

    Exhaustively combines every K8 layout from observation_1 (and its
    stack-swapped variants) with every interleaving pattern and checks which
    combinations stay valid.
    """
    mlls_1 = observation_1(show_layouts=False)
    mlls_2 = []
    # create a second list of layouts that contains the same layouts and
    # additionally all of them with the stack pages swapped
    for mll in copy.deepcopy(mlls_1):
        mlls_2.append(mll)
        mll_new = copy.deepcopy(mll)
        mll_new.stacks[0], mll_new.stacks[1] = \
            mll_new.stacks[1], mll_new.stacks[0]
        mlls_2.append(mll_new)
    # Create a graph of two independent complete graphs with 8 vertices
    graph = Graph()
    num_vertices = 8
    for i in range(1, num_vertices + 1):
        for j in range(i + 1, num_vertices + 1):
            graph.add_edge(i, j)
    for i in range(num_vertices + 1, num_vertices * 2 + 1):
        for j in range(i + 1, num_vertices * 2 + 1):
            graph.add_edge(i, j)
    mlls = []
    for permutation in list(itertools.product([True, False], repeat=8)):
        if all(permutation):
            # Skip the permutation that would separate both K8
            continue
        for m1 in mlls_1:
            for m2 in mlls_2:
                order = []
                o1 = list(range(1, 9))
                o2 = list(range(9, 17))
                # All these permutations create all the possible orders in
                # which the two K8 can interleave.
                for p in permutation:
                    if p:
                        order.append(o1.pop(0))
                    else:
                        order.append(o2.pop(0))
                        order.append(o1.pop(0))
                while o2:
                    order.append(o2.pop(0))
                mll = LinearLayout(graph=graph, order=order,
                                   stacks=2, queues=1)
                # Create a new layout by combining the layouts of m1 and m2.
                # Note that the edges of m2 need to be adjusted here. m2 has
                # vertices form 1-8 but here they should become 9-16
                mll.stacks[0] = m1.stacks[0] + [(a + 8, b + 8) for a, b in m2.stacks[0]]
                mll.stacks[1] = m1.stacks[1] + [(a + 8, b + 8) for a, b in m2.stacks[1]]
                mll.queues[0] = m1.queues[0] + [(a + 8, b + 8) for a, b in m2.queues[0]]
                if mll.is_valid():
                    # This order is the only possible order in which
                    # both K8 can interleave (except for the mirrored
                    # version). Note that the list is ordered
                    # except that 9 and 8 are swapped.
                    if order == [1, 2, 3, 4, 5, 6, 7, 9, 8, 10, 11, 12, 13, 14, 15, 16]:
                        mlls.append(mll)
                    else:
                        print('The observation is false!')
    print('%s layouts found' % len(mlls))
    for mll in mlls:
        pass  # Uncomment the next line to show the layouts
        #show_linear_layouts([mll])
def observation_3():
    """
    Show that given two complete graphs with 8 vertices that share two vertices
    there is only one vertex order possible where the shared vertices are
    exactly in the middle.

    Tries every pair of shared vertices and every combination of K8 layouts
    from observation_1, and checks which combinations stay valid.
    """
    mlls_1 = observation_1(show_layouts=False)
    mlls_2 = []
    # create a second list of layouts that contains the same layouts and
    # additionally all of them with the stack pages swapped
    for mll in copy.deepcopy(mlls_1):
        mlls_2.append(mll)
        mll_new = copy.deepcopy(mll)
        mll_new.stacks[0], mll_new.stacks[1] = \
            mll_new.stacks[1], mll_new.stacks[0]
        mlls_2.append(mll_new)
    mlls = []
    # The graph has 14 vertices. Generate all pairs. These pairs are going to
    # be the two shared vertices
    for sv1, sv2 in itertools.combinations(list(range(1, 15)), 2):
        # Get lists of vertices that belong each K8
        if sv1 > 6:
            left_vertices = [1, 2, 3, 4, 5, 6]
        elif sv2 > 7:
            left_vertices = [1, 2, 3, 4, 5, 6, 7]
            left_vertices.remove(sv1)
        else:
            left_vertices = [1, 2, 3, 4, 5, 6, 7, 8]
            left_vertices.remove(sv1)
            left_vertices.remove(sv2)
        if sv2 < 9:
            right_vertices = [9, 10, 11, 12, 13, 14]
        elif sv1 < 8:
            right_vertices = [8, 9, 10, 11, 12, 13, 14]
            right_vertices.remove(sv2)
        else:
            right_vertices = [7, 8, 9, 10, 11, 12, 13, 14]
            right_vertices.remove(sv1)
            right_vertices.remove(sv2)
        # build the union graph: two K8s glued at the two shared vertices
        graph = Graph()
        for v in range(1, 15):
            graph.add_vertex(v)
        for i in left_vertices + [sv1, sv2]:
            for j in left_vertices + [sv1, sv2]:
                if i != j:
                    graph.add_edge(i, j)
        for i in right_vertices + [sv1, sv2]:
            for j in right_vertices + [sv1, sv2]:
                if i != j:
                    graph.add_edge(i, j)
        for m1 in mlls_1:
            for m2 in mlls_2:
                order = list(range(1, 15))
                mll = LinearLayout(graph=graph, order=order,
                                   stacks=2, queues=1)
                # sorted vertex lists map positions 1..8 of m1/m2 onto the
                # actual vertex ids of each K8
                l_vertices = copy.copy(left_vertices)
                l_vertices.extend([sv1, sv2])
                l_vertices.sort()
                r_vertices = copy.copy(right_vertices)
                r_vertices.extend([sv1, sv2])
                r_vertices.sort()
                # Create a new layout by combining the layouts of m1 and m2.
                transformation = (
                    (mll.stacks[0], m1.stacks[0], l_vertices),
                    (mll.stacks[1], m1.stacks[1], l_vertices),
                    (mll.queues[0], m1.queues[0], l_vertices),
                    (mll.stacks[0], m2.stacks[0], r_vertices),
                    (mll.stacks[1], m2.stacks[1], r_vertices),
                    (mll.queues[0], m2.queues[0], r_vertices),
                )
                for target_page, source_page, vertex_list in transformation:
                    for edge in source_page:
                        v1 = vertex_list[edge[0] - 1]
                        v2 = vertex_list[edge[1] - 1]
                        if v1 == sv1 and v2 == sv2 \
                                and (v1, v2) in mll.stacks[0] + mll.stacks[1] + mll.queues[0]:
                            # Don't add the edge between the shared vertices twice
                            continue
                        target_page.append((v1, v2))
                if mll.is_valid():
                    # The only way to get a valid layout is if the shared
                    # vertices are placed in the middle
                    if sv1 == 7 and sv2 == 8:
                        mlls.append(mll)
                    else:
                        print('The observation is false!')
    print('%s layouts found' % len(mlls))
    for mll in mlls:
        pass  # Comment out the next line to hide the layouts
        show_linear_layouts([mll])
|
import logging
from park.envs.circuit.simulator.utility.logging import StructuredFormatterBuilder, get_console_handler
__all__ = ['get_logger', 'get_default_logger']
def get_logger(name, *handlers: logging.Handler, level=logging.DEBUG, propagate=True):
    """Return the named logger configured with exactly the given handlers,
    level and propagation flag.  Any handlers previously attached to the
    logger are discarded first."""
    configured = logging.getLogger(name)
    configured.setLevel(level)
    # slice assignment replaces the handler list in place
    configured.handlers[:] = list(handlers)
    configured.propagate = propagate
    return configured
def get_default_logger(name, *, level=logging.DEBUG, propagate=True, **kwargs):
    """Return a logger wired to a single colorful console handler.

    Extra keyword arguments are forwarded to StructuredFormatterBuilder."""
    formatter = StructuredFormatterBuilder(**kwargs).get_colorful_formatter()
    console = get_console_handler(formatter=formatter)
    return get_logger(name, console, level=level, propagate=propagate)
|
from pyg_base import presync, dt, drange, eq
from pyg_base import df_fillna, df_reindex, nona, reducing, Dict, np_reindex
import pandas as pd; import numpy as np
import pytest
def test_df_fillna():
    """df_fillna: interpolation methods, scalar fill, and frame handling."""
    s = pd.Series([0., 1., 4., np.nan, 16.], np.arange(0,5))
    assert np.isnan(df_fillna(s)[3])
    assert df_fillna(s, 'bfill')[3] == 16
    assert df_fillna(s, 'ffill')[3] == 4
    assert df_fillna(s, 'linear')[3] == 10
    assert df_fillna(s, 5)[3] == 5
    # quadratic fit through 0,1,4,16 gives 9 at index 3
    assert round(df_fillna(s, 'quadratic')[3],10) == 9.
    # same behaviour on a bare numpy array
    s = s.values
    assert np.isnan(df_fillna(s)[3])
    assert df_fillna(s, 'bfill')[3] == 16
    assert df_fillna(s, 'ffill')[3] == 4
    assert df_fillna(s, 'linear')[3] == 10
    assert df_fillna(s, 5)[3] == 5
    assert round(df_fillna(s, 'quadratic')[3],10) == 9.
    s = pd.Series([np.nan, 1., 4., np.nan, 16.], np.arange(0,5))
    # 'nona' drops all NaNs; 'fnna' appears to trim only before the first
    # valid value (consistent with length 4 here)
    assert len(df_fillna(s, 'nona')) == 3
    assert len(df_fillna(s, 'fnna')) == 4
    df = pd.DataFrame(dict(a = [0,np.nan, 1, 2, 3], b = [0,np.nan, np.nan,3,4]))
    assert len(df_fillna(df, 'nona')) == 4
    assert eq(df_fillna(df, 'linear'), pd.DataFrame(dict(a = [0,0.5, 1, 2, 3], b = [0.,1,2,3,4])))
def test_df_fillna_multiple():
    """df_fillna with a list of methods applies them in sequence."""
    s = pd.Series([np.nan, 1., 4., np.nan, 16.], np.arange(0,5))
    # ffill then bfill: interior gap filled forward, leading NaN backward
    x = df_fillna(s, ['ffill', 'bfill'])
    assert x[0] == 1 and x[3] == 4
    x = df_fillna(s, ['bfill', 'bfill'])
    assert x[0] == 1 and x[3] == 16
    # same behaviour on a bare numpy array
    s = s.values
    x = df_fillna(s, ['ffill', 'bfill'])
    assert x[0] == 1 and x[3] == 4
    x = df_fillna(s, ['bfill', 'bfill'])
    assert x[0] == 1 and x[3] == 16
def test_df_fillna_no_overlap():
    """Reindexing onto a disjoint index yields all-NaN unless filled."""
    s = pd.Series([1,2,3,4,5], np.arange(0,10,2))   # even index
    index = np.arange(1,11,2)                       # odd index: no overlap
    assert len(nona(df_reindex(s, index))) == 0
    assert eq(s.reindex(index, method = 'ffill'), pd.Series([1,2,3,4,5], index))
def test_df_fillna_limit():
    """The limit parameter caps how many consecutive NaNs are forward-filled."""
    s = pd.Series([np.nan, 1., 4., np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 16.], np.arange(12))
    x = df_fillna(s, 'ffill', limit = 4)
    # positions 3..6 are filled (4 values), position 7 stays NaN
    assert x[6] == 4.0 and np.isnan(x[7])
def test_df_reindex():
    """df_reindex on a list of series: 'inner' intersects indices, 'outer' unions them."""
    # Five 20-day series, each shifted forward by one day (offsets 0..4).
    tss = [pd.Series(np.arange(20), drange(i, 19+i)) for i in range(5)]
    # Intersection spans 20 - 4 = 16 shared days.
    inner = pd.concat(df_reindex(tss, 'inner'), axis = 1)
    assert len(inner) == 16
    # Union spans 20 + 4 = 24 days; the earliest series is NaN on the late tail...
    outer = pd.concat(df_reindex(tss, 'outer'), axis = 1)
    assert len(outer) == 24
    assert np.isnan(outer.iloc[-1,0])
    # ...unless a fill method carries its last value (19) forward.
    outer2 = pd.concat(df_reindex(tss, 'outer', 'ffill'), axis = 1)
    assert len(outer2) == 24
    assert outer2.iloc[-1,0] == 19.
def test_df_reindex_fail():
    """df_reindex rejects an unsupported index specification but accepts a pandas object."""
    ts = pd.Series(range(100), drange(-99))
    # A bare int is not a valid target index.
    with pytest.raises(ValueError):
        df_reindex(ts, 100)
    # A pandas object supplies its own index as the target.
    assert eq(df_reindex(ts, ts[10:]), ts[10:])
def test_df_reindex_no_overlap():
    """With fully disjoint indices, df_reindex must agree with pandas' method-based reindex."""
    source = pd.Series(np.arange(5), np.arange(0,10,2))   # values on even labels 0..8
    target = np.arange(1,11,2)                            # odd labels 1..9, no overlap
    for how in ['bfill', 'ffill', 'pad']:
        expected = source.reindex(target, method = how)
        assert eq(df_reindex(source, target, how), expected)
def test_df_reindex_dict():
    """df_reindex on dicts of series reindexes each value and preserves the dict type."""
    tss = {i : pd.Series(np.arange(20), drange(i, 19+i)) for i in range(5)}
    # Inner join: 16 shared days.
    inner = df_reindex(tss, 'inner')
    assert len(inner[0]) == 16
    # Outer join: 24 days; series 0 ends earliest, so its tail is NaN...
    outer = df_reindex(tss, 'outer')
    assert len(outer[0]) == 24
    assert np.isnan(outer[0][-1])
    # ...unless forward-filled.
    outer2 = df_reindex(tss, 'outer', method = 'ffill')
    assert outer2[0][-1] == 19
    # A pyg Dict input comes back as a Dict, same alignment.
    inner2 = df_reindex(Dict(tss), 'inner')
    assert isinstance(inner2, Dict)
    assert len(inner2[0]) == 16
def test_np_renindex():
    """np_reindex: attach a date index to a plain numpy array.

    NOTE(review): the function under test is np_reindex; the test name has a
    typo ('renindex'), kept as-is so the collected test id stays stable.
    """
    ts = np.arange(1000) * 1.
    # Shorter index: only the trailing 500 observations are kept.
    index = pd.Series(range(500), drange(-499))
    assert eq(np_reindex(ts, index), pd.Series(np.arange(500,1000), drange(-499)))
    # Longer index: truncated to the last 1000 dates.
    index = pd.Series(range(1500), drange(-1499))
    assert eq(np_reindex(ts, index), pd.Series(np.arange(1000), drange(-999)))
    # 2-d arrays become DataFrames with the supplied column names.
    ts = np.random.normal(0,1,(1000,3)) * 1.
    index = drange(-999)
    assert eq(np_reindex(ts, index, ['a', 'b', 'c']), pd.DataFrame(ts, index, ['a', 'b', 'c']))
def test_presync_simple():
    """presync aligns indices/columns of pandas arguments before calling the function."""
    x = pd.Series([1,2,3,4], drange(-3))
    y = pd.Series([1,2,3,4], drange(-4,-1))
    z = pd.DataFrame([[1,2],[3,4]], drange(-3,-2), ['a','b'])
    f = lambda a, b: a+b
    # A synced Series + DataFrame addition keeps the frame's columns.
    assert list(presync(f)(x,z).columns) == ['a', 'b']
    res = presync(f, index='outer', method = 'ffill')(x,z)
    assert eq(res.a.values, np.array([2,5,6,7]))
    # reducing(f) folds f over the list; syncing outside or inside the fold is equivalent.
    res = presync(reducing(f), index='outer', method = 'ffill')([x,y,z])
    assert eq(res, pd.DataFrame(dict(a = [np.nan, 4, 8, 10, 11], b = [np.nan, 5, 9, 11, 12]), index = drange(-4)))
    assert eq(reducing(presync(f, index='outer', method = 'ffill'))([x,y,z]), res)
def test_presync_with_dicts():
    """presync reaches into plain-dict arguments and aligns their values too."""
    function = lambda a, b: a['x'] + a['y'] + b
    self = presync(function, 'outer', method = 'ffill')
    x = pd.Series([1,2,3,4], drange(-3))
    y = pd.Series([1,2,3,4], drange(-4,-1))
    z = pd.DataFrame([[1,2],[3,4]], drange(-3,-2), ['a','b'])
    # x and y are 'buried' inside a dict passed positionally; b arrives as a kwarg.
    args = (dict(x = x, y = y),)
    kwargs = dict(b = z)
    res = self(*args, **kwargs)
    assert eq(res, pd.DataFrame(dict(a = [np.nan, 4, 8, 10, 11], b = [np.nan, 5, 9, 11, 12]), index = drange(-4)))
def test_presync_with_Dicts():
    """Same as the plain-dict test, but values are read via pyg Dict attribute access."""
    function = lambda a, b: a.x + a.y + b
    self = presync(function, 'outer', method = 'ffill')
    x = pd.Series([1,2,3,4], drange(-3))
    y = pd.Series([1,2,3,4], drange(-4,-1))
    z = pd.DataFrame([[1,2],[3,4]], drange(-3,-2), ['a','b'])
    # Same expected frame as test_presync_with_dicts — only the container type differs.
    args = (Dict(x = x, y = y),)
    kwargs = dict(b = z)
    res = self(*args, **kwargs)
    assert eq(res, pd.DataFrame(dict(a = [np.nan, 4, 8, 10, 11], b = [np.nan, 5, 9, 11, 12]), index = drange(-4)))
def test_presync_various():
    """Walk-through of presync behavior: column alignment, 'buried' parameters,
    numpy arguments, column joins, fill shortcuts, fixed target indices and
    functions returning tuples.

    NOTE: the variable `a` is rebound several times (function -> presync wrapper
    -> DataFrame); each section below depends on the binding left by the
    previous one, so statement order matters.
    """
    x = pd.Series([1,2,3,4], drange(-3))
    y = pd.Series([1,2,3,4], drange(-4,-1))
    z = pd.DataFrame([[1,2],[3,4]], drange(-3,-2), ['a','b'])
    addition = lambda a, b: a+b
    # Raw pandas Series + DataFrame addition concatenates along columns...
    assert list(addition(x,z).columns) == list(x.index) + ['a', 'b']
    #But:
    # ...while presync broadcasts the series across the frame's columns.
    assert list(presync(addition)(x,z).columns) == ['a', 'b']
    res = presync(addition, index='outer', method = 'ffill')(x,z)
    assert eq(res.a.values, np.array([2,5,6,7]))
    #:Example 2: alignment works for parameters 'buried' within...
    #-------------------------------------------------------
    function = lambda a, b: a['x'] + a['y'] + b
    f = presync(function, 'outer', method = 'ffill')
    res = f(dict(x = x, y = y), b = z)
    assert eq(res, pd.DataFrame(dict(a = [np.nan, 4, 8, 10, 11], b = [np.nan, 5, 9, 11, 12]), index = drange(-4)))
    #:Example 3: alignment of numpy arrays
    #-------------------------------------
    addition = lambda a, b: a+b
    a = presync(addition)
    assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([[1,2,3,4]]).T), pd.Series([2,4,6,8], drange(-3)))
    assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([1,2,3,4])), pd.Series([2,4,6,8], drange(-3)))
    assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([[1,2,3,4],[5,6,7,8]]).T), pd.DataFrame({0:[2,4,6,8], 1:[6,8,10,12]}, drange(-3)))
    assert eq(a(np.array([1,2,3,4]), np.array([[1,2,3,4]]).T), np.array([2,4,6,8]))
    #:Example 4: inner join alignment of columns in dataframes by default
    #---------------------------------------------------------------------
    x = pd.DataFrame({'a':[2,4,6,8], 'b':[6,8,10,12.]}, drange(-3))
    y = pd.DataFrame({'wrong':[2,4,6,8], 'columns':[6,8,10,12]}, drange(-3))
    # No shared columns -> empty result.
    assert len(a(x,y)) == 0
    y = pd.DataFrame({'a':[2,4,6,8], 'other':[6,8,10,12.]}, drange(-3))
    assert eq(a(x,y),x[['a']]*2)
    y = pd.DataFrame({'a':[2,4,6,8], 'b':[6,8,10,12.]}, drange(-3))
    assert eq(a(x,y),x*2)
    y = pd.DataFrame({'column name for a single column dataframe is ignored':[1,1,1,1]}, drange(-3))
    assert eq(a(x,y),x+1)
    a = presync(addition, columns = 'outer')
    y = pd.DataFrame({'other':[2,4,6,8], 'a':[6,8,10,12]}, drange(-3))
    assert sorted(a(x,y).columns) == ['a','b','other']
    #:Example 4: ffilling, bfilling
    #------------------------------
    x = pd.Series([1.,np.nan,3.,4.], drange(-3))
    y = pd.Series([1.,np.nan,3.,4.], drange(-4,-1))
    # Default index join: interior NaNs stay NaN without a fill method.
    assert eq(a(x,y), pd.Series([np.nan, np.nan,7], drange(-3,-1)))
    #but, we provide easy conversion of internal parameters of presync:
    assert eq(a.ffill(x,y), pd.Series([2,4,7], drange(-3,-1)))
    assert eq(a.bfill(x,y), pd.Series([4,6,7], drange(-3,-1)))
    assert eq(a.oj(x,y), pd.Series([np.nan, np.nan, np.nan, 7, np.nan], drange(-4)))
    assert eq(a.oj.ffill(x,y), pd.Series([np.nan, 2, 4, 7, 8], drange(-4)))
    #:Example 5: indexing to a specific index
    #----------------------------------------
    index = pd.Index([dt(-3), dt(-1)])
    a = presync(addition, index = index)
    x = pd.Series([1.,np.nan,3.,4.], drange(-3))
    y = pd.Series([1.,np.nan,3.,4.], drange(-4,-1))
    assert eq(a(x,y), pd.Series([np.nan, 7], index))
    #:Example 6: returning complicated stuff
    #----------------------------------------
    a = pd.DataFrame(np.random.normal(0,1,(100,10)), drange(-99))
    b = pd.DataFrame(np.random.normal(0,1,(100,10)), drange(-99))
    def f(a, b):
        return (a*b, a.sum(axis=0), b.sum(axis=0))
    old = f(a,b)
    self = presync(f)
    args = (); kwargs = dict(a = a, b = b)
    new = self(*args, **kwargs)
    # Tuple results are handled element-wise; on already-aligned inputs the synced
    # call matches the raw call up to float noise.
    assert abs(new[1]-old[1]).max() < 1e-10
    assert abs(new[2]-old[2]).max() < 1e-10
    assert abs(new[0]-old[0]).max().max() < 1e-10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 22/01/2015
@author: jorgesaw
'''
from __future__ import absolute_import, print_function, unicode_literals
class RecursosCSS(object):
    """Holds the application-wide CSS stylesheet loaded from disk."""
    # Last stylesheet loaded by cargarRecursoCss(); empty until one is loaded.
    CSS = ''
    @staticmethod
    def cargarRecursoCss(file):
        """Load the stylesheet at path *file* into the class-level CSS cache.

        :param file: path of the CSS file to read.
        :raises IOError/OSError: if the file cannot be opened.
        """
        # Bug fix: the previous plain open() used the platform-default
        # encoding, so stylesheets with non-ASCII content (accented chars)
        # could fail or decode wrongly. io.open honours `encoding` on both
        # Python 2 and 3 (the file uses __future__ imports, so 2.x may matter).
        import io
        with io.open(file, 'r', encoding='utf-8') as fh:
            RecursosCSS.CSS = fh.read()
|
#!/usr/bin/env python
"""
https://pypi.python.org/pypi/paho-mqtt
"""
import paho.mqtt.client as mqtt
import RPi.GPIO as GPIO
from time import sleep
# BCM GPIO channel numbers used by this script.
pinLedSw = 19  # output: drives the LED
pinLedRd = 13  # input: reads the LED state back
# A GPIO.cleanup() here would reset previous pin configuration; left disabled
# so state configured by other processes is not disturbed.
#GPIO.cleanup()
# Use Broadcom (BCM) channel numbering rather than physical board pin numbers.
GPIO.setmode(GPIO.BCM)
# Configure pin directions.
GPIO.setup(pinLedSw,GPIO.OUT)
GPIO.setup(pinLedRd,GPIO.IN)
def pinSet(pin, setLed):
    """Drive GPIO channel *pin* to the boolean state *setLed* (True = high)."""
    GPIO.output(pin, setLed)
def pinRead(pin):
    """Return the current level of GPIO channel *pin* (truthy = high)."""
    return GPIO.input(pin)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """Paho callback fired on CONNACK from the broker (rc == 0 means success)."""
    print("Connected with result code "+str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe("home/study/PiLED")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Paho callback for a PUBLISH on the subscribed topic.

    Switches the LED according to the payload ('ON'/'OFF'), then reads the
    LED state back and publishes it as 0/1 on the .../state topic.
    """
    # Bug fix: paho-mqtt delivers msg.payload as `bytes` under Python 3, so
    # `'ON' in msg.payload` raised TypeError. Decode once so the substring
    # tests work on both Python 2 and 3.
    payload = msg.payload
    if isinstance(payload, bytes):
        payload = payload.decode("utf-8", "ignore")
    print(msg.topic+" "+payload)
    if 'ON' in payload:
        pinSet(pinLedSw,True)
    if 'OFF' in payload:
        pinSet(pinLedSw,False)
    # Give the hardware a moment to settle before reading the state back.
    sleep(0.1)
    state = pinRead(pinLedRd)
    sstate = 1 if state else 0
    client.publish("home/study/PiLED/state", sstate)
# Optional disconnect handler — kept for reference, currently unused.
#def on_disconnect(client, userdata, rc):
#    if rc != 0:
#        pass
#        client.connect("10.0.0.16", 1883, 60)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
#client.on_disconnect = on_disconnect
# Connect to the local broker; keepalive ping every 60 seconds.
client.connect(host="10.0.0.16", port=1883, keepalive=60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceEducateSceneConfigCreateModel(object):
    """Request model for alipay.commerce.educate.scene.config.create.

    A plain value object: five optional fields exposed as properties plus
    (de)serialization helpers for the Alipay gateway payload format.
    """

    # Serialized field names, in payload order.
    _FIELD_NAMES = ('business_scene', 'pid', 'school_id', 'school_std_code', 'sign_app_id')

    def __init__(self):
        self._business_scene = None
        self._pid = None
        self._school_id = None
        self._school_std_code = None
        self._sign_app_id = None

    @property
    def business_scene(self):
        return self._business_scene

    @business_scene.setter
    def business_scene(self, value):
        self._business_scene = value

    @property
    def pid(self):
        return self._pid

    @pid.setter
    def pid(self, value):
        self._pid = value

    @property
    def school_id(self):
        return self._school_id

    @school_id.setter
    def school_id(self, value):
        self._school_id = value

    @property
    def school_std_code(self):
        return self._school_std_code

    @school_std_code.setter
    def school_std_code(self, value):
        self._school_std_code = value

    @property
    def sign_app_id(self):
        return self._sign_app_id

    @sign_app_id.setter
    def sign_app_id(self, value):
        self._sign_app_id = value

    def to_alipay_dict(self):
        """Serialize truthy fields into a plain dict, delegating to any value
        that provides its own to_alipay_dict()."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                # Falsy fields (None, '') are omitted from the payload.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayCommerceEducateSceneConfigCreateModel()
        for name in AlipayCommerceEducateSceneConfigCreateModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
|
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function for handling options without doubled or trailing spaces."""
class OptionString(object):
    """Class serving as a builder for option strings.

    Motivation: Both manual concatenation and .join() methods
    are prone to leaving superfluous spaces if some parts of options
    are optional (missing, empty).

    The scope of this class is more general than just command line options,
    it can concatenate any string consisting of words that may be missing.
    But options were the first usage, so method arguments are frequently
    named "parameter" and "value".
    To keep this generality, automated adding of dashes is optional,
    and disabled by default.

    Parts of the whole option string are kept as list items (string, stripped),
    with prefix already added.
    Empty strings are never added to the list (except by constructor).

    The class offers many methods for adding, so that callers can pick
    the best fitting one, without much logic near the call site.
    """

    def __init__(self, prefix="", *args):
        """Create instance with listed strings as parts to use.

        Prefix will be converted to string and stripped.
        The typical (nonempty) prefix values are "-" and "--".

        :param prefix: Substring to prepend to every parameter (not value).
        :param args: List of positional arguments to become parts.
        :type prefix: object
        :type args: list of object
        """
        self.prefix = str(prefix).strip()  # Not worth to call change_prefix.
        self.parts = list(args)

    def __repr__(self):
        """Return string executable as Python constructor call.

        The prefix is emitted as the first positional argument, because the
        previous "OptionString(prefix='-', 'part')" form was a syntax error
        (keyword argument before positional) whenever parts were present.

        :returns: Executable constructor call as string.
        :rtype: str
        """
        arguments = [repr(self.prefix)] + [repr(part) for part in self.parts]
        return "OptionString({})".format(", ".join(arguments))

    # TODO: Would we ever need a copy() method?
    # Currently, superstring "master" is mutable but unique,
    # substring "slave" can be used to extend, but does not need to be mutated.

    def change_prefix(self, prefix):
        """Change the prefix field from the initialized value.

        Sometimes it is more convenient to change the prefix in the middle
        of string construction.
        Typical use is for constructing a command, where the first part
        (executable filename) does not have a dash, but the other parameters do.
        You could put the first part into constructor argument,
        but using .add and only then enabling prefix is horizontally shorter.

        :param prefix: New prefix value, to be converted and stripped.
        :type prefix: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        self.prefix = str(prefix).strip()
        # Bug fix: self was not returned before, silently breaking the
        # chaining this docstring has always promised.
        return self

    def extend(self, other):
        """Extend self by contents of other option string.

        :param other: Another instance to add to the end of self.
        :type other: OptionString
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        self.parts.extend(other.parts)
        return self

    def _check_and_add(self, part, prefixed):
        """Convert to string, strip, conditionally add prefixed if non-empty.

        Value of None is converted to empty string.
        Emptiness is tested before adding prefix.

        :param part: Unchecked part to add to list of parts.
        :param prefixed: Whether to add prefix when adding.
        :type part: object
        :type prefixed: object
        :returns: The converted part without prefix, empty means not added.
        :rtype: str
        """
        part = "" if part is None else str(part).strip()
        if part:
            prefixed_part = self.prefix + part if prefixed else part
            self.parts.append(prefixed_part)
        return part

    def add(self, parameter):
        """Add parameter if nonempty to the list of parts.

        Parameter object is converted to string and stripped.
        If parameter converts to empty string, nothing is added.
        Parameter is prefixed before adding.

        :param parameter: Parameter object, usually a word starting with dash.
        :type parameter: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        self._check_and_add(parameter, prefixed=True)
        return self

    def add_if(self, parameter, condition):
        """Add parameter if nonempty and condition is true to the list of parts.

        If condition truth value is false, nothing is added.
        Parameter object is converted to string and stripped.
        If parameter converts to empty string, nothing is added.
        Parameter is prefixed before adding.

        :param parameter: Parameter object, usually a word starting with dash.
        :param condition: Do not add if truth value of this is false.
        :type parameter: object
        :type condition: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        if condition:
            self.add(parameter)
        return self

    def add_with_value(self, parameter, value):
        """Add parameter, if followed by a value to the list of parts.

        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: Parameter object, usually a word starting with dash.
        :param value: Value object. Prefix is never added.
        :type parameter: object
        :type value: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        # Stage both pieces in a scratch instance so that nothing is added
        # to self unless parameter AND value are nonempty.
        temp = OptionString(prefix=self.prefix)
        # pylint: disable=protected-access
        if temp._check_and_add(parameter, prefixed=True):
            if temp._check_and_add(value, prefixed=False):
                self.extend(temp)
        return self

    def add_equals(self, parameter, value):
        """Add parameter=value to the list of parts.

        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: Parameter object, usually a word starting with dash.
        :param value: Value object. Prefix is never added.
        :type parameter: object
        :type value: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        temp = OptionString(prefix=self.prefix)
        # pylint: disable=protected-access
        if temp._check_and_add(parameter, prefixed=True):
            if temp._check_and_add(value, prefixed=False):
                self.parts.append("=".join(temp.parts))
        return self

    def add_with_value_if(self, parameter, value, condition):
        """Add parameter and value if condition is true and nothing is empty.

        If condition truth value is false, nothing is added.
        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: Parameter object, usually a word starting with dash.
        :param value: Value object. Prefix is never added.
        :param condition: Do not add if truth value of this is false.
        :type parameter: object
        :type value: object
        :type condition: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        if condition:
            self.add_with_value(parameter, value)
        return self

    def add_equals_if(self, parameter, value, condition):
        """Add parameter=value to the list of parts if condition is true.

        If condition truth value is false, nothing is added.
        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: Parameter object, usually a word starting with dash.
        :param value: Value object. Prefix is never added.
        :param condition: Do not add if truth value of this is false.
        :type parameter: object
        :type value: object
        :type condition: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        if condition:
            self.add_equals(parameter, value)
        return self

    def add_with_value_from_dict(self, parameter, key, mapping, default=""):
        """Add parameter with value from dict under key, or default.

        If key is missing, default is used as value.
        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: The parameter part to add with prefix.
        :param key: The key to look the value for.
        :param mapping: Mapping with keys and values to use.
        :param default: The value to use if key is missing.
        :type parameter: object
        :type key: str
        :type mapping: dict
        :type default: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        value = mapping.get(key, default)
        return self.add_with_value(parameter, value)

    def add_equals_from_dict(self, parameter, key, mapping, default=""):
        """Add parameter=value to options where value is from dict.

        If key is missing, default is used as value.
        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: The parameter part to add with prefix.
        :param key: The key to look the value for.
        :param mapping: Mapping with keys and values to use.
        :param default: The value to use if key is missing.
        :type parameter: object
        :type key: str
        :type mapping: dict
        :type default: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        value = mapping.get(key, default)
        return self.add_equals(parameter, value)

    def add_if_from_dict(self, parameter, key, mapping, default="False"):
        """Add parameter based on if the condition in dict is true.

        If key is missing, default is used as condition.
        If condition truth value is false, nothing is added.
        Parameter is converted to string and stripped.
        If parameter converts to empty string, nothing is added.
        Parameter is prefixed before adding.

        NOTE(review): the default is the *string* "False", which is truthy,
        so a missing key currently ADDS the parameter. Callers appear to rely
        on mapping values being real booleans — confirm before changing.

        :param parameter: The parameter part to add with prefix.
        :param key: The key to look the value for.
        :param mapping: Mapping with keys and values to use.
        :param default: The value to use if key is missing.
        :type parameter: object
        :type key: str
        :type mapping: dict
        :type default: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        condition = mapping.get(key, default)
        return self.add_if(parameter, condition)

    def add_with_value_if_from_dict(
            self, parameter, value, key, mapping, default="False"):
        """Add parameter and value based on condition in dict.

        If key is missing, default is used as condition.
        If condition truth value is false, nothing is added.
        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: The parameter part to add with prefix.
        :param value: Value object. Prefix is never added.
        :param key: The key to look the value for.
        :param mapping: Mapping with keys and values to use.
        :param default: The value to use if key is missing.
        :type parameter: object
        :type value: object
        :type key: str
        :type mapping: dict
        :type default: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        condition = mapping.get(key, default)
        return self.add_with_value_if(parameter, value, condition)

    def add_equals_if_from_dict(
            self, parameter, value, key, mapping, default="False"):
        """Add parameter=value based on condition in dict.

        If key is missing, default is used as condition.
        If condition truth value is false, nothing is added.
        Parameter and value are converted to string and stripped.
        If parameter or value converts to empty string, nothing is added.
        If added, parameter (but not value) is prefixed.

        :param parameter: The parameter part to add with prefix.
        :param value: Value object. Prefix is never added.
        :param key: The key to look the value for.
        :param mapping: Mapping with keys and values to use.
        :param default: The value to use if key is missing.
        :type parameter: object
        :type value: object
        :type key: str
        :type mapping: dict
        :type default: object
        :returns: Self, to enable method chaining.
        :rtype: OptionString
        """
        condition = mapping.get(key, default)
        return self.add_equals_if(parameter, value, condition)

    def __str__(self):
        """Return space separated string of nonempty parts.

        The format is suitable to be pasted as (part of) command line.
        Do not call str() prematurely just to get a substring, consider
        converting the surrounding text manipulation to OptionString as well.

        :returns: Space separated string of options.
        :rtype: str
        """
        return " ".join(self.parts)
|
"""GCN implementation copied from https://github.com/mie-lab/traffic4cast-
Graph-ResNet/blob/master/models/graph_models.py with permission."""
# Copyright 2021 Institute of Advanced Research in Artificial Intelligence (IARAI) GmbH.
# IARAI licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from typing import Union
import torch
import torch.nn.functional as F
from torch import nn
from torch_geometric.nn import ChebConv # noqa
class Kipfblock(torch.nn.Module):
    """GCN block after Kipf & Welling, "Semi-supervised classification with
    graph convolutional networks" (https://arxiv.org/abs/1609.02907v4):

        ChebConv -> (optional BatchNorm) -> ReLU -> Dropout

    See http://proceedings.mlr.press/v123/martin20a/martin20a.pdf Figure 3.
    """

    def __init__(self, n_input: int, n_hidden: int = 64, K: int = 8, p: float = 0.5, bn: bool = False):
        """
        Parameters
        ----------
        n_input
            number of input features
        n_hidden: int
            number of output features
        K: int
            Chebyshev filter size :math:`K`. See `torch_geometric.nn.ChebConv`
        p: float
            dropout rate
        bn: bool
            batch normalization?
        """
        super(Kipfblock, self).__init__()
        # Keep conv1 created first so parameter-init RNG consumption matches.
        self.conv1 = ChebConv(n_input, n_hidden, K=K)
        self.p = p
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.do_bn = bn
        if bn:
            self.bn = torch.nn.BatchNorm1d(n_hidden)

    def forward(self, x, edge_index):
        out = self.conv1(x, edge_index)
        if self.do_bn:
            out = self.bn(out)
        out = F.relu(out)
        return F.dropout(out, training=self.training, p=self.p)
class Graph_resnet(torch.nn.Module):
    """Graph resnet based on Martin et al., "Graph-ResNets for short-term
    traffic forecasts in almost unknown cities", 2020,
    http://proceedings.mlr.press/v123/martin20a/martin20a.pdf.

    Generalization of that paper's Figure 3 (right): `depth` Kipf blocks,
    each summed with a K=1 ChebConv residual projection, followed by a
    mixing ChebConv with an optional input->output skip connection.
    """

    def __init__(
        self,
        num_features: int,
        num_classes: int,
        nh: Union[int, List[int]] = 38,
        K: int = 6,
        K_mix: int = 2,
        inout_skipconn: bool = True,
        depth: int = 3,
        p: float = 0.5,
        bn: bool = False,
    ):
        """
        Parameters
        ----------
        num_features
        num_classes
        nh: Union[int, List[int]]
            hidden size(s); a single int is replicated for every layer
        K: int
            Chebyshev filter size :math:`K`. See `torch_geometric.nn.ChebConv`
        K_mix:
            Chebyshev filter size for the final mixing convolution.
        inout_skipconn: bool
        depth: int
        p: float
            dropout rate
        bn: bool
            batch normalization?
        """
        super(Graph_resnet, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.depth = depth
        self.Kipfblock_list = nn.ModuleList()
        self.skipproject_list = nn.ModuleList()
        if isinstance(nh, list):
            # Per-layer widths: exactly one width per layer is required.
            assert len(nh) == depth
        else:
            # A single int means the same width at every layer.
            nh = [nh] * depth
        # Consecutive (in, out) feature sizes: num_features -> nh[0] -> ... -> nh[-1].
        layer_dims = [num_features] + list(nh)
        for n_in, n_out in zip(layer_dims[:-1], layer_dims[1:]):
            self.Kipfblock_list.append(Kipfblock(n_input=n_in, n_hidden=n_out, K=K, p=p, bn=bn))
            self.skipproject_list.append(ChebConv(n_in, n_out, K=1))
        # With the input skip connection, the mixer also sees the raw features.
        mix_in = nh[-1] + num_features if inout_skipconn else nh[-1]
        self.conv_mix = ChebConv(mix_in, num_classes, K=K_mix)

    def forward(self, data, **kwargs):
        x, edge_index = data.x, data.edge_index
        for block, skip in zip(self.Kipfblock_list, self.skipproject_list):
            x = block(x, edge_index) + skip(x, edge_index)
        if self.inout_skipconn:
            x = torch.cat((x, data.x), 1)
        return self.conv_mix(x, edge_index)
class KipfNet_orig(torch.nn.Module):
    """Plain two-layer GCN: ChebConv -> ReLU -> Dropout -> ChebConv, no skips."""

    def __init__(self, num_features, num_classes, nh1=64, K=8):
        super(KipfNet_orig, self).__init__()
        self.conv1 = ChebConv(num_features, nh1, K=K)
        self.conv2 = ChebConv(nh1, num_classes, K=K)

    def forward(self, data):
        hidden = F.relu(self.conv1(data.x, data.edge_index))
        hidden = F.dropout(hidden, training=self.training)
        return self.conv2(hidden, data.edge_index)
class KipfNet(torch.nn.Module):
    """One Kipf block with an optional input->output skip connection, see
    http://proceedings.mlr.press/v123/martin20a/martin20a.pdf Figure 3 left."""

    def __init__(self, num_features, num_classes, nh1=64, K=8, K_mix=2, inout_skipconn=False):
        super(KipfNet, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.Kipfblock1 = Kipfblock(n_input=num_features, n_hidden=nh1, K=K)
        # With the skip connection, the mixer also sees the raw input features.
        mix_in = nh1 + num_features if inout_skipconn else nh1
        self.conv_mix = ChebConv(mix_in, num_classes, K=K_mix)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.Kipfblock1(x, edge_index)
        if self.inout_skipconn:
            x = torch.cat((x, data.x), 1)
        return self.conv_mix(x, edge_index)
class KipfNetd2(torch.nn.Module):
    """Two stacked Kipf blocks with one optional global input->output skip
    connection, see http://proceedings.mlr.press/v123/martin20a/martin20a.pdf
    Figure 3 middle."""

    def __init__(self, num_features, num_classes, nh1=2, nh2=2, K=2, K_mix=1, inout_skipconn=True):
        super(KipfNetd2, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.Kipfblock1 = Kipfblock(n_input=num_features, n_hidden=nh1, K=K)
        self.Kipfblock2 = Kipfblock(n_input=nh1, n_hidden=nh2, K=K)
        # With the skip connection, the mixer also sees the raw input features.
        mix_in = nh2 + num_features if inout_skipconn else nh2
        self.conv_mix = ChebConv(mix_in, num_classes, K=K_mix)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.Kipfblock2(self.Kipfblock1(x, edge_index), edge_index)
        if self.inout_skipconn:
            x = torch.cat((x, data.x), 1)
        return self.conv_mix(x, edge_index)
class KipfNet_resd2(torch.nn.Module):
    """Depth-2 graph resnet: each Kipf block is summed with a K=1 ChebConv
    residual projection, see
    http://proceedings.mlr.press/v123/martin20a/martin20a.pdf Figure 3 right."""

    def __init__(self, num_features, num_classes, nh1=64, nh2=32, K=8, K_mix=2, inout_skipconn=True):
        super(KipfNet_resd2, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.Kipfblock1 = Kipfblock(n_input=num_features, n_hidden=nh1, K=K)
        self.Kipfblock2 = Kipfblock(n_input=nh1, n_hidden=nh2, K=K)
        # K=1 projections matching each block's in/out widths, for the residual sums.
        self.skip_project1 = ChebConv(in_channels=self.Kipfblock1.n_input, out_channels=self.Kipfblock1.n_hidden, K=1)
        self.skip_project2 = ChebConv(in_channels=self.Kipfblock2.n_input, out_channels=self.Kipfblock2.n_hidden, K=1)
        # With the skip connection, the mixer also sees the raw input features.
        mix_in = nh2 + num_features if inout_skipconn else nh2
        self.conv_mix = ChebConv(mix_in, num_classes, K=K_mix)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.Kipfblock1(x, edge_index) + self.skip_project1(x, edge_index)
        x = self.Kipfblock2(x, edge_index) + self.skip_project2(x, edge_index)
        if self.inout_skipconn:
            x = torch.cat((x, data.x), 1)
        return self.conv_mix(x, edge_index)
|
import asyncio
import aiohttp
import json
from ..helpers import pick_by
from ..backend import Backend
from ..update import Message, ReceiverType, Update, UpdateType, Attachment
from ..exceptions import RequestException
from ..logger import logger
# Telegram attachment types this backend knows how to wrap natively
# (see Telegram._make_attachment below for the per-type handling).
SUPPORTED_ATTACHMENT_TYPES = (
    "audio",
    "document",
    "photo",
    "sticker",
    "video",
    "voice",
)
# Generic aliases mapped onto Telegram's own attachment type names.
ATTACHMENT_TYPE_ALIASES = {
    "doc": "document",
    "image": "photo",
}
class Telegram(Backend):
    def __init__(
        self,
        token,
        messages_per_second=29,
        session=None,
        proxy=None,
        api_url="https://api.telegram.org",
        **kwargs,
    ):
        """Telegram bot backend.

        :param token: bot API token (required; ValueError if falsy).
        :param messages_per_second: rate used to derive the pause between
            API messages (Telegram caps bots at roughly 30 msg/s).
        :param session: optional aiohttp session; when None one is created
            lazily by _request() and treated as locally owned.
        :param proxy: optional proxy URL forwarded to every aiohttp request.
        :param api_url: bot API base URL (overridable for test servers).
        """
        super().__init__(**kwargs)
        if not token:
            raise ValueError("No `token` specified")
        self.offset = 0  # update offset; presumably advanced while polling — not used in this view
        self.proxy = proxy
        self.session = session
        # Whether we created (and thus own) the session ourselves.
        self._is_session_local = session is None
        self.username = None  # filled in elsewhere (e.g. after getMe) — TODO confirm
        self.api_token = token
        # Minimum pause between outgoing messages, derived from the rate limit.
        self.api_messages_pause = 1 / messages_per_second
        self.api_messages_lock = None
        # URL templates with a `{}` placeholder for the method / file path.
        api_url = api_url.rstrip("/")
        self.api_url = f"{api_url}/bot{token}/{{}}"
        self.file_url = f"{api_url}/file/bot{token}/{{}}"
async def _request(self, method, kwargs={}):
if not self.session:
self.session = aiohttp.ClientSession()
data = {k: v for k, v in kwargs.items() if v is not None}
url = self.api_url.format(method)
async with self.session.post(url, proxy=self.proxy, data=data) as resp:
data = await resp.json(content_type=None)
if not data.get("ok"):
raise RequestException(self, (method, {**kwargs}), data)
res = data["result"]
logger.debug("Telegram: %s(%s) => %s", method, kwargs, res)
return res
async def _request_file(self, file_id):
file = await self._request("getFile", {"file_id": file_id})
url = self.file_url.format(file["file_path"])
async with self.session.get(url, proxy=self.proxy) as resp:
return await resp.read()
def _make_getter(self, file_id):
async def getter():
return await self._request_file(file_id)
return getter
def _make_attachment(self, raw_attachment, raw_attachment_type):
t = raw_attachment_type
d = raw_attachment
if "file_id" in d:
id = d["file_id"]
else:
id = None
if t == "photo":
photo = list(sorted(d, key=lambda p: p["width"]))[-1]
id = photo["file_id"]
return Attachment._existing_full(
id=id, type="image", title="", file_name=id,
getter=self._make_getter(id), raw=d,
)
elif t == "audio":
title = d.get("performer", "") + " - " + d.get("title", "")
return Attachment._existing_full(
id=id, type="audio", title=title,
file_name=id, getter=self._make_getter(id), raw=d,
)
elif t == "document":
return Attachment._existing_full(
id=id, type="doc", title="",
file_name=d.get("file_name", ""),
getter=self._make_getter(id), raw=d,
)
elif t == "sticker":
return Attachment._existing_full(
id=id, type="sticker", title="", file_name=id,
getter=self._make_getter(id), raw=d,
)
elif t == "voice":
return Attachment._existing_full(
id=id, type="voice", title="", file_name=id,
getter=self._make_getter(id), raw=d,
)
elif t == "video":
return Attachment._existing_full(
id=id, type="video", title="", file_name=id,
getter=self._make_getter(id), raw=d,
)
else:
return Attachment._existing_full(
id=None, type=t, title=None, file_name=None, getter=None,
raw=d,
)
def prepare_context(self, ctx):
if ctx.update.type == UpdateType.UPD:
if ctx.update.raw.get("callback_query"):
cq = ctx.update.raw["callback_query"]
sender_id = cq["from"]["id"]
receiver_id = cq["message"]["chat"]["id"]
if cq["message"]["chat"]["type"] == "private":
ctx.default_target_id = sender_id
else:
ctx.default_target_id = receiver_id
ctx.sender_key = ctx.get_key_for(sender_id=sender_id)
ctx.receiver_key = ctx.get_key_for(receiver_id=receiver_id)
ctx.sender_here_key = ctx.get_key_for(sender_id=sender_id, receiver_id=receiver_id)
def _extract_text(self, update):
entities = update["message"].get("entities", ())
if not entities:
return update["message"].get("text", ""), {}
text = update["message"].get("text", "")
final_text = ""
last_index = 0
meta = {}
for entity in sorted(entities, key=lambda entity: entity["offset"]):
if entity["type"] == "bot_command":
new_last_index = entity["offset"] + entity["length"]
command = text[last_index: new_last_index]
if command.endswith(f"@{self.username}"):
final_text += command[:-len(f"@{self.username}")]
meta["bot_mentioned"] = True
else:
final_text += command
last_index = new_last_index
return final_text + text[last_index:], meta
def _make_update(self, raw_update):
if "message" not in raw_update:
return Update(raw_update, UpdateType.UPD, {})
attachments = []
possible_types = (
"audio", "voice", "photo", "video", "document", "sticker",
"animation", "video_note", "contact", "location", "venue",
"poll", "invoice"
)
for key in possible_types:
if key in raw_update["message"]:
attachments.append(
self._make_attachment(raw_update["message"][key], key)
)
if raw_update["message"]["chat"]["type"] == "private":
receiver_type = ReceiverType.SOLO
text = raw_update["message"].get("text", "")
meta = {}
else:
receiver_type = ReceiverType.MULTI
text, meta = self._extract_text(raw_update)
return Message(
raw=raw_update,
type=UpdateType.MSG,
text=text,
attachments=attachments,
sender_id=raw_update["message"]["from"]["id"],
receiver_id=raw_update["message"]["chat"]["id"],
receiver_type=receiver_type,
date=raw_update["message"]["date"],
meta=meta,
)
async def acquire_updates(self, submit_update):
try:
response = await self._request(
"getUpdates", {"timeout": 25, "offset": self.offset}
)
except (json.JSONDecodeError, aiohttp.ClientError):
return
except asyncio.CancelledError:
raise
except Exception:
logger.exception("Exceptions while gettings updates (Telegram)")
await asyncio.sleep(1)
return
for update in response:
await submit_update(self._make_update(update))
self.offset = update["update_id"] + 1
async def execute_send(self, target_id, message, attachments, kwargs):
result = []
chat_id = str(target_id)
async with self.api_messages_lock:
if message:
result.append(await self._request("sendMessage", {
"chat_id": chat_id,
"text": message,
**kwargs,
}))
await asyncio.sleep(self.api_messages_pause)
if isinstance(attachments, (int, str, Attachment)):
attachments = (attachments,)
for attachment in attachments:
if not isinstance(attachment, Attachment):
raise ValueError(f'Unexpected attachment: "{attachment}"')
attachment_type = ATTACHMENT_TYPE_ALIASES.get(
attachment.type,
attachment.type,
)
send_method = f"send{attachment_type.capitalize()}"
if attachment.uploaded:
result.append(await self._request(send_method, pick_by({
"chat_id": chat_id,
attachment_type: str(attachment.id),
"caption": attachment.title,
})))
await asyncio.sleep(self.api_messages_pause)
continue
if attachment_type not in SUPPORTED_ATTACHMENT_TYPES:
raise ValueError(f"Can't upload attachment '{attachment_type}'")
result.append(await self._request(send_method, pick_by({
"chat_id": chat_id,
attachment_type: attachment.file,
"caption": attachment.title,
})))
await asyncio.sleep(self.api_messages_pause)
return result
async def execute_request(self, method, kwargs):
return await self._request(method, kwargs)
async def on_start(self, app):
me = await self._request("getMe")
name = me.get("first_name", "") + " " + me.get("last_name", "")
name = name.strip() or "(unknown)"
logger.info(
'logged in as "%s" ( https://t.me/%s )',
name,
me["username"],
)
self.username = me["username"]
self.api_messages_lock = asyncio.Lock()
async def send_message(self, target_id, message, attachments=(), **kwargs):
"""
Send message to specified `target_id` with text `message` and
attachments `attachments`.
This method will forward all excessive keyword arguments to
sending method.
"""
return await self.execute_send(target_id, message, attachments, kwargs)
async def request(self, method, **kwargs):
"""
Call specified method from Telegram api with specified
kwargs and return response's data.
"""
return await self._request(method, kwargs)
async def on_shutdown(self, app):
if self._is_session_local:
await self.session.close()
|
"""Module with functions called to perform various benchmarks."""
import time
import datetime
def measurement_log(thread_id, i, delta, measurement_count):
    """Log an info about the measurement status.

    `thread_id` is the calling thread's id, or None for single-threaded
    runs; `i` is the zero-based measurement index (displayed one-based);
    `delta` is the measured duration in seconds; `measurement_count` is
    the total number of planned measurements.
    """
    # Output format kept identical; switched from str.format to f-strings.
    if thread_id is not None:
        print(f"    thread: #{thread_id} call {i + 1}/{measurement_count} {delta}")
    else:
        print(f"    #{i + 1} {delta}")
def call_callback_function(function_to_call, s3, i):
    """Invoke the callback, forwarding `s3` only when one was provided."""
    assert function_to_call is not None, "Callback function is not specified."
    args = (i,) if s3 is None else (i, s3)
    return function_to_call(*args)
def measure(function_to_call, check_function, measurement_count, pause_time, thread_id, s3=None):
    """Call the provided callback function repeatedly.

    Repeatedly call the provided callback function, then check results by
    the provided check function, accumulate timing results and any debug
    payloads, and return them as ``(measurements, debug)``.
    """
    measurements = []
    debug = []
    for call_no in range(measurement_count):
        wall_start = time.time()
        started_at = datetime.datetime.utcnow()
        retval = call_callback_function(function_to_call, s3, call_no)
        print("Return value: ", retval)
        # let's ignore retval for concurrent calls (ATM)
        if thread_id is None:
            assert check_function(retval)
        wall_end = time.time()
        finished_at = datetime.datetime.utcnow()
        delta = wall_end - wall_start
        measurement_log(thread_id, call_no, delta, measurement_count)
        measurements.append({
            "measurement_number": call_no,
            "started_at": started_at,
            "finished_at": finished_at,
            "delta": delta})
        # we can store debug data taken from the stack analysis
        if "debug" in retval:
            debug.append(retval["debug"])
        time.sleep(pause_time)
    return measurements, debug
def core_api_benchmark(core_api, measurement_count, pause_time, thread_id=None):
    """Measure core API by accessing it and checking status code."""
    def perform(i):
        return core_api.get()

    def check(retval):
        return retval.status_code == 200

    return measure(perform, check, measurement_count, pause_time, thread_id)
def jobs_api_benchmark(jobs_api, measurement_count, pause_time, thread_id=None):
    """Measure jobs API by accessing it and checking status code."""
    def perform(i):
        return jobs_api.get()

    def check(retval):
        return retval.status_code == 200

    return measure(perform, check, measurement_count, pause_time, thread_id)
def stack_analysis_benchmark(core_api, measurement_count, pause_time, thread_id=None):
    """Measure server and worker modules by starting stack analysis."""
    def perform(i):
        return core_api.stack_analysis(thread_id, i)

    def check(retval):
        return retval["result"].status_code == 200

    return measure(perform, check, measurement_count, pause_time, thread_id)
def component_analysis_benchmark(core_api, s3, measurement_count, pause_time,
                                 should_exist,
                                 thread_id=None,
                                 ecosystem=None, component=None, version=None):
    """Measure server and worker modules by starting component analysis."""
    # Known components answer 200; unknown ones 404.
    expected_code = 200 if should_exist else 404

    def perform(i, s3):
        return core_api.component_analysis(thread_id, i, ecosystem, component, version)

    def check(retval):
        return retval["result"] == expected_code

    return measure(perform, check, measurement_count, pause_time, thread_id, s3)
def component_analysis_flow_scheduling(jobs_api, s3, measurement_count, pause_time,
                                       thread_id=None,
                                       ecosystem=None, component=None, version=None):
    """Measure jobs and worker modules by starting component analysis."""
    def perform(i, s3):
        return jobs_api.component_analysis(i, s3, thread_id, ecosystem, component, version)

    def check(retval):
        return retval is True

    return measure(perform, check, measurement_count, pause_time, thread_id, s3)
def package_query_to_graph_db(gremlin_api, measurement_count, pause_time,
                              thread_id=None):
    """Measure the simple package query to Gremlin database."""
    def perform(i):
        return gremlin_api.package_query(i, None)

    def check(retval):
        return gremlin_api.check_gremlin_response(retval)

    return measure(perform, check, measurement_count, pause_time, thread_id)
def package_version_query_to_graph_db(gremlin_api, measurement_count, pause_time,
                                      thread_id=None):
    """Measure the simple package+version query to Gremlin database."""
    def perform(i):
        return gremlin_api.package_version_query(i, None)

    def check(retval):
        return gremlin_api.check_gremlin_response(retval)

    return measure(perform, check, measurement_count, pause_time, thread_id)
def core_api_benchmark_thread(core_api, measurement_count, pause_time, q, thread_id):
    """Access core API in current thread and put results into the provided queue."""
    q.put(core_api_benchmark(core_api, measurement_count, pause_time, thread_id))
def component_analysis_read_thread_known_component(core_api, s3, measurement_count, pause_time,
                                                   q, thread_id):
    """Perform component analysis read in current thread and put results into the provided queue.

    Component analysis is performed for a known component.
    """
    q.put(component_analysis_benchmark(core_api, s3, measurement_count, pause_time, True,
                                       thread_id, "pypi", "clojure_py", "0.2.4"))
def component_analysis_read_thread_unknown_component(core_api, s3, measurement_count, pause_time,
                                                     q, thread_id):
    """Perform component analysis read in current thread and put results into the provided queue.

    Component analysis is performed for an unknown component.
    """
    q.put(component_analysis_benchmark(core_api, s3, measurement_count, pause_time, False,
                                       thread_id,
                                       "pypi", "non_existing_component", "9.8.7"))
def component_analysis_thread(jobs_api, s3, measurement_count, pause_time, q, thread_id):
    """Perform component analysis in current thread and put results into the provided queue."""
    q.put(component_analysis_flow_scheduling(jobs_api, s3, measurement_count,
                                             pause_time, thread_id))
def stack_analysis_thread(core_api, s3, measurement_count, pause_time, q, thread_id):
    """Perform stack analysis in current thread and put results into the provided queue."""
    # `s3` is accepted for signature parity with the other thread helpers
    # but is not needed by the stack analysis benchmark.
    q.put(stack_analysis_benchmark(core_api, measurement_count, pause_time, thread_id))
def package_query_graph_db_thread(core_api, s3, measurement_count, pause_time, q, thread_id):
    """Perform query to graph DB in current thread and put results into the provided queue."""
    q.put(package_query_to_graph_db(core_api, measurement_count, pause_time, thread_id))
def package_version_query_graph_db_thread(core_api, s3, measurement_count, pause_time, q,
                                          thread_id):
    """Perform query to graph DB in current thread and put results into the provided queue."""
    q.put(package_version_query_to_graph_db(core_api, measurement_count, pause_time, thread_id))
|
from django.shortcuts import render
import re
from django.utils.timezone import datetime
from django.http import HttpResponse
#from hello import spotify_signin
def home(request):
    """Render the Spotify sign-in landing page."""
    # FIX: removed an unreachable `spotify_signin.spotify_auth()` call that
    # followed the return statement and referenced a commented-out import
    # (it would have raised NameError had it ever executed).
    return render(
        request,
        'hello/signIn.html'
    )
# def hello_there(request, name): Passing arguments through url
# return render(
# request,
# 'hello/signIn.html'
# )
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Math operations with uncertainties and units.
Simplified version of `uncertainties` python package with some
`~astropy.units` addings, in a much more free form."""
import copy
from astropy import units
import numpy as np
from ._deriv import numpy_ufunc_derivatives, math_derivatives
from ..py_utils import check_iterable
from ..logger import logger
__all__ = ['unit_property', 'UFloat', 'ufloat', 'units']
# pylint:disable=no-else-return,no-else-raise
def _filter_compatible(inp, cls, attr, else_None=False):
"""Filter common data structures compatible with UFloat."""
if else_None:
inp = tuple(getattr(x, attr) if isinstance(x, cls) else None
for x in inp)
else:
inp = tuple(getattr(x, attr) if isinstance(x, cls) else x
for x in inp)
return inp
def unit_property(cls):
    """Add a `unit` property to a class.

    The property stores None internally for dimensionless data and
    normalizes everything else through `units.Unit`.
    """
    def _get(self):
        current = self._unit  # noqa:W0212
        return units.dimensionless_unscaled if current is None else current

    def _set(self, value):
        if value is None or units.Unit(value) == units.dimensionless_unscaled:
            self._unit = None  # noqa:W0212
        else:
            self._unit = units.Unit(value)  # noqa:W0212

    cls._unit = None  # noqa:W0212
    cls.unit = property(_get, _set, doc="Physical unit of the data.")
    return cls
@unit_property
class UFloat():
    """Store float values with stddev uncertainties and units.

    Parameters
    ----------
    value : number or array_like
        Nominal value(s) of the quantity.
    uncertainty : number, array_like or `None` (optional)
        Uncertainty value of the quantity. If `None`, the quantity will be
        considered with no errors. Must match `value` shape.
    unit : `~astropy.units.Unit` or string (optional)
        The data unit. Must be `~astropy.units.Unit` compliant.

    Notes
    -----
    - This class does not support memmapping. It is intended for in-memory
      operations only.
    - Units are handled by `~astropy.units`.
    - Math operations care about units and uncertainties.
    """

    _nominal = None  # nominal value(s)
    _uncert = None  # stddev uncertainty; None means "no errors stored"
    _unit = None  # managed by the @unit_property decorator

    def __init__(self, value, uncertainty=None, unit=None):
        self.nominal = value
        self.uncertainty = uncertainty
        self.unit = unit

    def _set_uncert(self, value):
        """Validate and store the uncertainty; its shape must match nominal."""
        if value is None:
            self._uncert = None
        else:
            if np.shape(value) != np.shape(self._nominal):
                raise ValueError('Uncertainty with shape different from '
                                 'nominal value: '
                                 f'{np.shape(value)} '
                                 f'{np.shape(self._nominal)}')
            if check_iterable(self._nominal):
                self._uncert = np.array(value)
            else:
                self._uncert = float(value)

    def _set_nominal(self, value):
        """Store the nominal value, discarding any previous uncertainty."""
        if value is None:
            raise ValueError('Nominal value cannot be None')
        self._nominal = value
        self._uncert = None  # always value is reset, uncertainty resets
        # No unit changes

    @property
    def uncertainty(self):
        """Uncertainty of the quantity."""
        if self._uncert is None:
            # No stored errors: report zeros matching the nominal shape.
            if check_iterable(self._nominal):
                return np.zeros_like(self._nominal)
            else:
                return 0.0
        else:
            return self._uncert

    @uncertainty.setter
    def uncertainty(self, value):
        self._set_uncert(value)

    @property
    def nominal(self):
        """Nominal value of the quantity."""
        return self._nominal

    @nominal.setter
    def nominal(self, value):
        self._set_nominal(value)

    def reset(self, value, uncertainty=None, unit=None):
        """Reset all the data.

        Parameters
        ----------
        value : number or array_like
            Nominal value(s) of the quantity.
        uncertainty : number, array_like or `None` (optional)
            Uncertainty value of the quantity. If `None`, the quantity will be
            considered with no errors. Must match `value` shape.
        unit : `~astropy.units.Unit` or string (optional)
            The data unit. Must be `~astropy.units.Unit` compliant.
        """
        self.nominal = value
        self.uncertainty = uncertainty
        self.unit = unit

    def __repr__(self):
        ret = "< UFloat "
        # For arrays show only the shape, not the full data.
        if check_iterable(self._nominal):
            ret += str(np.shape(self._nominal))
        else:
            ret += str(self._nominal)
        if self._uncert is not None:
            ret += f"+-{self._uncert}"
        ret += f" {self.unit} "
        ret += " >"
        return ret

    def _compute_errors(self, derivs, inpnom, inpstd, **kwargs):
        """Compute the propagated stddev using the partial derivatives.

        The result is ``sqrt(sum((derivs[i](*inpnom) * inpstd[i])**2))``.
        """
        n_derivs = len(derivs)
        # check if the number of inputs matches the number of derivs
        if len(inpnom) != n_derivs or len(inpstd) != n_derivs:
            raise ValueError('Inputs and derivatives have different number '
                             'of components')

        # FIX: was `if axis:`, which treated axis=0 as "no axis" and
        # silently computed errors while ignoring the axis argument.
        axis = kwargs.get('axis')
        if axis is not None:
            raise NotImplementedError('Not implemented for apply in axis.')

        components = [None]*n_derivs
        for i in range(n_derivs):
            components[i] = derivs[i](*inpnom)*inpstd[i]
        return np.sqrt(np.sum(np.square(components)))

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Numpy ufunc protocol: propagate nominal values and errors."""
        # TODO: check units across the inputs (including inside lists)
        # Split the inputs into nominal values and uncertainties; plain
        # numbers pass through unchanged (uncertainty: None).
        inpnom = _filter_compatible(inputs, UFloat, 'nominal')
        inpstd = _filter_compatible(inputs, UFloat, 'uncertainty',
                                    else_None=True)

        nkwargs = copy.copy(kwargs)
        skwargs = copy.copy(kwargs)
        if kwargs.get('out', ()):
            nkwargs['out'] = _filter_compatible(nkwargs['out'],
                                                UFloat, 'nominal')
            skwargs['out'] = _filter_compatible(skwargs['out'],
                                                UFloat, 'uncertainty',
                                                else_None=True)

        ufn = ufunc.__name__
        nominal = getattr(ufunc, method)(*inpnom, **nkwargs)
        if ufn in numpy_ufunc_derivatives:
            std_func = numpy_ufunc_derivatives[ufn]
            std = self._compute_errors(std_func, inpnom, inpstd, **skwargs)
        else:
            logger.warning("Function %s errors is not implemented.", ufn)
            std = None

        if isinstance(nominal, tuple):
            # multiple return values (e.g. divmod)
            if std is None:
                std = [None]*len(nominal)
            return tuple(UFloat(n, s, self.unit)
                         for n, s in zip(nominal, std))
        elif method == 'at':
            # in-place indexed operation: no return value
            return None
        else:
            # one return value
            return UFloat(nominal, std, self.unit)
def ufloat(value, uncertainty=None, unit=None):
    """Create a `UFloat` quantity to handle operations. Just wraps UFloat.

    Parameters
    ----------
    value : number or array_like
        Nominal value(s) of the quantity.
    uncertainty : number, array_like or `None` (optional)
        Uncertainty value of the quantity. If `None`, the quantity will be
        considered with no errors. Must match `value` shape.
    unit : `~astropy.units.Unit` or string (optional)
        The data unit. Must be `~astropy.units.Unit` compliant.

    Returns
    -------
    q : `UFloat`
        Quantity generated value, with uncertainty and unit.
    """
    return UFloat(value, uncertainty, unit)
|
# -*- coding: UTF-8 -*-
import os
import pygame
from pygame.locals import *
from constants import BLACK, GAME_WIDTH, GAME_HEIGHT, ITEMS, STATS, TACTICS, PLACES
from text import create_prompt, MenuBox, MenuGrid
# Categories shown in the ITEMS help section. `sort_order` fixes the menu
# position; entries with a `conditions` mapping only appear once the matching
# game-state flags are set (checked via game.conditions_are_met()).
ITEMS_MENU = {
    'HEAL ITEMS': {
        'sort_order': 0,
    },
    'ATTACK ITEMS': {
        'sort_order': 1,
        'conditions': {
            'battle05': True,
        },
    },
    'MAP ITEMS': {
        'sort_order': 2,
    },
    'SAVE ITEMS': {
        'sort_order': 3,
        'conditions': {
            'destroyed_ammonihah_treasure': True,
        },
    },
    'PASSIVE ITEMS': {
        'sort_order': 4,
    },
}
class HelpMenu(object):
    """In-game help browser.

    A top-level menu picks a topic (weapons, stats, tactics, ...). Each
    topic opens a list or grid of entries; choosing one shows its
    description in a prompt box. Pressing Z walks back up one level.

    Refactor: the per-state input handling in `handle_input` was heavily
    duplicated and is now table-driven via `SIMPLE_STATES` and two private
    helpers; behaviour is unchanged.
    """

    # state -> (menu attribute, description lookup table, lowercase choice?)
    # for every state that is a single list plus a description prompt.
    SIMPLE_STATES = {
        'weapons': ('weapons_menu', ITEMS, True),
        'stats': ('stats_menu', STATS, False),
        'places': ('places_menu', PLACES, False),
        'body_armor': ('body_armor_menu', ITEMS, True),
        'helmets': ('helmets_menu', ITEMS, True),
    }

    def __init__(self, screen, game):
        self.screen = screen
        self.game = game
        self.select_sound = pygame.mixer.Sound(os.path.join('data', 'audio', 'select.wav'))
        self.state = 'main'
        items = [
            'WEAPONS',
            'BODY ARMOR',
            'HELMETS',
            'TACTICS',
            'ITEMS',
            'STATS',
            'PLACES',
        ]
        self.menu = MenuBox(items, current_choice=0, border=True, title='Help')
        self.menu.focus()
        # Sub-menus are created on demand and reset to None when closed.
        self.weapons_menu = None
        self.stats_menu = None
        self.places_menu = None
        self.body_armor_menu = None
        self.helmets_menu = None
        self.tactics_menu = None
        self.tactics_submenu = None
        self.items_menu = None
        self.items_submenu = None
        self.description = None

    def draw(self):
        """Blit whichever menus and description box are currently open."""
        self.screen.fill(BLACK)
        self.screen.blit(self.menu.surface, (0, 0))
        if self.weapons_menu:
            self.screen.blit(self.weapons_menu.surface, (48, 0))
        if self.stats_menu:
            self.screen.blit(self.stats_menu.surface, (48, 0))
        if self.places_menu:
            self.screen.blit(self.places_menu.surface, (48, 0))
        if self.body_armor_menu:
            self.screen.blit(self.body_armor_menu.surface, (48, 0))
        if self.helmets_menu:
            self.screen.blit(self.helmets_menu.surface, (48, 0))
        if self.tactics_menu:
            self.screen.blit(self.tactics_menu.surface, (0, 128))
        if self.tactics_submenu:
            self.screen.blit(self.tactics_submenu.surface, (48, 0))
        if self.items_menu:
            self.screen.blit(self.items_menu.surface, (0, 128))
        if self.items_submenu:
            self.screen.blit(self.items_submenu.surface, (48, 0))
        if self.description:
            self.screen.blit(self.description.surface, (0, 160))

    def update(self, dt):
        """Advance the animation of whichever widget currently has focus."""
        if self.description:
            self.description.update(dt)
        elif self.state == 'main':
            self.menu.update(dt)
        elif self.state in self.SIMPLE_STATES:
            getattr(self, self.SIMPLE_STATES[self.state][0]).update(dt)
        elif self.state == 'tactics':
            if self.tactics_submenu:
                self.tactics_submenu.update(dt)
            else:
                self.tactics_menu.update(dt)
        elif self.state == 'items':
            if self.items_submenu:
                self.items_submenu.update(dt)
            else:
                self.items_menu.update(dt)

    def handle_input(self, pressed):
        """Route input to the handler for the current state."""
        if self.state == 'main':
            self._handle_main_input(pressed)
        elif self.state in self.SIMPLE_STATES:
            self._handle_simple_state_input(pressed)
        elif self.state == 'tactics':
            self._handle_submenu_state_input(
                pressed, 'tactics_menu', 'tactics_submenu', TACTICS,
                # just grab the slot number off the menu choice
                lambda menu: self.create_tactics_submenu(int(menu.get_choice()[0])),
            )
        elif self.state == 'items':
            self._handle_submenu_state_input(
                pressed, 'items_menu', 'items_submenu', ITEMS,
                # the lower-cased first word of the choice is the item type
                lambda menu: self.create_items_submenu(menu.get_choice().split()[0].lower()),
            )

    def _handle_main_input(self, pressed):
        # Top-level topic selection.
        self.menu.handle_input(pressed)
        if pressed[K_x]:
            self.select_sound.play()
            choice = self.menu.get_choice()
            if choice == 'WEAPONS':
                self.create_weapons_menu()
            elif choice == 'STATS':
                self.create_stats_menu()
            elif choice == 'PLACES':
                self.create_places_menu()
            elif choice == 'BODY ARMOR':
                self.create_body_armor_menu()
            elif choice == 'HELMETS':
                self.create_helmets_menu()
            elif choice == 'TACTICS':
                self.create_tactics_menu()
            elif choice == 'ITEMS':
                self.create_items_menu()

    def _handle_simple_state_input(self, pressed):
        # Shared handler for the single-list states (weapons, stats, ...).
        menu_attr, lookup, lowercase = self.SIMPLE_STATES[self.state]
        menu = getattr(self, menu_attr)
        if self.description:
            # A description prompt is open: feed it input until dismissed.
            self.description.handle_input(pressed)
            if (pressed[K_x] or pressed[K_z]) and not self.description.has_more_stuff_to_show():
                self.description.shutdown()
                self.description = None
                menu.focus()
            return
        menu.handle_input(pressed)
        if pressed[K_x]:
            self.select_sound.play()
            choice = menu.get_choice()
            if lowercase:
                choice = choice.lower()
            self.description = create_prompt(lookup[choice]['description'])
            menu.unfocus()
        elif pressed[K_z]:
            # Back out to the main help menu.
            setattr(self, menu_attr, None)
            self.menu.focus()
            self.state = 'main'

    def _handle_submenu_state_input(self, pressed, menu_attr, submenu_attr, lookup, open_submenu):
        # Shared handler for the two-level states (tactics, items).
        menu = getattr(self, menu_attr)
        submenu = getattr(self, submenu_attr)
        if self.description:
            # Descriptions are only opened from the submenu level.
            self.description.handle_input(pressed)
            if (pressed[K_x] or pressed[K_z]) and not self.description.has_more_stuff_to_show():
                self.description.shutdown()
                self.description = None
                submenu.focus()
            return
        if submenu:
            submenu.handle_input(pressed)
            if pressed[K_x]:
                self.select_sound.play()
                choice = submenu.get_choice().lower()
                self.description = create_prompt(lookup[choice]['description'])
                submenu.unfocus()
            elif pressed[K_z]:
                # Back out to the category menu.
                setattr(self, submenu_attr, None)
                menu.focus()
            return
        menu.handle_input(pressed)
        if pressed[K_x]:
            self.select_sound.play()
            open_submenu(menu)
        elif pressed[K_z]:
            setattr(self, menu_attr, None)
            self.menu.focus()
            self.state = 'main'

    def create_weapons_menu(self):
        """Open the weapons list, sorted by attack points."""
        self.state = 'weapons'
        weapons = [
            {
                'name': name.title(),
                'attack_points': info['attack_points'],
            }
            for name, info in ITEMS.items()
            if info['type'] == 'weapon' and self.game.conditions_are_met(info.get('conditions'))
        ]
        sorted_weapons = sorted(weapons, key=lambda k: k['attack_points'])
        # Spill into a two-column grid when the list gets long.
        if len(sorted_weapons) > 10:
            gridded_weapons = [
                [w['name'] for w in sorted_weapons[0:10]],
                [w['name'] for w in sorted_weapons[10:]],
            ]
            self.weapons_menu = MenuGrid(gridded_weapons, border=True)
        else:
            self.weapons_menu = MenuBox([w['name'] for w in sorted_weapons], border=True)
        self.menu.unfocus()
        self.weapons_menu.focus()

    def create_stats_menu(self):
        """Open the stats list, in its configured sort order."""
        self.state = 'stats'
        stats = [{'name': name, 'sort_order': info['sort_order']} for name, info in STATS.items()]
        sorted_stats = sorted(stats, key=lambda k: k['sort_order'])
        self.stats_menu = MenuBox([s['name'] for s in sorted_stats], border=True)
        self.menu.unfocus()
        self.stats_menu.focus()

    def create_places_menu(self):
        """Open the places list, in its configured sort order."""
        self.state = 'places'
        places = [{'name': name, 'sort_order': info['sort_order']} for name, info in PLACES.items()]
        sorted_places = sorted(places, key=lambda k: k['sort_order'])
        self.places_menu = MenuBox([s['name'] for s in sorted_places], border=True)
        self.menu.unfocus()
        self.places_menu.focus()

    def create_body_armor_menu(self):
        """Open the body-armor grid, sorted by armor class."""
        self.state = 'body_armor'
        body_armor = [
            {
                'name': name.title(),
                'armor_class': info['armor_class'],
            }
            for name, info in ITEMS.items()
            if info['type'] == 'armor' and self.game.conditions_are_met(info.get('conditions'))
        ]
        sorted_body_armor = sorted(body_armor, key=lambda k: k['armor_class'])
        gridded_body_armor = [
            [b['name'] for b in sorted_body_armor[0:10]],
            [b['name'] for b in sorted_body_armor[10:]],
        ]
        self.body_armor_menu = MenuGrid(gridded_body_armor, border=True)
        self.menu.unfocus()
        self.body_armor_menu.focus()

    def create_helmets_menu(self):
        """Open the helmets list, sorted by armor class."""
        self.state = 'helmets'
        helmets = [
            {
                'name': name.title(),
                'armor_class': info['armor_class'],
            }
            for name, info in ITEMS.items()
            if info['type'] == 'helmet' and self.game.conditions_are_met(info.get('conditions'))
        ]
        sorted_helmets = [h['name'] for h in sorted(helmets, key=lambda k: k['armor_class'])]
        self.helmets_menu = MenuBox(sorted_helmets, border=True)
        self.menu.unfocus()
        self.helmets_menu.focus()

    def create_tactics_menu(self):
        """Open the grid of tactic slots (the leading digit is the slot)."""
        self.state = 'tactics'
        tactics = [
            [
                '1 FIRE DAMAGE',
                '2 WATER DAMAGE',
                '3 HEALING',
            ],
            [
                '4 DEFENSIVE',
                '5 MISC',
                '6 OFFENSIVE',
            ],
        ]
        self.tactics_menu = MenuGrid(tactics, border=True)
        self.menu.unfocus()
        self.tactics_menu.focus()

    def create_tactics_submenu(self, slot):
        """Open the list of tactics for `slot`, filtered by player level."""
        tactics = [
            {
                'name': name.title(),
                'min_level': info['min_level'],
            }
            for name, info in TACTICS.items()
            if info['slot'] == slot
        ]
        sorted_tactics = sorted(tactics, key=lambda k: k['min_level'])
        # NOTE(review): the -6 offset also lists tactics up to six levels
        # above the current one — presumably a preview; confirm intent.
        listed_tactics = [t['name'] for t in sorted_tactics if self.game.game_state['level'] >= t['min_level']-6]
        self.tactics_submenu = MenuBox(listed_tactics, border=True)
        self.tactics_menu.unfocus()
        self.tactics_submenu.focus()

    def create_items_menu(self):
        """Open the item-category list (see ITEMS_MENU)."""
        self.state = 'items'
        items = [
            {'name': name, 'sort_order': info['sort_order']}
            for name, info in ITEMS_MENU.items()
            if self.game.conditions_are_met(info.get('conditions'))
        ]
        sorted_items = [i['name'] for i in sorted(items, key=lambda k: k['sort_order'])]
        self.items_menu = MenuBox(sorted_items, border=True)
        self.menu.unfocus()
        self.items_menu.focus()

    def create_items_submenu(self, typ):
        """Open the alphabetical list of items of type `typ`."""
        items = [
            name.title()
            for name, info in ITEMS.items()
            if info['type'] == typ and self.game.conditions_are_met(info.get('conditions'))
        ]
        sorted_items = sorted(items)
        # Spill into a two-column grid when the list gets long.
        if len(sorted_items) > 10:
            gridded_items = [
                sorted_items[0:10],
                sorted_items[10:],
            ]
            self.items_submenu = MenuGrid(gridded_items, border=True)
        else:
            self.items_submenu = MenuBox(sorted_items, border=True)
        self.items_menu.unfocus()
        self.items_submenu.focus()
def bar():
    """Invoke foo() through one extra stack frame."""
    foo()
def foo():
    """Call asdf() twice, swallowing the exceptions it raises."""
    # FIX: the original bare `except:` also swallowed BaseException
    # subclasses like KeyboardInterrupt and SystemExit; catch only
    # Exception (asdf raises a plain Exception, so behaviour for the
    # intended case is unchanged).
    try:
        asdf()
    except Exception:
        pass
    try:
        asdf()
    except Exception:
        pass
def asdf():
    """Always raise a plain Exception (caught by foo's try/except blocks)."""
    raise Exception()
|
from rest_framework import permissions
from django.db.models import Q
from .models import Task
class TaskPermission(permissions.BasePermission):
    """Permissions for Task endpoints: any authenticated user may use the
    standard actions; object access is restricted further below."""

    def has_permission(self, request, view):
        # NOTE(review): `is_authenticated` is invoked as a method, which
        # matches older Django/DRF; on modern Django it is a property and
        # this call would fail — confirm the targeted version.
        authenticated = request.user.is_authenticated()
        # All recognised actions require authentication only.
        if view.action == 'list':
            return authenticated
        elif view.action == 'create':
            return authenticated
        elif view.action in ['retrieve', 'update', 'partial_update', 'destroy']:
            return authenticated
        else:
            return False

    def has_object_permission(self, request, view, obj):
        authenticated = request.user.is_authenticated()
        staff = authenticated and request.user.is_staff
        owner = authenticated and (obj.owner == request.user)
        responsible = authenticated and (obj.responsible == request.user)
        # Tasks the user owns or is responsible for...
        task = Task.objects.filter(
            Q(owner=request.user) | Q(responsible=request.user)
        )
        ids = []
        for i in task:
            ids.append(i.id)
        # ...plus the base tasks of those tasks.
        for i in task:
            if i.base_task:
                ids.append(i.base_task.id)
        task = Task.objects.filter(id__in=ids)
        if view.action == 'retrieve':
            # NOTE(review): `task` is a QuerySet, so this is truthy whenever
            # the user has ANY owned/responsible task — it never checks that
            # `obj` itself is in the queryset. Looks suspicious; confirm the
            # intended rule before changing.
            return task or owner or responsible or staff
        elif view.action in ['update', 'partial_update']:
            return owner or responsible or staff
        elif view.action == 'destroy':
            return owner or staff
        else:
            return False
class ProjectPermission(permissions.BasePermission):
    """View- and object-level permissions for Project endpoints."""

    _STANDARD_ACTIONS = ('list', 'create', 'retrieve', 'update',
                         'partial_update', 'destroy')

    def has_permission(self, request, view):
        """Every standard action is open to any authenticated user."""
        if view.action in self._STANDARD_ACTIONS:
            return request.user.is_authenticated()
        return False

    def has_object_permission(self, request, view, obj):
        """Retrieval is open; modification and deletion need owner or staff."""
        authenticated = request.user.is_authenticated()
        if view.action == 'retrieve':
            return authenticated
        staff = authenticated and request.user.is_staff
        owner = authenticated and (obj.owner == request.user)
        if view.action in ('update', 'partial_update', 'destroy'):
            return owner or staff
        return False
class EmployeePermission(permissions.BasePermission):
    """View- and object-level permissions for Employee endpoints."""

    def has_permission(self, request, view):
        """All standard actions require only authentication."""
        authenticated = request.user.is_authenticated()
        if view.action in ('list', 'create', 'retrieve', 'update',
                           'partial_update', 'destroy'):
            # The original carried a "# staff" note on 'create', suggesting it
            # was meant to be staff-only at some point; it is currently open.
            return authenticated
        return False

    def has_object_permission(self, request, view, obj):
        """Retrieve is open; edits need the linked user or staff; delete needs staff."""
        authenticated = request.user.is_authenticated()
        staff = authenticated and request.user.is_staff
        owner = authenticated and (obj.user == request.user)
        if view.action == 'retrieve':
            return authenticated
        if view.action in ('update', 'partial_update'):
            return owner or staff
        if view.action == 'destroy':
            return staff
        return False
class SubdivisionPermission(permissions.BasePermission):
    """View- and object-level permissions for Subdivision endpoints."""

    def has_permission(self, request, view):
        """Creation needs staff; all other standard actions need authentication."""
        authenticated = request.user.is_authenticated()
        staff = authenticated and request.user.is_staff
        if view.action == 'create':
            return staff
        if view.action in ('list', 'retrieve', 'update',
                           'partial_update', 'destroy'):
            return authenticated
        return False

    def has_object_permission(self, request, view, obj):
        """Retrieve is open; edits need the manager or staff; delete needs staff."""
        authenticated = request.user.is_authenticated()
        staff = authenticated and request.user.is_staff
        manager = authenticated and (obj.manager == request.user)
        if view.action == 'retrieve':
            return authenticated
        if view.action in ('update', 'partial_update'):
            return manager or staff
        if view.action == 'destroy':
            return staff
        return False
# Read a single word from stdin and print each character on its own line.
s = input()
for a in s:
    print(a)
class QQAPIError(Exception):
    """Raised when a call to the QQ API fails."""
    pass
|
# Copyright (c) 2015 - 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from .board import Board
import RPi.GPIO as GPIO
import csv
import os
class VirtualBoard(Board):
    """
    Board class for virtual hardware.

    Simulates the water-tank hardware: consumption is replayed from
    water_data.csv and sensor/pump/flow state is mirrored onto GPIO pins.
    """

    # GPIO pin numbers (GPIO.BOARD numbering).
    HIGH_SENSOR_PIN = 11
    LOW_SENSOR_PIN = 19
    FLOW_PIN = 13
    PUMP_PIN = 15

    # Simulation thresholds and inflow rate.
    LOW_WATER_LEVEL = 70
    HIGH_WATER_LEVEL = 80
    MAX_WATER_LEVEL = 100
    FLOW_RATE = 8

    def __init__(self):
        super(VirtualBoard, self).__init__()
        self.flowing = False
        self.flow = self.FLOW_RATE
        self.water_level = 75
        self.increasing = True
        self.water_levels = []  # consumption samples loaded from the CSV
        self.water_level_index = 0
        self.pump_rate = 0
        self.water_level_arr_size = 0
        self.pump = False
        self.init_water_levels()
        self.init_pin_values()

    def init_water_levels(self):
        """Load the simulated water-consumption samples from water_data.csv."""
        dir_path = os.path.dirname(os.path.realpath(__file__))
        with open(dir_path + "/water_data.csv") as water_data_csv:
            counter = 0
            readCSV = csv.reader(water_data_csv, delimiter=',')
            for row in readCSV:
                self.water_levels.append(float(row[0]))
                counter += 1
            self.water_level_arr_size = counter

    def init_pin_values(self):
        """Initialize GPIO pins for use and drive their initial levels."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(self.HIGH_SENSOR_PIN, GPIO.OUT)
        GPIO.setup(self.LOW_SENSOR_PIN, GPIO.OUT)
        GPIO.setup(self.FLOW_PIN, GPIO.OUT)
        GPIO.setup(self.PUMP_PIN, GPIO.OUT)
        # Initialize low sensor pin as on
        GPIO.output(self.LOW_SENSOR_PIN, GPIO.HIGH)
        # Initialize high sensor pin as off
        GPIO.output(self.HIGH_SENSOR_PIN, GPIO.LOW)
        # Initialize flow pin as on
        GPIO.output(self.FLOW_PIN, GPIO.HIGH)
        # Initialize pump pin as on
        GPIO.output(self.PUMP_PIN, GPIO.HIGH)
        # reset to input (from PLC) after initializing pin values
        GPIO.setup(self.FLOW_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.PUMP_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)

    def update_hardware_state(self):
        """
        Update hardware state.  No-op on the virtual board.
        """
        pass

    # hardware functions
    def pump_on(self):
        self.pump = True

    def pump_off(self):
        self.pump = False

    def get_flow(self):
        return self.flow

    def set_flow(self, flow):
        # Bug fix: original signature was ``set_flow(flow)`` (missing
        # ``self``), so the method was unusable.
        self.flow = flow

    def get_water_level(self):
        return self.water_level

    def set_water_level(self, water_level):
        # Bug fix: original signature was missing ``self`` as well.
        self.water_level = water_level

    def flow_on(self):
        self.flowing = True
        self.flow = self.FLOW_RATE

    def flow_off(self):
        # Bug fix: original assigned ``self.Flowing`` (capital F), creating a
        # new attribute and leaving ``self.flowing`` stale.
        self.flowing = False
        self.flow = 0

    def detect_flow(self):
        """Return True when a significant flow (> 1) is present."""
        return self.flow > 1

    def detect_nonflow(self):
        """Return True when flow has effectively stopped (<= 0.5)."""
        return self.flow <= 0.5

    def sample_water_level(self):
        self.updateWaterLevel()
        return self.water_level

    def sample_flow(self):
        return self.flow

    def updateWaterLevel(self):
        """Advance the simulation one step and refresh the sensor pins."""
        if (GPIO.input(self.PUMP_PIN)):
            # Pump running: inflow minus the replayed consumption sample.
            self.water_level += (self.flow - self.water_levels[self.water_level_index])
            self.water_level_index += 1
            if self.water_level_index >= self.water_level_arr_size:
                self.water_level_index = 0  # wrap around and replay the CSV
        else:
            self.water_level += self.flow
        if self.water_level >= self.HIGH_WATER_LEVEL:
            GPIO.output(self.HIGH_SENSOR_PIN, GPIO.HIGH)
            print("High water level")
        elif self.water_level <= self.LOW_WATER_LEVEL:
            GPIO.output(self.LOW_SENSOR_PIN, GPIO.LOW)
            print("Low water level")
        else:
            GPIO.output(self.HIGH_SENSOR_PIN, GPIO.LOW)
            GPIO.output(self.LOW_SENSOR_PIN, GPIO.HIGH)
            print("Normal water level")
        # if self.HIGH_WATER_LEVEL - self.water_level > self.FLOW_RATE:
        #     self.flow_on()
        # else:
        #     self.flow_off()

    def write_message(self, message, line=0):
        """
        Write message to LCD screen (printed to stdout on the virtual board).
        """
        message = message.ljust(16)
        print("\n " + message + "\n")

    def change_background(self, color):
        """
        Change LCD screen background color.

        NOTE(review): relies on ``self.screen`` which this class never sets —
        presumably provided by ``Board``; confirm.
        """
        colors = {
            "red": lambda: self.screen.setColor(255, 0, 0),
            "purple": lambda: self.screen.setColor(255, 0, 255),
            "blue": lambda: self.screen.setColor(0, 0, 255),
            "green": lambda: self.screen.setColor(0, 255, 0),
            "yellow": lambda: self.screen.setColor(255, 255, 0),
            "white": lambda: self.screen.setColor(255, 255, 255)
        }
        colors.get(color, colors["white"])()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Webonyx and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.db_schema import DbTable
def rename_custom_field(doctype, old_fieldname, new_fieldname):
    """Rename a custom field's database column and its Custom Field record.

    No-op when *doctype* does not exist.  SQL identifiers (table/column
    names) cannot be bound as parameters and are still interpolated, but the
    UPDATE's values are now passed as bound parameters instead of being
    formatted into the query string (SQL-injection hardening).
    """
    if not frappe.db.exists('DocType', doctype):
        return
    tab = DbTable(doctype)
    frappe.db.commit()
    columns = tab.columns
    # if old_fieldname not in columns:
    #     return
    query = "change `{}` `{}` {}".format(
        old_fieldname, new_fieldname, tab.columns[old_fieldname].get_definition())
    frappe.db.sql("ALTER TABLE `{}` {}".format(tab.name, query))
    frappe.db.sql(
        """UPDATE `tabCustom Field`
           SET `fieldname` = %s, `name` = %s
           WHERE `dt` = %s AND `fieldname` = %s""",
        (new_fieldname, "{}-{}".format(doctype, new_fieldname),
         doctype, old_fieldname),
    )
|
import unittest
from pyspark.sql import SparkSession, SQLContext
from pyselica.ml.feature import JapaneseTokenizer
class TestCases(unittest.TestCase):
    """Smoke tests for pyselica's JapaneseTokenizer Spark transformer."""

    def setUp(self) -> None:
        self.spark = SparkSession.builder.appName('unittest').getOrCreate()

    def test_japanese_tokenizer(self):
        """Tokenizing and re-joining a sentence must reproduce the input."""
        tokenizer = JapaneseTokenizer(inputCol="sentence", outputCol="words")
        sqlContext = SQLContext(self.spark)
        df = sqlContext.createDataFrame([(1, "すもももももももものうち")], ["id", "sentence"])
        tokenized = tokenizer.transform(df)
        pdf = tokenized.toPandas()
        # assertIn / assertEqual instead of assertTrue(in) and the
        # deprecated assertEquals alias.
        self.assertIn('words', pdf.columns)
        self.assertEqual("すもももももももものうち", ''.join(pdf[pdf['id'] == 1].words.values[0]))
|
"""Example: optimization with different wake models
This example uses a dummy cost function to optimize a simple wind turbine
layout that is subject to constraints. The optimization pushes the wind turbine
locations to specified locations in the farm.
"""
import os
from matplotlib.patches import Polygon
import numpy as np
from py_wake.deficit_models.gcl import GCL
from py_wake.deficit_models.noj import NOJ
from py_wake.examples.data.hornsrev1 import V80
from py_wake.site._site import UniformWeibullSite
from topfarm._topfarm import TopFarmProblem
from topfarm.constraint_components.boundary import XYBoundaryConstraint
from topfarm.constraint_components.spacing import SpacingConstraint
from topfarm.cost_models.py_wake_wrapper import PyWakeAEPCostModelComponent
from topfarm.easy_drivers import EasyScipyOptimizeDriver
from topfarm.plotting import XYPlotComp, NoPlot
from topfarm.cost_models.dummy import DummyCost
from py_wake.wind_farm_models.wind_farm_model import WindFarmModel
def main():
    """Optimize a 3-turbine layout with two wake models (GCL and NOJ) and
    cross-evaluate each model's AEP at the other's optimal locations.

    NOTE(review): the ``if __name__ == '__main__'`` guard is *inside*
    main(), so the unconditional ``main()`` call below does nothing when
    this module is merely imported — confirm this is the intended pattern.
    """
    if __name__ == '__main__':
        try:
            import matplotlib.pyplot as plt
            plt.gcf()
            plot_comp = XYPlotComp
            plot = True
        except RuntimeError:
            # No usable display backend (headless run): skip plotting.
            plot_comp = NoPlot
            plot = False
        # ------------------------ INPUTS ------------------------
        # ------------------------ DEFINE WIND RESOURCE ------------------------
        # wind resource info (wdir frequency, weibull a and k), one entry per
        # 30-degree direction sector
        f = [3.597152, 3.948682, 5.167395, 7.000154, 8.364547, 6.43485, 8.643194,
             11.77051, 15.15757, 14.73792, 10.01205, 5.165975]
        a = [9.176929, 9.782334, 9.531809, 9.909545, 10.04269, 9.593921, 9.584007,
             10.51499, 11.39895, 11.68746, 11.63732, 10.08803]
        k = [2.392578, 2.447266, 2.412109, 2.591797, 2.755859, 2.595703, 2.583984,
             2.548828, 2.470703, 2.607422, 2.626953, 2.326172]
        site = UniformWeibullSite(p_wd=f, a=a, k=k, ti=0.075)
        wt = V80()
        # ------------------------ SETUP PROBLEM ------------------------
        rot_diam = 80.0  # rotor diameter [m]
        init_pos = np.array([(0, 2 * rot_diam), (0, 0),
                             (0, -2 * rot_diam)])  # initial turbine positions
        b = 2 * rot_diam + 10  # boundary size
        boundary = [(-b, -b), (-b, b), (b, b), (b, -b)]  # corners of wind farm boundary
        min_spacing = 2.0 * rot_diam  # minimum spacing between turbines [m]
        # ------------------------ OPTIMIZATION ------------------------

        def get_tf(windFarmModel):
            # Build one TopFarm problem around the given wake model.
            return TopFarmProblem(
                design_vars=dict(zip('xy', init_pos.T)),
                cost_comp=PyWakeAEPCostModelComponent(windFarmModel, n_wt=3, ws=10, wd=np.arange(0, 360, 12)),
                constraints=[SpacingConstraint(min_spacing),
                             XYBoundaryConstraint(boundary)],
                driver=EasyScipyOptimizeDriver(),
                plot_comp=plot_comp())
        # GCL: define the wake model and optimization problem
        tf_gcl = get_tf(GCL(site, wt))
        # NOJ: define the wake model and optimization problem
        tf_noj = get_tf(NOJ(site, wt))
        # run the optimization
        cost_gcl, state_gcl, recorder_gcl = tf_gcl.optimize()
        cost_noj, state_noj, recorder_noj = tf_noj.optimize()
        # ------------------------ POST-PROCESS ------------------------
        # get the optimized locations
        opt_gcl = tf_gcl.turbine_positions
        opt_noj = tf_noj.turbine_positions
        # create the array of costs for easier printing
        costs = np.diag([cost_gcl, cost_noj])
        costs[0, 1] = tf_noj.evaluate(state_gcl)[0]  # noj cost of gcl locs
        costs[1, 0] = tf_gcl.evaluate(state_noj)[0]  # gcl cost of noj locs
        # ------------------------ PRINT STATS ------------------------
        # percentage differences relative to the mean of the two costs
        aep_diffs = 200 * (costs[:, 0] - costs[:, 1]) / (costs[:, 0] + costs[:, 1])
        loc_diffs = 200 * (costs[0, :] - costs[1, :]) / (costs[0, :] + costs[1, :])
        print('\nComparison of cost models vs. optimized locations:')
        print('\nCost | GCL_aep NOJ_aep')
        print('---------------------------------')
        print(f'GCL_loc |{costs[0,0]:11.2f} {costs[0,1]:11.2f}' +
              f' ({aep_diffs[0]:.2f}%)')
        print(f'NOJ_loc |{costs[1,0]:11.2f} {costs[1,1]:11.2f}' +
              f' ({aep_diffs[1]:.2f}%)')
        print(f' ({loc_diffs[0]:.2f}%) ({loc_diffs[1]:.2f}%)')
        # ------------------------ PLOT (if possible) ------------------------
        if plot:
            # initialize the figure and axes
            fig = plt.figure(1, figsize=(7, 5))
            plt.clf()
            ax = plt.axes()
            # plot the boundary and desired locations
            ax.add_patch(Polygon(boundary, fill=False,
                                 label='Boundary'))  # boundary
            ax.plot(init_pos[:, 0], init_pos[:, 1], 'xk',
                    label='Initial')
            ax.plot(opt_gcl[:, 0], opt_gcl[:, 1], 'o',
                    label='GCL')
            ax.plot(opt_noj[:, 0], opt_noj[:, 1], '^',
                    label='NOJ')
            # make a few adjustments to the plot
            ax.autoscale_view()  # autoscale the boundary
            plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                       ncol=4, mode='expand', borderaxespad=0.)  # add a legend
            plt.tight_layout()  # zoom the plot in
            plt.axis('off')  # remove the axis
            # save the png
            folder, file = os.path.split(__file__)
            fig.savefig(folder + "/figures/" + file.replace('.py', '.png'))


main()
|
from typing import List

# Scratch pad exploring how to sort number-strings so that their
# concatenation is maximal (appears to be LeetCode 179 "Largest Number";
# see the commented-out Solution class below).
nums = [3,30,34,5,9]
# alist=list(map(str,nums))
# print(alist)
# def Compare(str):
#     def __lt__(x,y):
#         return x+y<y+x
# ans=sorted(map(str,nums),key=Compare,reverse=True)
# print(ans)
alist=['2','1','3','0']
class Compare(str):
    # str subclass whose ordering compares concatenations: x < y iff x+y < y+x.
    # The prints trace every comparison Timsort performs.
    def __lt__(x, y):
        print('x=',x,'y=',y)
        print(x+y<y+x)
        print()
        return x+y<y+x
ans=sorted(alist,key=Compare)
# class Solution:
#     def largestNumber(self, nums: List[int]) -> str:
#         largest_num = ''.join(sorted(map(str, nums), key=Compare, reverse=True))
#         return '0' if largest_num[0] == '0' else largest_num
# sol=Solution()
# ans=sol.largestNumber(nums)
print(ans)
from django.contrib import admin
from .models import Passport

# Register your models here.
# Make Passport manageable through the Django admin site.
admin.site.register(Passport)
#This script performs the functions required for lumin transfer that
#leongatys/NeuralImageSynthesis' Color Control code performs.
#https://github.com/leongatys/NeuralImageSynthesis/blob/master/ExampleNotebooks/ColourControl.ipynb
#Standalone script by github.com/ProGamerGov
import skimage
import numpy as np
import argparse
import imageio
from skimage import io,transform,img_as_float
from skimage.io import imread,imsave
from PIL import Image
from numpy import eye
# Command-line interface for the colour-control script.
parser = argparse.ArgumentParser()
parser.add_argument('--content_image', type=str, help="The content image. Ex: content.png")
parser.add_argument('--style_image', type=str, help="The style image. Ex: style.png")
parser.add_argument('--output_content_image', default='output_content.png', help="The name of your output content image. Ex: content_output.png", type=str)
parser.add_argument('--output_style_image', default='output_style.png', help="The name of your output style image. Ex: style_output.png", type=str)
# NOTE(review): --output_image shares the default 'output_style.png' with
# --output_style_image; looks like a copy-paste slip — confirm before changing.
parser.add_argument('--output_image', default='output_style.png', help="The name of your output image. Ex: output.png", type=str)
parser.add_argument('--org_content', default='original_content_image.png', help="The name of your original content image. Ex: org_content.png", type=str)
parser.add_argument('--output_lum2', default='out.png', help="The name of your output image from Neural-Style. Ex: out.png", type=str)
parser.add_argument('--cp_mode', default='lum', help="The script's mode. Options are: lum, lum2, match, and match_style", type=str)
# Bug fix: parse_args() was called twice; the first call's result was discarded.
args = parser.parse_args()
cp_mode = args.cp_mode
output_content_name = args.output_content_image
output_style_name = args.output_style_image
output_a_name = args.output_image
Image.MAX_IMAGE_PIXELS = 1000000000  # Support gigapixel images
def lum_transform(image):
"""
Returns the projection of a colour image onto the luminance channel
Images are expected to be of form (w,h,c) and float in [0,1].
"""
img = image.transpose(2,0,1).reshape(3,-1)
lum = np.array([.299, .587, .114]).dot(img).squeeze()
img = np.tile(lum[None,:],(3,1)).reshape((3,image.shape[0],image.shape[1]))
return img.transpose(1,2,0)
def rgb2luv(image):
    """Convert a (w,h,3) RGB image to a YUV-like 'luv' space via a 3x3 matrix."""
    flat = image.transpose(2, 0, 1).reshape(3, -1)
    matrix = np.array([[.299, .587, .114],
                      [-.147, -.288, .436],
                      [.615, -.515, -.1]])
    luv = matrix.dot(flat).reshape((3, image.shape[0], image.shape[1]))
    return luv.transpose(1, 2, 0)
def luv2rgb(image):
    """Convert a (w,h,3) 'luv' image back to RGB (inverse of rgb2luv)."""
    flat = image.transpose(2, 0, 1).reshape(3, -1)
    matrix = np.array([[1, 0, 1.139],
                       [1, -.395, -.580],
                       [1, 2.03, 0]])
    rgb = matrix.dot(flat).reshape((3, image.shape[0], image.shape[1]))
    return rgb.transpose(1, 2, 0)
def match_color(target_img, source_img, mode='pca', eps=1e-5):
    '''
    Match the colour distribution of the target image to that of the source
    image using a linear transform.

    Images are expected to be of form (w,h,c) and float in [0,1].
    Modes are 'chol', 'pca' or 'sym' for different choices of basis.
    *eps* regularizes the channel covariance matrices.

    Raises ValueError for an unknown mode (the original fell through and
    raised a confusing NameError on ``ts`` instead).
    '''
    # Centre both images and flatten to 3 x N channel matrices.
    mu_t = target_img.mean(0).mean(0)
    t = target_img - mu_t
    t = t.transpose(2, 0, 1).reshape(3, -1)
    Ct = t.dot(t.T) / t.shape[1] + eps * np.eye(t.shape[0])
    mu_s = source_img.mean(0).mean(0)
    s = source_img - mu_s
    s = s.transpose(2, 0, 1).reshape(3, -1)
    Cs = s.dot(s.T) / s.shape[1] + eps * np.eye(s.shape[0])
    if mode == 'chol':
        chol_t = np.linalg.cholesky(Ct)
        chol_s = np.linalg.cholesky(Cs)
        ts = chol_s.dot(np.linalg.inv(chol_t)).dot(t)
    elif mode == 'pca':
        eva_t, eve_t = np.linalg.eigh(Ct)
        Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T)
        eva_s, eve_s = np.linalg.eigh(Cs)
        Qs = eve_s.dot(np.sqrt(np.diag(eva_s))).dot(eve_s.T)
        ts = Qs.dot(np.linalg.inv(Qt)).dot(t)
    elif mode == 'sym':
        eva_t, eve_t = np.linalg.eigh(Ct)
        Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T)
        Qt_Cs_Qt = Qt.dot(Cs).dot(Qt)
        eva_QtCsQt, eve_QtCsQt = np.linalg.eigh(Qt_Cs_Qt)
        QtCsQt = eve_QtCsQt.dot(np.sqrt(np.diag(eva_QtCsQt))).dot(eve_QtCsQt.T)
        ts = np.linalg.inv(Qt).dot(QtCsQt).dot(np.linalg.inv(Qt)).dot(t)
    else:
        raise ValueError(
            "Unknown mode %r; expected 'chol', 'pca' or 'sym'" % (mode,))
    matched_img = ts.reshape(*target_img.transpose(2, 0, 1).shape).transpose(1, 2, 0)
    matched_img += mu_s
    # Clamp to the valid [0, 1] range.
    matched_img[matched_img > 1] = 1
    matched_img[matched_img < 0] = 0
    return matched_img
# Dispatch on the requested colour-preservation mode.
if cp_mode == 'lum':
    # Luminance transfer: write luminance-only versions of both images, with
    # the style luminance shifted to the content image's mean.
    style_img = args.style_image
    content_img = args.content_image
    org_content = args.org_content
    # NOTE(review): this read of the original-content file is discarded two
    # lines below where org_content is rebound to content_img.copy() — and
    # org_content is never used again in this branch; confirm intended.
    org_content = imageio.imread(org_content, pilmode="RGB").astype(float)/256
    style_img = imageio.imread(style_img, pilmode="RGB").astype(float)/256
    content_img = imageio.imread(content_img, pilmode="RGB").astype(float)/256
    org_content = content_img.copy()
    style_img = lum_transform(style_img)
    content_img = lum_transform(content_img)
    # Shift the style luminance to share the content image's mean.
    style_img -= style_img.mean(0).mean(0)
    style_img += content_img.mean(0).mean(0)
    # Clamp both to [0, 1] before saving.
    style_img [style_img < 0 ] = 0
    style_img [style_img > 1 ] = 1
    content_img [content_img < 0 ] = 0
    content_img [content_img > 1 ] = 1
    imsave(output_content_name, content_img)
    imsave(output_style_name, style_img)
elif cp_mode =='match':
    # Transfer the content image's colour statistics onto the style image.
    style_img = args.style_image
    content_img = args.content_image
    style_img = imageio.imread(style_img, pilmode="RGB").astype(float)/256
    content_img = imageio.imread(content_img, pilmode="RGB").astype(float)/256
    style_img = match_color(style_img, content_img, mode='pca')
    imsave(output_style_name, style_img)
elif cp_mode == 'match_style':
    # Reverse direction: transfer the style image's colours onto the content.
    style_img = args.style_image
    content_img = args.content_image
    style_img = imageio.imread(style_img, pilmode="RGB").astype(float)/256
    content_img = imageio.imread(content_img, pilmode="RGB").astype(float)/256
    content_img = match_color(content_img, style_img, mode='pca')
    imsave(output_content_name, content_img)
elif cp_mode == 'lum2':
    # Post-process a Neural-Style result: keep the stylized luminance but
    # restore the original content colours via the luv colour space.
    output = args.output_lum2
    org_content = args.org_content
    org_content = imageio.imread(org_content, pilmode="RGB").astype(float)/256
    output = imageio.imread(output, pilmode="RGB").astype(float)/256
    org_content = skimage.transform.resize(org_content, output.shape)
    org_content = rgb2luv(org_content)
    # Replace the luminance channel with the stylized image's mean intensity.
    org_content[:,:,0] = output.mean(2)
    output = luv2rgb(org_content)
    output[output<0] = 0
    output[output>1]=1
    imsave(output_a_name, output)
else:
    raise NameError('Unknown colour preservation mode')
|
import logging
import json
from datetime import datetime
import scraper_tools.title_list_scraper as title_list_scraper
import scraper_tools.data_processor as data_processor
import db_tools.db_reader as db_reader
import db_tools.db_writer as db_writer
def read_news_site_list(news_sites_file):
    """Load and return the list of news-site descriptors from a JSON file."""
    with open(news_sites_file, 'r') as handle:
        return json.load(handle)
def scrape_all_sites(news_sites_file):
    """Scrape each configured site's titles, persist them, and return the DB keys.

    All scrapes in one run share a single day/hour timestamp taken up front.
    """
    day = datetime.now().strftime("%Y-%m-%d")
    hour = datetime.now().strftime("%H:%M")
    scrape_keys = []
    for site in read_news_site_list(news_sites_file):
        titles = title_list_scraper.get_title_list_from_site(site)
        scrape_keys.append(
            db_writer.store_scraped_titles(site['name'], day, hour, titles))
    return scrape_keys
def process_scraped_data(scrape_keys, words_stored):
    """Turn stored raw titles into word tuples and persist them as one batch.

    Scrapes with one title or fewer are skipped.  Returns the length of the
    batch's string representation (a rough size metric for logging).
    """
    batch = []
    for scrape, raw_titles in db_reader.read_site_titles(scrape_keys):
        scrape_key, site, day, hour = scrape
        if len(raw_titles) > 1:
            words = data_processor.words_tuple_list_from_titles(raw_titles,
                                                                words_stored)
            batch.append((words, scrape_key, site, day, hour))
    db_writer.store_words(batch)
    return len(str(batch))
def generate_frontend_json(config):
    """Export aggregated word/datetime data for the frontend to a JSON file.

    Returns a (word count, datetime count) pair describing the payload.
    """
    payload = db_reader.get_frontend_data(config['words_stored'],
                                          config['max_days_in_json'])
    with open(config['frontend_data_json'], 'w') as out_file:
        json.dump(payload, out_file)
    return len(payload['words']), len(payload['datetimes'])
if __name__ == '__main__':
    # Append to a shared log file; one header line per run.
    logging.basicConfig(filename='druidscrape.log',
                        filemode='a',
                        format='%(asctime)s - %(message)s',
                        level=logging.INFO,
                        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info('---=== main_scraper.py ===---')
    with open('config.json', 'r') as file:
        config = json.load(file)
    # NOTE(review): only the frontend JSON is regenerated here; the scraping
    # functions above are presumably driven from elsewhere — confirm.
    no_of_words, no_of_dtm = generate_frontend_json(config)
    logging.info('JSON generated: ' + str(no_of_words) + ' words and '
                 + str(no_of_dtm) + ' datetimes')
|
import os
import json
import numpy as np
from PIL import Image
from collections import Counter
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
# Model category index (0-8) -> Cityscapes gtFine label id.
id_map = {0: 24, 1: 25, 2: 26, 3: 27, 4: 28, 5: 31, 6: 32, 7: 33, 8: 0}
# Inverse mapping; label ids 29 and 30 and the background id 0 all collapse
# into category 8 ("other") — TODO confirm 29/30's class names.
reversed_id_map = {24: 0, 25: 1, 26: 2, 27: 3, 28: 4, 31: 5, 32: 6, 33: 7, 0: 8, 29: 8, 30: 8}
def generate_gt(input_dir, gt_dir, output_dir, dataset_type='train', label_format='binary'):
    """Build per-instance ground-truth .npy labels from detection JSON files.

    Reads every JSON in <input_dir>/<dataset_type>, compares each instance's
    box against the gtFine instance-id map, and writes one label array per
    instance to <output_dir>/<dataset_type>.

    label_format:
      * 'binary'      -> [category correct?, other classes present in box?]
      * 'multi-class' -> [correct?, per-class area fraction of the box]

    Raises ValueError for any other label_format.
    """
    n = 1
    for file in os.listdir(os.path.join(input_dir, dataset_type)):
        print(f'precessing image No.{n}')  # sic: typo preserved from original output
        n += 1
        json_dir = os.path.join(input_dir, dataset_type, file)
        with open(json_dir) as json_file:
            data = json.load(json_file)
        image_name = data["image_id"]
        cityname = image_name.split('_')[0]
        gtname = '_'.join(image_name.split('_')[0:3])
        gt_instanceIds_image = Image.open(os.path.join(
            gt_dir, dataset_type, cityname, gtname + '_gtFine_instanceIds.png'))
        gt_instanceIds = np.asarray(gt_instanceIds_image)
        for i, instance in enumerate(data["instances"]):
            x1, y1, x2, y2 = instance["box"]
            # Instance ids appear to encode class_id * 1000 + index (Cityscapes
            # convention — confirm), so // 1000 recovers the class id.
            gt_ids = np.unique(gt_instanceIds[y1:y2, x1:x2])
            gt_ids = (gt_ids // 1000).tolist()
            # np.unique sorts ascending, so zeros (unlabeled) lead the list.
            while gt_ids and gt_ids[0] == 0:
                gt_ids.pop(0)
            gt_ids = [reversed_id_map[id] for id in gt_ids]
            correctness = (instance["category"] in gt_ids)
            if label_format == 'binary':
                if correctness:
                    gt_ids.remove(instance["category"])
                # Context = any *other* class overlapping the box.
                have_context = (gt_ids != [])
                label_data = np.array([int(correctness), int(have_context)])
            elif label_format == 'multi-class':
                area = (x2 - x1) * (y2 - y1)
                gt_box = (gt_instanceIds[y1:y2, x1:x2] // 1000)
                cnt = Counter(gt_box.flatten())
                label_data = np.zeros(10)
                label_data[0] = correctness
                for id in cnt:
                    # Fraction of the box's area covered by each class.
                    percentage = cnt[id] / area
                    label_data[reversed_id_map[id] + 1] = percentage
            else:
                err_msg = "Unrecognized label format: {}.".format(str(label_format))
                raise ValueError(err_msg)
            if not os.path.exists(os.path.join(output_dir, dataset_type)):
                os.makedirs(os.path.join(output_dir, dataset_type))
            np.save(file=os.path.join(output_dir, dataset_type, image_name.split(
                '.')[0] + f"_{i}.npy"), arr=label_data)
def simplify_dataset(feature_input_dir, feature_output_dir):
    """Split per-image feature JSON files into one .npy file per instance.

    Output files are named
    <image>_<category>_<x1>_<y1>_<x2>_<y2>_<proposal_idx>.npy.
    """
    for index, file in enumerate(os.listdir(feature_input_dir), start=1):
        # sic: 'precessing' typo preserved from the original output.
        print(f'precessing image No.{index}')
        with open(os.path.join(feature_input_dir, file)) as feature_file:
            feature = json.load(feature_file)
        image_name = feature["image_id"]
        for i, instance in enumerate(feature['instances']):
            feature_data = np.array(instance['feature_map'])
            category = instance["category"]
            x1, y1, x2, y2 = instance["box"]
            proposal_idx = instance["proposal_idx"]
            if not os.path.exists(feature_output_dir):
                os.makedirs(feature_output_dir)
            instance_name = "_".join((image_name.split('.')[0], str(category),
                                      str(x1), str(y1), str(x2), str(y2),
                                      str(proposal_idx))) + ".npy"
            np.save(file=os.path.join(feature_output_dir, instance_name),
                    arr=feature_data)
def merge_labelspace(label):
    """Collapse Cityscapes-style label ids into a compact 0-14 label space.

    Instance classes 24-28 and 31-33 map to 0-7, grouped ranges to 8-13,
    and everything else (including negative ids) to 14.
    """
    label = label.type(torch.long)
    # (comparison, threshold, temporary id >= 100).  Order matters: each
    # test must run before later ones could re-match the sentinel values
    # written so far, exactly mirroring the original rewrite sequence.
    rewrites = [
        ('lt', 0, 114),    # negative ids -> other vehicles
        ('le', 6, 108),    # void
        ('le', 10, 109),   # flat
        ('le', 16, 110),   # construction
        ('le', 20, 111),   # object
        ('le', 22, 112),   # nature
        ('eq', 23, 113),   # sky
        ('eq', 24, 100),   # person
        ('eq', 25, 101),   # rider
        ('eq', 26, 102),   # car
        ('eq', 27, 103),   # truck
        ('eq', 28, 104),   # bus
        ('eq', 31, 105),   # train
        ('eq', 32, 106),   # motorcycle
        ('eq', 33, 107),   # bicycle
        ('lt', 100, 114),  # every remaining raw id -> other vehicles
    ]
    ops = {'lt': torch.lt, 'le': torch.le, 'eq': torch.eq}
    for op_name, threshold, target in rewrites:
        label = torch.where(ops[op_name](label, threshold), target, label)
    return label - 100
class PoolingFeatureDataset(Dataset):
    """Dataset of per-instance pooled features with labels chosen by model_type.

    model_type selects what __getitem__ returns as the label:
      * 'binary' / 'multi-class': the precomputed .npy label vector
      * 'seg':    gtFine label-id crop around the (optionally zoomed) box
      * 'rgb':    RGB image crop, channels first
      * 'gray':   grayscale image crop
      * 'segprd': gtFine crop around the zoomed box plus the box's
                  coordinates relative to that crop
    """

    def __init__(self, feature_dir, label_dir, model_type, zoom_rate=0, simple_labelspace=False):
        assert os.path.exists(feature_dir)
        assert os.path.exists(label_dir)
        self.feature_dir = feature_dir
        self.label_dir = label_dir
        self.file_list = os.listdir(self.feature_dir)
        self.model_type = model_type
        self.zoom_rate = zoom_rate
        self.simple_labelspace = simple_labelspace

    def __len__(self):
        return len(self.file_list)

    def _load_feature(self, file_name):
        """Load one instance's pooled feature tensor."""
        feature = np.load(file=os.path.join(self.feature_dir, file_name))
        return torch.tensor(feature).float()

    @staticmethod
    def _parse_name(file_name):
        """Extract (city, category, x1, y1, x2, y2) from an instance file name."""
        city_name = file_name.split('_')[0]
        category, x1, y1, x2, y2 = [int(i) for i in file_name.split('.')[0].split('_')[4:9]]
        return city_name, category, x1, y1, x2, y2

    def _zoom_box(self, x1, y1, x2, y2, height, width):
        """Expand the box symmetrically by zoom_rate, clipped to the image.

        Bug fix: the 'seg'/'rgb'/'gray' branches originally overwrote x1/y1
        and then computed x2/y2 from the already-shifted values, over-
        expanding the crop whenever zoom_rate != 0 ('segprd' had it right).
        """
        box_w = x2 - x1
        box_h = y2 - y1
        x1_z = int(max(0, x1 - self.zoom_rate * box_w * 0.5))
        x2_z = int(min(width, x2 + self.zoom_rate * box_w * 0.5))
        y1_z = int(max(0, y1 - self.zoom_rate * box_h * 0.5))
        y2_z = int(min(height, y2 + self.zoom_rate * box_h * 0.5))
        return x1_z, y1_z, x2_z, y2_z

    def __getitem__(self, idx):
        file_name = self.file_list[idx]
        feature = self._load_feature(file_name)
        if self.model_type in ('binary', 'multi-class'):
            label = np.load(file=os.path.join(self.label_dir, file_name))
            return feature, torch.tensor(label).float()
        city_name, category, x1, y1, x2, y2 = self._parse_name(file_name)
        category = torch.tensor(id_map[category])
        base_name = '_'.join(file_name.split('_')[0:3])
        if self.model_type == 'seg':
            gt = np.asarray(Image.open(os.path.join(
                self.label_dir, city_name, base_name + '_gtFine_labelIds.png')))
            x1, y1, x2, y2 = self._zoom_box(x1, y1, x2, y2, gt.shape[0], gt.shape[1])
            bbox = torch.tensor([x1, y1, x2, y2])
            label = torch.tensor(gt[y1:y2, x1:x2]).float()
            if self.simple_labelspace:
                label = merge_labelspace(label)
            return [feature, bbox, category], label
        elif self.model_type == 'rgb':
            image = np.asarray(Image.open(os.path.join(
                self.label_dir, city_name, base_name + '_leftImg8bit.png')))
            x1, y1, x2, y2 = self._zoom_box(x1, y1, x2, y2, image.shape[0], image.shape[1])
            bbox = torch.tensor([x1, y1, x2, y2])
            # channels-first crop for image-reconstruction targets
            label = torch.tensor(image[y1:y2, x1:x2].transpose((2, 0, 1))).float()
            return [feature, bbox, category], label
        elif self.model_type == 'gray':
            image = Image.open(os.path.join(
                self.label_dir, city_name, base_name + '_leftImg8bit.png'))
            image = np.asarray(transforms.Compose([transforms.Grayscale(1)])(image))
            x1, y1, x2, y2 = self._zoom_box(x1, y1, x2, y2, image.shape[0], image.shape[1])
            bbox = torch.tensor([x1, y1, x2, y2])
            label = torch.tensor(image[y1:y2, x1:x2]).unsqueeze(0).float()
            return [feature, bbox, category], label
        elif self.model_type == 'segprd':
            # bbox keeps the *unzoomed* coordinates in this mode.
            bbox = torch.tensor([x1, y1, x2, y2])
            gt = np.asarray(Image.open(os.path.join(
                self.label_dir, city_name, base_name + '_gtFine_labelIds.png')))
            x1_z, y1_z, x2_z, y2_z = self._zoom_box(x1, y1, x2, y2, gt.shape[0], gt.shape[1])
            relative_coord = torch.tensor([x1 - x1_z, y1 - y1_z, x2 - x1_z, y2 - y1_z])
            zoom_label = torch.tensor(gt[y1_z:y2_z, x1_z:x2_z]).float()
            return [feature, bbox, category], [zoom_label, relative_coord]
        # Robustness fix: unknown model types previously fell through and
        # silently returned None.
        err_msg = "Unrecognized model type: {}.".format(str(self.model_type))
        raise ValueError(err_msg)
# if __name__ == "__main__":
# generate_gt(input_dir='/lhome/peizhli/datasets/cityscapes/pooling_feature',
# gt_dir='/lhome/peizhli/datasets/cityscapes/gtFine',
# output_dir='/lhome/peizhli/datasets/cityscapes/pooling_gt(multi-class classifier)',
# dataset_type='val',
# label_format='multi-class')
# simplify_dataset(feature_input_dir='/lhome/peizhli/datasets/cityscapes/pooling_feature/train',
# feature_output_dir='/lhome/peizhli/datasets/cityscapes/pooling_feature_sim/train')
|
from django.conf.urls import url
from django.urls import path
from medrecords_app import views
app_name = "medrecords_app"
urlpatterns = [
    # NOTE(review): these two patterns are regexes without ^/$ anchors, so
    # they match anywhere in the requested path — confirm before anchoring.
    url("record_create/", views.MedicalRecordCreateView.as_view(), name="medrecord_form"),
    url("list", views.MedicalRecordView.as_view(), name="medical_record"),
    # Raw strings: '\w' inside a plain string literal is an invalid escape
    # (SyntaxWarning since Python 3.6, slated to become a SyntaxError).
    url(r'^record_medical/update/(?P<pk>[\w-]+)$', views.MedicalRecordUpdate.as_view(), name='medrecord_update'),
    url(r'^record_medical/delete/(?P<pk>[\w-]+)$', views.MedicalRecordDelete.as_view(), name='medrecord_delete'),
]
|
from PyQt4 import QtGui
from ui_mant_libros_new import NewLibrosWindow
from ui_mant_libros_edit import EditLibrosWindow
from ui_mant_libros_id_edit import GetIdEditWindow
# Debug only
import inspect
class MenuLibros(QtGui.QWidget):
    """
    Menu window for book ("Libros") maintenance: create, edit, list, delete.
    Each action opens its dedicated window and closes this menu.
    """
    def __init__(self):
        super(MenuLibros, self).__init__()
        self.createButtons()
        # Fixed: the title was previously set twice with the same value.
        self.setWindowTitle('Mantenimiento Libros')
        self.setWindowIcon(QtGui.QIcon('images/user-plus.png'))
        self.setGeometry(650, 300, 150, 100)
    def createButtons(self):
        """Build the four action buttons and lay them out horizontally."""
        btn_new_libros = QtGui.QPushButton('Nuevo')
        btn_new_libros.clicked.connect(self.open_new_libros_window)
        btn_edit_libros = QtGui.QPushButton('Editar')
        btn_edit_libros.clicked.connect(self.open_edit_libros_window)
        btn_list_libros = QtGui.QPushButton('Listar')
        # Fixed: previously wired straight to self.close, leaving the
        # open_list_reserva_window handler defined below unreachable.
        btn_list_libros.clicked.connect(self.open_list_reserva_window)
        btn_delete_libros = QtGui.QPushButton('Eliminar')
        # Fixed: same as above for the delete handler.
        btn_delete_libros.clicked.connect(self.open_delete_reserva_window)
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(btn_new_libros)
        hbox.addWidget(btn_edit_libros)
        hbox.addWidget(btn_list_libros)
        hbox.addWidget(btn_delete_libros)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox)
        self.setLayout(vbox)
    def open_new_libros_window(self):
        """Open the 'new book' window and close this menu."""
        self.new_libros_view = NewLibrosWindow()
        self.new_libros_view.show()
        print(inspect.stack()[0][3])  # debug: trace which handler fired
        self.close()
    def open_edit_libros_window(self):
        """Open the id-prompt window for editing and close this menu."""
        self.edit_libros_view = GetIdEditWindow()
        self.edit_libros_view.show()
        print(inspect.stack()[0][3])  # debug: trace which handler fired
        self.close()
    def open_list_reserva_window(self):
        """Placeholder for the list window; currently just closes the menu."""
        # self.new_reserva_view = NewReserva()
        # self.new_reserva_view.show()
        print(inspect.stack()[0][3])  # debug: trace which handler fired
        self.close()
    def open_delete_reserva_window(self):
        """Placeholder for the delete window; currently just closes the menu."""
        # self.new_reserva_view = NewReserva()
        # self.new_reserva_view.show()
        print(inspect.stack()[0][3])  # debug: trace which handler fired
        self.close()
if __name__ == '__main__':
    # Stand-alone launch: create the Qt application and show the menu.
    import sys
    application = QtGui.QApplication(sys.argv)
    window = MenuLibros()
    window.show()
    sys.exit(application.exec_())
|
# -*- coding: utf-8 -*-
"""
Data generation for Figure 09
"""
import sys
sys.path.append("../main/")
from ODEdrop2D import *
from pdeloader import *
from matplotlib.gridspec import GridSpec
# Time-periodic volume forcing; Vavg/Vamp presumably mean average 2 and
# amplitude 1 — TODO confirm against Vperiodic's signature in ODEdrop2D.
V = lambda t: Vperiodic(t,m=20,p=100,Vavg=2,Vamp=1)
# Spatial heterogeneity profile theta(x) used as the drop's substrate pattern.
theta = lambda x: 1 + .1*np.cos(1.6*np.pi*x) + 0.2*np.sin(0.2*np.pi*x)
# Solve the ODE problem
drop = ODEdrop2D(ic=(1,-1),t_end=1000,het=theta,V=V)
t = np.arange(0,1001)
# Two side-by-side panels: (a) midpoint vs volume, (b) radius vs volume.
fig = plt.figure(figsize=(8,3))
gs = GridSpec(1, 2, figure=fig,wspace=0.24)
ax0 = fig.add_subplot(gs[0],adjustable='box')
ax1 = fig.add_subplot(gs[1],adjustable='box')
# Load PDE
t_pde,d_pde , l_pde = pdeloader('Figure09_PDE.mat')
# Solve ODE
drop.solve()
ab = drop.evaluate(t)  # contact-line positions; ab[0]/ab[1] are the two ends
# Plot of the midpoint
# V(t) is indexed with [0] — apparently returns (volume, ...); TODO confirm.
ax0.plot(V(t_pde)[0],l_pde,'k',lw=0.25)
ax0.plot(V(t)[0],np.mean(ab,axis=0),'--',lw=1)
ax0.set_xlabel('$v$')
ax0.set_ylabel('$\\ell$')
ax0.text(0.3,ax0.get_ylim()[1],'(a)')
# Plot of the radius
ax1.plot(V(t_pde)[0],d_pde,'k',lw=0.25,label='PDE')
ax1.plot(V(t)[0],0.5*(ab[0]-ab[1]),'--',lw=1,label='ODEs')
ax1.set_xlabel('$v$')
ax1.set_ylabel('$d$')
ax1.legend()
ax1.text(0.5,ax1.get_ylim()[1],'(b)')
plt.savefig('Figure09.png', bbox_inches='tight',dpi=200)
import unittest
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from scipy.special import expit
from sklearn.linear_model import LinearRegression, LogisticRegression
from functools import partial
from skater.core.local_interpretation.lime.lime_tabular import LimeTabularExplainer
class TestLime(unittest.TestCase):
    """
    Test imported lime package
    """
    def setUp(self):
        """
        Build a small binary-feature dataset and fitted models for the tests.
        """
        self.seed = 1
        # Fixed: the seed attribute was assigned but never applied, so the
        # noisy targets (and hence the fitted models and LIME explanations)
        # differed between runs. Seed NumPy's RNG for reproducibility.
        np.random.seed(self.seed)
        self.X = np.array([
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ])
        self.n, self.dim = self.X.shape
        self.feature_names = ['x{}'.format(i) for i in range(self.dim)]
        self.index = ['{}'.format(i) for i in range(self.n)]
        # True generative coefficients; test_lime_coef_accuracy recovers them.
        self.B = np.array([-5, 0, 5])
        self.y = np.dot(self.X, self.B) + np.random.normal(0, .01, size=self.n)
        self.y_for_classifier = np.round(expit(self.y))
        self.example = self.X[0]
        self.regressor = LinearRegression()
        self.regressor.fit(self.X, self.y)
        self.classifier = LogisticRegression()
        self.classifier.fit(self.X, self.y_for_classifier)
        self.model_regressor = LinearRegression()
    def test_regression_with_feature_names(self):
        """
        Ensure lime.lime_tabular works when predict_fn = regressor.predict
        and feature names are passed
        """
        interpretor = LimeTabularExplainer(self.X, feature_names=self.feature_names, mode="regression")
        assert interpretor.explain_instance(self.example, self.regressor.predict)
    def test_regression_without_feature_names(self):
        """
        Ensure lime.lime_tabular works when predict_fn = regressor.predict
        and feature names are NOT passed
        """
        interpretor = LimeTabularExplainer(self.X, mode="regression")
        assert interpretor.explain_instance(self.example, self.regressor.predict)
    def test_classifier_no_proba_without_feature_names(self):
        """
        Ensure lime.lime_tabular raises when predict_fn = classifier.predict
        (hard labels, no probabilities) and feature names are NOT passed
        """
        interpretor = LimeTabularExplainer(self.X)
        interpretor_func = partial(interpretor.explain_instance, *[self.example, self.classifier.predict])
        self.assertRaises(NotImplementedError, interpretor_func)
    def test_classifier_with_proba_without_feature_names(self):
        """
        Ensure lime.lime_tabular works when predict_fn = classifier.predict_proba
        and feature names are NOT passed
        """
        interpretor = LimeTabularExplainer(self.X)
        assert interpretor.explain_instance(self.example, self.classifier.predict_proba)
    def test_classifier_no_proba_with_feature_names(self):
        """
        Ensure lime.lime_tabular raises when predict_fn = classifier.predict
        (hard labels, no probabilities) and feature names are passed
        """
        interpretor = LimeTabularExplainer(self.X, feature_names=self.feature_names)
        interpretor_func = partial(interpretor.explain_instance, *[self.example, self.classifier.predict])
        self.assertRaises(NotImplementedError, interpretor_func)
    def test_classifier_with_proba_with_feature_names(self):
        """
        Ensure lime.lime_tabular works when predict_fn = classifier.predict_proba
        and feature names are passed
        """
        interpretor = LimeTabularExplainer(self.X, feature_names=self.feature_names)
        assert interpretor.explain_instance(self.example, self.classifier.predict_proba)
    def test_lime_coef_accuracy(self):
        """
        Ensure that for a trivial example, the coefficients of a regressor explanation
        are all similar to the true beta values of the generative process.
        """
        error_epsilon = .1
        explainer = LimeTabularExplainer(self.X,
                                         discretize_continuous=True, mode="regression")
        explanation = explainer.explain_instance(self.example,
                                                 self.regressor.predict,
                                                 model_regressor=self.model_regressor)
        vals = dict(explanation.as_list())
        # The explanation keys are discretized bin labels; self.example is all
        # zeros, so every feature falls in the "<= 0.00" bin.
        keys = ['{} <= 0.00'.format(i) for i in [2, 1, 0]]
        lime_coefs = np.array([vals[key] for key in keys])
        assert (abs(self.regressor.coef_ - lime_coefs) < error_epsilon).all()
if __name__ == '__main__':
    # unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
    # TestLoader.loadTestsFromTestCase is the supported replacement.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(unittest.TestLoader().loadTestsFromTestCase(TestLime))
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
"""Some packages' test package initialisation."""
# Fixed: the docstring previously came AFTER __version__, so it was a no-op
# string statement rather than the module docstring (PEP 257: a docstring
# must be the first statement in the module).
__version__ = "0"
|
import os
from pathlib import Path
# Root of the on-disk state; override via the AVOCADO_ROOT environment
# variable, otherwise default to ~/.avocado/mainnet (tilde expanded,
# symlinks resolved to an absolute path).
_root = os.getenv("AVOCADO_ROOT", "~/.avocado/mainnet")
DEFAULT_ROOT_PATH = Path(os.path.expanduser(_root)).resolve()
|
from typing import Any, Optional
class VerifierResult:
    """Outcome of a verification run.

    Instances are plain attribute holders; callers assign the fields
    after construction.
    """
    # Human-readable failure description, or None when verification passed.
    error: Optional[str]
    # Verifier-specific payload accompanying the outcome.
    result: Any
|
# When you import a package directory without naming a specific module, Python imports its __init__.py by default
from controllers.user_controller import user # Importing the user blueprint
from controllers.word_controller import words
from controllers.auth_controller import auth
# Blueprints the application factory should register, in mount order.
registerable_controllers = [user, words, auth]
|
# Random walk with arbitrary angle and variable step length
import turtle as t
import random as r
import math
WIDTH = 800   # canvas width in pixels
HEIGHT = 800  # canvas height in pixels
wn = t.Screen()
wn.setup(WIDTH, HEIGHT)
wn.colormode(255)       # accept RGB components in the 0-255 range
wn.bgcolor(50, 50, 50)  # dark grey background
wn.title("Random-Walk (3)")
def distance(a, b):
    """Return the Euclidean distance of point (a, b) from the origin.

    Uses math.hypot instead of a hand-rolled sqrt(a**2 + b**2): clearer
    and numerically safer for large components.
    """
    return math.hypot(a, b)
# Color bands: farther from the origin -> later entry in this list.
colors = ["white", "yellow", "orange", "green", "dodger blue", "purple", "red"]
alex = t.Turtle()
alex.speed(0)  # fastest drawing speed
alex.pendown()
alex.goto(0, 0)
start_x, start_y = 0, 0
for i in range(5000):
    # Pick the pen color from the current distance to the origin (50px bands).
    if distance(alex.xcor(), alex.ycor()) < 50:
        color = 0
    elif distance(alex.xcor(), alex.ycor()) < 100:
        color = 1
    elif distance(alex.xcor(), alex.ycor()) < 150:
        color = 2
    elif distance(alex.xcor(), alex.ycor()) < 200:
        color = 3
    elif distance(alex.xcor(), alex.ycor()) < 250:
        color = 4
    elif distance(alex.xcor(), alex.ycor()) < 300:
        color = 5
    else:
        color = 6
    alex.pencolor(colors[color])
    x_dir = r.choice([-1, 1])
    # without Lévy flight
    # x_dist = r.choice([0, 1, 2, 3])
    # with Lévy flight: the rare 10 produces occasional long jumps
    x_dist = r.choice([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 10])
    new_x = x_dist*x_dir
    y_dir = r.choice([-1, 1])
    # without Lévy flight
    # y_dist = r.choice([0, 1, 2, 3])
    # with Lévy flight
    y_dist = r.choice([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 10])
    new_y = y_dist*y_dir
    # Skip steps that lead nowhere
    if new_x == 0 and new_y == 0:
        continue
    alex.goto(start_x + new_x, start_y + new_y)
    # Clamp the bookkeeping position to the visible canvas.
    start_x += new_x
    if start_x > WIDTH/2:
        start_x = WIDTH/2
    elif start_x < -WIDTH/2:
        start_x = -WIDTH/2
    start_y += new_y
    if start_y > HEIGHT/2:
        start_y = HEIGHT/2
    elif start_y < -HEIGHT/2:
        start_y = -HEIGHT/2
print("I did it, Babe!")
wn.mainloop()
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SdkLanguageOptionalParameters(object):
    """
    List of additional applicable parameters for any given target language.
    """
    #: A constant which can be used with the input_type property of a SdkLanguageOptionalParameters.
    #: This constant has a value of "ENUM"
    INPUT_TYPE_ENUM = "ENUM"
    #: A constant which can be used with the input_type property of a SdkLanguageOptionalParameters.
    #: This constant has a value of "EMAIL"
    INPUT_TYPE_EMAIL = "EMAIL"
    #: A constant which can be used with the input_type property of a SdkLanguageOptionalParameters.
    #: This constant has a value of "URI"
    INPUT_TYPE_URI = "URI"
    #: A constant which can be used with the input_type property of a SdkLanguageOptionalParameters.
    #: This constant has a value of "STRING"
    INPUT_TYPE_STRING = "STRING"
    def __init__(self, **kwargs):
        """
        Initializes a new SdkLanguageOptionalParameters object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param param_name:
            The value to assign to the param_name property of this SdkLanguageOptionalParameters.
        :type param_name: str
        :param display_name:
            The value to assign to the display_name property of this SdkLanguageOptionalParameters.
        :type display_name: str
        :param description:
            The value to assign to the description property of this SdkLanguageOptionalParameters.
        :type description: str
        :param is_required:
            The value to assign to the is_required property of this SdkLanguageOptionalParameters.
        :type is_required: bool
        :param max_size:
            The value to assign to the max_size property of this SdkLanguageOptionalParameters.
        :type max_size: float
        :param input_type:
            The value to assign to the input_type property of this SdkLanguageOptionalParameters.
            Allowed values for this property are: "ENUM", "EMAIL", "URI", "STRING", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type input_type: str
        :param allowed_values:
            The value to assign to the allowed_values property of this SdkLanguageOptionalParameters.
        :type allowed_values: list[oci.apigateway.models.SdkLanguageOptionalParametersAllowedValue]
        """
        # Attribute name -> Python type, consumed by the OCI SDK (de)serializer.
        self.swagger_types = {
            'param_name': 'str',
            'display_name': 'str',
            'description': 'str',
            'is_required': 'bool',
            'max_size': 'float',
            'input_type': 'str',
            'allowed_values': 'list[SdkLanguageOptionalParametersAllowedValue]'
        }
        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'param_name': 'paramName',
            'display_name': 'displayName',
            'description': 'description',
            'is_required': 'isRequired',
            'max_size': 'maxSize',
            'input_type': 'inputType',
            'allowed_values': 'allowedValues'
        }
        # Backing fields for the properties below; populated from kwargs by
        # the @init_model_state_from_kwargs decorator.
        self._param_name = None
        self._display_name = None
        self._description = None
        self._is_required = None
        self._max_size = None
        self._input_type = None
        self._allowed_values = None
    @property
    def param_name(self):
        """
        **[Required]** Gets the param_name of this SdkLanguageOptionalParameters.
        Name of the parameter.
        :return: The param_name of this SdkLanguageOptionalParameters.
        :rtype: str
        """
        return self._param_name
    @param_name.setter
    def param_name(self, param_name):
        """
        Sets the param_name of this SdkLanguageOptionalParameters.
        Name of the parameter.
        :param param_name: The param_name of this SdkLanguageOptionalParameters.
        :type: str
        """
        self._param_name = param_name
    @property
    def display_name(self):
        """
        Gets the display_name of this SdkLanguageOptionalParameters.
        Display name of the parameter.
        :return: The display_name of this SdkLanguageOptionalParameters.
        :rtype: str
        """
        return self._display_name
    @display_name.setter
    def display_name(self, display_name):
        """
        Sets the display_name of this SdkLanguageOptionalParameters.
        Display name of the parameter.
        :param display_name: The display_name of this SdkLanguageOptionalParameters.
        :type: str
        """
        self._display_name = display_name
    @property
    def description(self):
        """
        Gets the description of this SdkLanguageOptionalParameters.
        Description for the parameter.
        :return: The description of this SdkLanguageOptionalParameters.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """
        Sets the description of this SdkLanguageOptionalParameters.
        Description for the parameter.
        :param description: The description of this SdkLanguageOptionalParameters.
        :type: str
        """
        self._description = description
    @property
    def is_required(self):
        """
        Gets the is_required of this SdkLanguageOptionalParameters.
        Information on whether the parameter is required or not.
        :return: The is_required of this SdkLanguageOptionalParameters.
        :rtype: bool
        """
        return self._is_required
    @is_required.setter
    def is_required(self, is_required):
        """
        Sets the is_required of this SdkLanguageOptionalParameters.
        Information on whether the parameter is required or not.
        :param is_required: The is_required of this SdkLanguageOptionalParameters.
        :type: bool
        """
        self._is_required = is_required
    @property
    def max_size(self):
        """
        Gets the max_size of this SdkLanguageOptionalParameters.
        Maximum size as input value for this parameter.
        :return: The max_size of this SdkLanguageOptionalParameters.
        :rtype: float
        """
        return self._max_size
    @max_size.setter
    def max_size(self, max_size):
        """
        Sets the max_size of this SdkLanguageOptionalParameters.
        Maximum size as input value for this parameter.
        :param max_size: The max_size of this SdkLanguageOptionalParameters.
        :type: float
        """
        self._max_size = max_size
    @property
    def input_type(self):
        """
        Gets the input_type of this SdkLanguageOptionalParameters.
        The input type for this param.
        - Input type is ENUM when only specific list of input strings are allowed.
        - Input type is EMAIL when input type is an email ID.
        - Input type is URI when input type is an URI.
        - Input type is STRING in all other cases.
        Allowed values for this property are: "ENUM", "EMAIL", "URI", "STRING", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :return: The input_type of this SdkLanguageOptionalParameters.
        :rtype: str
        """
        return self._input_type
    @input_type.setter
    def input_type(self, input_type):
        """
        Sets the input_type of this SdkLanguageOptionalParameters.
        The input type for this param.
        - Input type is ENUM when only specific list of input strings are allowed.
        - Input type is EMAIL when input type is an email ID.
        - Input type is URI when input type is an URI.
        - Input type is STRING in all other cases.
        :param input_type: The input_type of this SdkLanguageOptionalParameters.
        :type: str
        """
        allowed_values = ["ENUM", "EMAIL", "URI", "STRING"]
        # Values outside the allowed set are coerced to the sentinel rather
        # than raising, so newer service values don't break older clients.
        if not value_allowed_none_or_none_sentinel(input_type, allowed_values):
            input_type = 'UNKNOWN_ENUM_VALUE'
        self._input_type = input_type
    @property
    def allowed_values(self):
        """
        Gets the allowed_values of this SdkLanguageOptionalParameters.
        List of allowed input values.
        Example: `[{\"name\": \"name1\", \"description\": \"description1\"}, ...]`
        :return: The allowed_values of this SdkLanguageOptionalParameters.
        :rtype: list[oci.apigateway.models.SdkLanguageOptionalParametersAllowedValue]
        """
        return self._allowed_values
    @allowed_values.setter
    def allowed_values(self, allowed_values):
        """
        Sets the allowed_values of this SdkLanguageOptionalParameters.
        List of allowed input values.
        Example: `[{\"name\": \"name1\", \"description\": \"description1\"}, ...]`
        :param allowed_values: The allowed_values of this SdkLanguageOptionalParameters.
        :type: list[oci.apigateway.models.SdkLanguageOptionalParametersAllowedValue]
        """
        self._allowed_values = allowed_values
    def __repr__(self):
        return formatted_flat_dict(self)
    # Value-style equality over all instance attributes.
    # NOTE(review): after the None check this accesses other.__dict__, so
    # comparing against an object without __dict__ (e.g. an int) raises
    # AttributeError rather than returning False — confirm this is intended.
    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
|
# -*- coding: utf-8 -*-
'''print('hello, world')
a = 123
print(a);
print(not True)
classmate = ['Michael','Bob','Tracy']
print(classmate)
print(classmate[0])
classmate.clear()
#classmate.pop()
print(classmate)
classmates = ('Michael', 'Bob', 'Tracy')
print(classmates)
t =(1,)
print(t)
age = 20
if age >= 6:
print('teenager')
elif age > 18:
print('adult')
else:
print('kid')
s = input('birth: ')
birth = int(s)
if birth < 2000:
print('00前')
else:
print('00后')
names = ['Michael','Bob','Tracy']
for name in names:
print(name)
sum = 0
for x in [1,2,3,4,5,6,7,8,9,10]:
sum = sum + x
print(sum)
asum = 0
for y in list(range(101)):
asum = asum + y
print(asum)
d = {'Michael': 95,'Bob': 75,'Tracy': 85}
print(d['Michael'])
aflag = 'Bob' in d
print(aflag)
print(d.get('Michael'))
print(d.get('Mi',-1))
a = 'abc'
print(a.replace('a','A'))
print(a)
print(1+2+3)
import math
def my_abs(x):
if not isinstance(x,(int,float)):
raise TypeError('bad operand type')
if x > 0:
return x
else:
return -x
def move(x,y,step,angle=0):
nx = x + step*math.asin(angle)
ny = y + step*math.acos(angle)
return nx,ny
ax,ay = move(2,3,3)
print(ax,ay)
r = move(100,100,60,math.pi / 6)
print(r)
def power(x):
return x * x
def calc(*numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc())'''
"""def hello(greeting,*args):
if (len(args)==0):
print('%s!' % greeting)
else:
print('%s, %s!' % (greeting, ', '.join(args)))
print(hello('Hello', 'Michael', 'Bob', 'Adam'))
names = ('tom','jmk')
print(hello('Hello',*names))
def fact(n):
if n==1:
return 1
return n*fact(n-1)
names = ['Micheal','sun','tracy']
for name in names:
print(name)
print(list(range(5)))
# __name__=='main'
d = {'Michael':95,'Bob':75,"Tracy":85}
print(d['Michael'])
L = ['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']
d = {'a':1,'b':2,'c':3}
for key in d:
print(key)
for value in d.values():
print(value)
for k,v in d.items():
print(k,'==',v)
print(list(range(1,11)))
print([x*x for x in range(1,11)])
def odd():
print("step1")
yield(1)
print("step 2")
yield(2)
print("step 3")
yield(3)
o = odd()
next(o)
next(o)
next(o)"""
"""def triangles():
N=[1]
while True:
yield N
N.append(0)
N=[N[i-1] + N[i] for i in range(len(N))]
n=0
for t in triangles():
print(t)
n=n+1
if n == 10:
break
还是让我们过一遍代码:
1行,定义函数
2行,给N赋值
4行,函数遇到yield返回N=[1]
5行,给N添加一个元素,此时N=[1,0]
6行,高潮来啦,还是分开讲,range(len(N))=[0,1],
so, N = [N[i-1]+N[i] for i in [0,1]]
so, N = [N[0-1]+N[0] , N[1-1]+N[1]]
so, N = [0+1 , 1+0] = [1,1]
这样,杨辉三角的第二行就出来啦!"""
def f(x):
    """Return x squared."""
    return x ** 2
print(list(map(f,[1,2,3,4])))
from functools import reduce
def fn(x, y):
    """Fold step: shift accumulator x one decimal digit left and append y."""
    return x * 10 + y
print(reduce(fn,[1,3,5,7,9]))
def not_empty(s):
    """Filter predicate: falsy for None/empty strings, else the stripped text
    (whitespace-only strings strip to the falsy empty string)."""
    if not s:
        return s
    return s.strip()
print(list(filter(not_empty, ['A', ' ', 'B', None, 'C', ' '])))
def lazy_sum(*args):
    """Return a zero-argument closure that sums *args* when first invoked."""
    def _total():
        return sum(args)
    return _total
""" def log(func):
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now():
print('2015-3-25')
def log(text):
def decorator(func):
def wrapper(*args,**kw):
print('%s %s():' % (text, func.__name__))
return func(*args,**kw)
return wrapper
return decorator
@log('execute')
def now():
print('2018-09-08')
now()"""
"""def consumer():
r = ''
while True:
n = yield r
if not n:
return
print('[CONSUMER] Consuming %s...' % n)
r = '200 OK'
def produce(c):
c.send(None)
n = 0
while n < 5:
n = n + 1
print('[PRODUCER] Producing %s...' % n)
r = c.send(n)
print('[PRODUCER] Consumer return: %s' % r)
c.close()
c = consumer()
produce(c)"""
'''from urllib import request
with request.urlopen('https://api.douban.com/v2/book/2129650') as f:
data = f.read()
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', data.decode('utf-8'))
req = request.Request('http://www.douban.com/')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
with request.urlopen(req) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
from datetime import datetime
print(datetime.now())
print(dir('ABC'))
class MyObject(object):
def __init__(self):
self.x = 9
def power(self):
return self.x * self.x
obj = MyObject()
print(obj.power())
from datetime import datetime
with open('test.txt','w',encoding='utf-8') as f:
f.write('今天是')
f.write(datetime.now().strftime('%Y-%m-%d'))
with open('test.txt','r',encoding='utf-8') as f:
s = f.read()
print('prepare read file')
print(s)
with open('test.txt','rb') as f:
s = f.read()
print('prepare read file')
print(s) '''
def power(x, n=5):
    """Return x multiplied by itself n times (n <= 0 yields 1)."""
    result = 1
    for _ in range(n):
        result *= x
    return result
print(power(3))
def hello(greeting, *args):
    """Print a greeting; extra positional names are appended comma-separated."""
    if args:
        print('%s, %s!' % (greeting, ', '.join(args)))
    else:
        print('%s!' % greeting)
hello('Hi', 'Sarah')
names = ('Bart', 'Lisa')
hello('Hello', *names)
def person(name, age, **kw):
    """Print a person's name, age and any extra keyword attributes."""
    print(f"name: {name} age: {age} other: {kw}")
person('Michael',30,city='Beijing',job='work1')
|
import unittest
from nose.tools import (assert_is_not_none, assert_true, assert_raises,
assert_in, assert_equal, assert_less_equal)
import numpy
from sklearn.base import clone
from sknn.mlp import Classifier as MLPC
from sknn.mlp import Layer as L, Convolution as C
class TestClassifierFunctionality(unittest.TestCase):
    """Functional tests for sknn.mlp.Classifier: fitting, prediction,
    probability estimation and scoring."""
    def setUp(self):
        # One-layer softmax classifier, single training iteration for speed.
        self.nn = MLPC(layers=[L("Softmax")], n_iter=1)
    def test_IsClassifier(self):
        assert_true(self.nn.is_classifier)
    def test_FitAutoInitialize(self):
        # Fitting without explicit initialization must auto-initialize.
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)
    def test_ExplicitValidSet(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.valid_set = (a_in, a_out)
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)
    def test_PartialFit(self):
        # Incremental fitting with an explicit class list must accept new data.
        a_in, a_out = numpy.zeros((8,4)), numpy.random.randint(0, 5, (8,))
        self.nn.partial_fit(a_in, a_out, classes=[0,1,2,3])
        self.nn.partial_fit(a_in*2.0, a_out+1, classes=[0,1,2,3])
    def test_PredictUninitializedNoUnitCount(self):
        # Predicting before fit (no output unit count known) must assert.
        a_in = numpy.zeros((8,16))
        assert_raises(AssertionError, self.nn.predict, a_in)
    def test_PredictUninitializedNoLabels(self):
        # Even with units set, predicting without fitted labels must assert.
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8,16))
        assert_raises(AssertionError, self.nn.predict, a_in)
    def test_PredictBinaryProbability(self):
        a_in = numpy.random.uniform(-1.0, 1.0, size=(8,16))
        a_out = numpy.array((a_in.sum(axis=1) >= 0.0), dtype=numpy.int32)
        # Force both classes to be present in the labels.
        a_out[0], a_out[-1] = 0, 1
        self.nn.fit(a_in, a_out)
        a_proba = self.nn.predict_proba(a_in)
        a_test = self.nn.predict(a_in)
        c_out = numpy.unique(a_out)
        assert_equal(2, c_out.shape[0])
        assert_equal((8, 2), a_proba.shape)
        # Probabilities must lie in [0, 1] and each row must sum to 1.
        assert_true((a_proba >= 0.0).all())
        assert_true((a_proba <= 1.0).all())
        assert_true((abs(a_proba.sum(axis=1) - 1.0) < 1E-9).all())
    def test_PredictClasses(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        self.nn.batch_size = 4
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])
        c_out = numpy.unique(a_out)
        assert_equal(len(self.nn.classes_), 1)
        assert_true((self.nn.classes_[0] == c_out).all())
    def test_PredictLargerBatchSize(self):
        # Batch size larger than the sample count must still work.
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,1))
        self.nn.batch_size = 32
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])
    def test_PredictMultiClass(self):
        # Two output columns, each with three classes.
        a_in, a_out = numpy.zeros((32,16)), numpy.random.randint(0, 3, (32,2))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape, a_test.shape)
        assert_equal(len(self.nn.classes_), 2)
        assert_equal(self.nn.classes_[0].shape[0], 3)
        assert_equal(self.nn.classes_[1].shape[0], 3)
    def test_EstimateProbalities(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        a_proba = self.nn.predict_proba(a_in)
        assert_equal(type(a_out), type(a_proba))
        assert_equal(a_in.shape[0], a_proba.shape[0])
        assert_true((a_proba >= 0.0).all())
        assert_true((a_proba <= 1.0).all())
        assert_true((abs(a_proba.sum(axis=1) - 1.0) < 1E-9).all())
    def test_MultipleProbalitiesAsList(self):
        # With 4 label columns, predict_proba returns one array per column.
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,4))
        self.nn.fit(a_in, a_out)
        a_proba = self.nn.predict_proba(a_in)
        assert_equal(list, type(a_proba))
        assert_equal(4, len(a_proba))
        for p in a_proba:
            assert_equal(a_in.shape[0], p.shape[0])
            assert_less_equal(p.shape[1], 5)
            assert_true((p >= 0.0).all())
            assert_true((p <= 1.0).all())
            assert_true((abs(p.sum(axis=1) - 1.0) < 1E-9).all())
    def test_CalculateScore(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        f = self.nn.score(a_in, a_out)
        assert_equal(type(f), numpy.float64)
class TestClassifierClone(TestClassifierFunctionality):
    """Re-runs every TestClassifierFunctionality test against a cloned
    (unfitted copy of an) estimator instead of a fresh one."""
    def setUp(self):
        prototype = MLPC(layers=[L("Sigmoid")], n_iter=1)
        self.nn = clone(prototype)
# This runs the same tests on the clone as for the original above.
class TestClassifierInterface(unittest.TestCase):
    """Tests for the scikit-learn estimator interface: get_params, clone,
    string conversion and repr."""
    def check_values(self, params):
        # Shared assertions for the explicitly-set constructor values.
        assert_equal(params['learning_rate'], 0.05)
        assert_equal(params['n_iter'], 456)
        assert_equal(params['n_stable'], 123)
        assert_equal(params['dropout_rate'], 0.25)
        # Setting dropout_rate implies regularize='dropout'.
        assert_equal(params['regularize'], 'dropout')
        assert_equal(params['valid_size'], 0.2)
    def test_GetParamValues(self):
        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
                  n_stable=123, valid_size=0.2, dropout_rate=0.25)
        params = nn.get_params()
        self.check_values(params)
    def test_CloneWithValues(self):
        # clone() must preserve explicitly-set parameters.
        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
                  n_stable=123, valid_size=0.2, dropout_rate=0.25)
        cc = clone(nn)
        params = cc.get_params()
        self.check_values(params)
    def check_defaults(self, params):
        # Shared assertions for the documented constructor defaults.
        assert_equal(params['learning_rate'], 0.01)
        assert_equal(params['n_iter'], None)
        assert_equal(params['n_stable'], 10)
        assert_equal(params['regularize'], None)
        assert_equal(params['valid_size'], 0.0)
    def test_GetParamDefaults(self):
        nn = MLPC(layers=[L("Gaussian")])
        params = nn.get_params()
        self.check_defaults(params)
    def test_CloneDefaults(self):
        # clone() must preserve defaults too.
        nn = MLPC(layers=[L("Gaussian")])
        cc = clone(nn)
        params = cc.get_params()
        self.check_defaults(params)
    def test_ConvertToString(self):
        nn = MLPC(layers=[L("Gaussian")])
        assert_equal(str, type(str(nn)))
    def test_RepresentationDenseLayer(self):
        nn = MLPC(layers=[L("Gaussian")])
        r = repr(nn)
        assert_equal(str, type(r))
        assert_in("sknn.nn.Layer `Gaussian`", r)
    def test_RepresentationConvolution(self):
        nn = MLPC(layers=[C("Rectifier")])
        r = repr(nn)
        assert_equal(str, type(r))
        assert_in("sknn.nn.Convolution `Rectifier`", r)
|
from django.core.management.base import BaseCommand
from app.models import Tasks
import datetime
import pytz
from django.core.mail import send_mail
from django.conf import settings
class Command(BaseCommand):
    # Fixed typo: "fors tasks" -> "for tasks".
    help = ('Send notifications for tasks that are close to '
            'scheduled date/time')
    def handle(self, *args, **options):
        """Email each task owner whose task starts within the next hour today.

        Scans all tasks; for those scheduled today, computes the time left
        (in the Asia/Kolkata timezone) and sends a reminder email when the
        task begins in one hour or less.
        """
        # Localize "now" once instead of per task.
        tz = pytz.timezone("Asia/Kolkata")
        now = tz.localize(datetime.datetime.now())
        for task in Tasks.objects.all():
            if task.sch_date_time.date() != now.date():
                continue
            remaining = task.sch_date_time - now
            # total_seconds() is signed, so already-started tasks yield a
            # negative value (the old `.seconds` arithmetic silently relied
            # on timedelta normalization to exclude them).
            rem_hours = remaining.total_seconds() / (60 * 60)
            if 0 <= rem_hours <= 1:
                subject = "ToDo App reminder."
                message = f"Hi {task.user_id.username}, \n \
                    Below scheduled task will begin within an hour. \n \
                    - TASK: {task.title} \n \
                    - DESC: {task.desc} \n \
                    - Scheduled date: {task.sch_date_time.date()} \n \
                    - Scheduled time: {task.sch_date_time.time()}"
                email_from = settings.EMAIL_HOST_USER
                recipient_list = [task.user_id.email,]
                send_mail(subject, message, email_from, recipient_list)
                self.stdout.write("########################################")
                self.stdout.write("Notification has been sent successfully for:")
                self.stdout.write(f'TASK: {task.title}')
                self.stdout.write(f'DESC: {task.desc}')
                self.stdout.write(f'Email: {task.user_id.email}')
                self.stdout.write("########################################")
|
import json
import unittest
from automerge_backend import Backend, default_sync_state
from automerge import doc
from automerge.datatypes import Counter
# we want changes to have the same timestamp to make the tests deterministic
# (prevents the bloom filter from failing)
def ts():
    """Constant timestamper: always 0, keeping change hashes deterministic."""
    # PEP 8 (E731): a named def is preferred over assigning a lambda.
    return 0
# TODO: Some tests have `0` timestamps but no actor ids, so they won't be deterministic
# What's going on here?
ai1 = "02ef21f3c9eb4087880ebedd7c4bbe43"  # fixed actor id for peer 1
ai2 = "2a1d376b24f744008d4af58252d644dd"  # fixed actor id for peer 2
def sync(nA, nB, a_sync_state=None, b_sync_state=None):
    """Exchange sync messages between peers nA and nB until both go quiet.

    Fresh sync states are created for any that are not supplied. Returns
    the pair of sync states; raises if the peers have not converged within
    10 rounds (a likely sign of an infinite-loop bug).
    """
    MAX_ITER = 10
    a_sync_state = default_sync_state() if a_sync_state is None else a_sync_state
    b_sync_state = default_sync_state() if b_sync_state is None else b_sync_state
    for _ in range(MAX_ITER):
        a_to_b_msg = nA.generate_sync_message(a_sync_state)
        b_to_a_msg = nB.generate_sync_message(b_sync_state)
        if a_to_b_msg:
            nB.receive_sync_message(b_sync_state, a_to_b_msg)
        if b_to_a_msg:
            nA.receive_sync_message(a_sync_state, b_to_a_msg)
        # Quiescence: neither side has anything left to send.
        if not a_to_b_msg and not b_to_a_msg:
            return a_sync_state, b_sync_state
    raise Exception(
        f"Did not synchronize within {MAX_ITER} iterations. Do you have a bug causing an infinite loop?"
    )
class AlreadyInSync(unittest.TestCase):
    """Sync-protocol tests for peers that are already (or become) in sync."""
    def test_not_reply_if_we_have_no_data_as_well(self):
        # Two empty docs: after receiving A's message, B should have no reply.
        n1, n2 = doc.Doc(backend=Backend()), doc.Doc(backend=Backend())
        s1, s2 = default_sync_state(), default_sync_state()
        m1 = m2 = None
        m1 = n1.generate_sync_message(s1)
        n2.receive_sync_message(s2, m1)
        m2 = n2.generate_sync_message(s2)
        self.assertEqual(m2, None)
    def test_repos_with_equal_heads_do_not_need_a_reply_message(self):
        n1, n2 = (
            doc.Doc(backend=Backend(), timestamper=ts, initial_data={"n": []}),
            doc.Doc(backend=Backend(), timestamper=ts),
        )
        s1, s2 = default_sync_state(), default_sync_state()
        for i in range(0, 10):
            with n1 as d:
                d["n"].append(i)
        # Bring n2 up to date out-of-band, then verify sync has nothing to do.
        # NOTE(review): `patch` is assigned but unused.
        patch = n2.apply_changes(n1.get_all_changes())
        self.assertEqual(n1, n2)
        m1 = n1.generate_sync_message(s1)
        self.assertEqual(s1.last_sent_heads, n1.get_heads())
        n2.receive_sync_message(s2, m1)
        m2 = n2.generate_sync_message(s2)
        self.assertEqual(m2, None)
    def test_offer_all_changes_to_n2_when_starting_from_nothing(self):
        n1, n2 = (
            doc.Doc(backend=Backend(), timestamper=ts, initial_data={"n": []}),
            doc.Doc(backend=Backend(), timestamper=ts),
        )
        for i in range(0, 10):
            with n1 as d:
                d["n"].append(i)
        self.assertNotEqual(n1, n2)
        sync(n1, n2)
        self.assertEqual(n1, n2)
    # SKIPPED: Isn't this identical to the previous test?
    # def test_sync_peers_when_one_has_commits_the_other_does_not(self):
    def test_work_with_prior_sync_state(self):
        n1, n2 = doc.Doc(backend=Backend(), timestamper=ts), doc.Doc(
            backend=Backend(), timestamper=ts
        )
        s1, s2 = default_sync_state(), default_sync_state()
        # NOTE(review): both loops mutate n1 — presumably the second should
        # target n2 (or follow an intermediate sync); confirm intent.
        for i in range(0, 5):
            with n1 as d:
                d["x"] = i
        for i in range(5, 10):
            with n1 as d:
                d["x"] = i
        self.assertNotEqual(n1, n2)
        sync(n1, n2, s1, s2)
        self.assertEqual(n1, n2)
|
"""
outpu by npy
"""
import xml.etree.ElementTree as ET
import cv2
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib
# return number of vectors
def compute_vector(
    black, pre, nxt, result, result_l, result_y, result_x, result_z, ks, zv, sgm
):
    """Accumulate one cell's motion vector field into the running results.

    A Gaussian "likelihood" blob is painted at the *next* position ``nxt``,
    and for every pixel inside the blob a unit 3-D vector pointing back
    toward the *previous* position ``pre`` (with temporal component ``zv``)
    is stored, weighted by the likelihood.  Where blobs from several cells
    overlap, the per-pixel winner is the blob with the larger likelihood;
    exact likelihood ties increment the overlap counter ``result`` and sum
    the vectors instead.

    Assumes ``pre``/``nxt`` are (x, y) integer coordinates (``pre`` as a
    numpy array, since it is used in array arithmetic) -- TODO confirm with
    callers.  Returns the updated
    ``(result, result_l, result_y, result_x, result_z)`` arrays.
    """
    img_l = black.copy()  # likelihood image
    # Paint the peak at nxt; +ks offsets into the KS-padded border.
    img_l[nxt[1] + ks, nxt[0] + ks] = 255
    img_l = cv2.GaussianBlur(
        img_l, ksize=(int(ks * 2) + 1, int(ks * 2) + 1), sigmaX=sgm
    )
    # Normalize so the peak likelihood is exactly 1.
    img_l = img_l / img_l.max()
    points = np.where(img_l > 0)
    img_y = black.copy()
    img_x = black.copy()
    img_z = black.copy()
    for y, x in zip(points[0], points[1]):
        # Displacement from this pixel back to the previous position.
        v3d = pre + [ks, ks] - [x, y]
        # v3d = np.append(v3d, 1.4434)
        # v3d = np.append(v3d, 2.7632)
        v3d = np.append(v3d, zv)
        # Unit vector scaled by the local likelihood.
        v3d = v3d / np.linalg.norm(v3d) * img_l[y, x]
        img_y[y, x] = v3d[1]
        img_x[y, x] = v3d[0]
        img_z[y, x] = v3d[2]
    # Where the new blob beats the stored likelihood, overwrite the vectors.
    img_i = result_l - img_l
    result_y = np.where(img_i < 0, img_y, result_y)
    result_x = np.where(img_i < 0, img_x, result_x)
    result_z = np.where(img_i < 0, img_z, result_z)
    # Detect exact likelihood ties (zeros are remapped to 2 first so that
    # "both blobs absent" does not count as a tie).
    img_i = img_l.copy()
    img_i[img_i == 0] = 2
    img_i = result_l - img_i
    result[img_i == 0] += 1
    result_y += np.where(img_i == 0, img_y, 0)
    result_x += np.where(img_i == 0, img_x, 0)
    result_z += np.where(img_i == 0, img_z, 0)
    # Keep the per-pixel maximum likelihood for future comparisons.
    result_l = np.maximum(result_l, img_l)
    return result, result_l, result_y, result_x, result_z
# migration mean
# Table of mean cell migration values per frame interval; each row appears
# to be [mean displacement, second statistic] -- TODO confirm the meaning
# of the second column (it is unused in this file).
movement = [
    [1.4434, 1.3197942503607587],
    [2.7377, 2.195258089759622],
    [4.0067, 3.0202187134378864],
    [5.2538, 3.8274910979238674],
    [6.4813, 4.618897370307621],
    [7.6874, 5.388044434257328],
    [8.884, 6.133690378378062],
    [10.0534, 6.843635116056122],
    [11.2083, 7.529895064500173],
    [12.3036, 8.172467388870148],
    [13.4689, 8.847465726604845],
    [14.4967, 9.43786499684944],
    [15.5675, 10.046110063303223],
    [16.6942, 10.678688049700735],
    [17.6606, 11.222874594749634],
    [18.7754, 11.83313712025504],
    [19.8107, 12.411461893903311],
    [20.74, 12.936127125203171],
    [21.6791, 13.45838277173517],
]
###############hyperparameter##################
KS = 50  # kernel size (Gaussian radius; also the image border padding)
SGM = 6  # sigma of the Gaussian likelihood blob
Z_VALUE = 5  # temporal axis component appended to each 2-D motion vector
def generate_flow(track_let, save_path, itv=1, height=1040, width=1392):
    """Build a dense (y, x, z) motion-vector field from a tracklet table.

    ``track_let`` rows are ``[frame, cell_id, x, y, parent_id]`` (cast to
    int here).  For every cell id present in both frame ``i`` and frame
    ``i + itv`` a motion vector blob is accumulated via compute_vector();
    cells that appear only in the later frame are linked through their
    parent id (mitosis).  The normalized field is saved as float16 to
    ``save_path`` (.npy) plus a PNG preview alongside it.

    NOTE(review): only the pair starting at frame i == 1 is processed
    (``frames`` is computed but unused) -- confirm this is intentional.
    """
    track_let = track_let.astype(int)
    frames = np.unique(track_let[:, 0])
    ids = np.unique(track_let[:, 1])
    # Work on KS-padded canvases so blobs near the edge fit.
    black = np.zeros((height + KS * 2, width + KS * 2))
    ones = np.ones((height + KS * 2, width + KS * 2))
    par_id = -1
    i = 1
    result = ones.copy()      # per-pixel overlap counter (starts at 1)
    result_lm = black.copy()  # per-pixel maximum likelihood
    result_y = black.copy()
    result_x = black.copy()
    result_z = black.copy()
    for j in ids:
        # Does cell j exist in frame i, and in frame i + itv?
        index_check = len(track_let[(track_let[:, 0] == i) & (track_let[:, 1] == j)])
        index_chnxt = len(
            track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)]
        )
        if index_chnxt != 0:
            # Remember the parent id recorded in the later frame.
            par_id = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][
                0, -1
            ]
        if (index_check != 0) & (index_chnxt != 0):
            # Cell present in both frames: direct correspondence.
            data = track_let[(track_let[:, 0] == i) & (track_let[:, 1] == j)][0][2:-1]
            dnxt = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][0][
                2:-1
            ]
            result, result_lm, result_y, result_x, result_z = compute_vector(
                black,
                data,
                dnxt,
                result,
                result_lm,
                result_y,
                result_x,
                result_z,
                KS,
                Z_VALUE,
                SGM,
            )
        elif (index_check == 0) & (index_chnxt != 0) & (par_id != -1):
            # New cell in the later frame: link it to its parent's position.
            if (
                len(track_let[(track_let[:, 0] == i) & (track_let[:, 1] == par_id)])
                != 0
            ):
                data = track_let[(track_let[:, 0] == i) & (track_let[:, 1] == par_id)][
                    0
                ][2:-1]
                dnxt = track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][
                    0
                ][2:-1]
                result, result_lm, result_y, result_x, result_z = compute_vector(
                    black,
                    data,
                    dnxt,
                    result,
                    result_lm,
                    result_y,
                    result_x,
                    result_z,
                    KS,
                    Z_VALUE,
                    SGM,
                )
            else:
                # Parent not found in the earlier frame: report and skip.
                print(
                    track_let[(track_let[:, 0] == i + itv) & (track_let[:, 1] == j)][0]
                )
    # Crop the KS padding back off and average overlapping contributions.
    result = result[KS:-KS, KS:-KS]
    print(i + 1, "to", i + itv + 1, result.max())
    result_y = result_y[KS:-KS, KS:-KS]
    result_x = result_x[KS:-KS, KS:-KS]
    result_z = result_z[KS:-KS, KS:-KS]
    result_lm = result_lm[KS:-KS, KS:-KS]
    result_x = result_x / result
    result_y = result_y / result
    result_z = result_z / result
    result_vec = np.concatenate(
        (result_y[:, :, None], result_x[:, :, None], result_z[:, :, None]), axis=-1
    )
    np.save(str(save_path), result_vec.astype("float16"))
    # Borderless PNG preview next to the .npy file.
    plt.figure(figsize=(1, 1), dpi=1000)
    plt.axis("off")
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
    plt.imshow(result_vec)
    plt.savefig(str(save_path.parent.joinpath(f"{save_path.stem}.png")))
if __name__ == "__main__":
for time_late in [1, 5, 9]:
save_CMP_path = Path(f"/home/kazuya/main/WSCTBFP/data/cmp/{time_late}")
save_CMP_path.mkdir(parents=True, exist_ok=True)
save_mask_path = save_CMP_path.parent.joinpath(f"mask_{time_late}")
save_mask_path.mkdir(parents=True, exist_ok=True)
root_path = Path(f"./output/association/{time_late}")
pred1_paths = sorted(root_path.glob("*/*_1.txt"))
pred2_paths = sorted(root_path.glob("*/*_2.txt"))
for frame, pred_path in enumerate(zip(pred1_paths, pred2_paths)):
# [x, y, cell_id, state]
pred1 = np.loadtxt(str(pred_path[0]), delimiter=",", skiprows=1)
# [x, y, cell_id, state]
pred2 = np.loadtxt(str(pred_path[1]), delimiter=",", skiprows=1)
mask = np.zeros((320, 320))
exclude_cells = pred1[(pred1[:, 3] == 2) | (pred1[:, 3] == 0)]
for exclude_cell in exclude_cells:
mask = cv2.circle(
mask,
(int(exclude_cell[0]), int(exclude_cell[1])),
SGM * 3,
255,
-1,
)
cv2.imwrite(
str(save_mask_path.joinpath(f"{frame:05d}.tif")),
mask.astype(np.uint8),
)
pred1_new = pred1.copy()
pred2_new = pred2.copy()
cell_id = 1
for index, pre in enumerate(pred1):
if pre[3] == 1:
pred1_new[index][2] = cell_id
pred2_new[int(pre[2])][2] = cell_id
cell_id += 1
pred1 = pred1_new
pred2 = pred2_new
pred1 = pred1[pred1[:, 3] == 1]
pred2 = pred2[pred2[:, 3] == 1]
track_let = np.zeros(((pred1.shape[0] + pred2.shape[0], 5)))
track_let[:pred1.shape[0], 0] = 1
track_let[pred1.shape[0]:, 0] = 2
track_let[:pred1.shape[0], 2:4] = pred1[:, :2]
track_let[pred1.shape[0]:, 2:4] = pred2[:, :2]
track_let[:pred1.shape[0], 1] = pred1[:, 2]
track_let[pred1.shape[0]:, 1] = pred2[:, 2]
generate_flow(
track_let,
save_CMP_path.joinpath(f"{frame:05d}.npy"),
height=320,
width=320,
)
print("finished")
|
"""
You are given a string of length `N` and a parameter `k`. The string can be
manipulated by taking one of the first `k` letters and moving it to the end,
with an unlimited number of moves.
For example, if we are given the string `daily` and `k = 1`, then the best we
can do is `ailyd`, since you can only move the first letter to the end.
"""
from collections import namedtuple
def smallest_string(s: str, k: int) -> str:
    """Return the lexicographically smallest string reachable from *s*.

    A move takes one of the first ``k`` characters and appends it to the
    end; an unlimited number of moves is allowed.

    * ``k == 0`` (or empty ``s``): no move changes anything -- return ``s``.
    * ``k >= 2``: any permutation is reachable, so the sorted string is the
      best possible ordering.
    * ``k == 1``: only rotations of ``s`` are reachable, so the answer is
      the lexicographically smallest rotation.

    Bug fix: the previous version rotated to the *first* occurrence of the
    minimum character, which is wrong when that character repeats
    (e.g. ``"aba"`` -> ``"aba"`` instead of the correct ``"aab"``); it also
    raised ``ValueError`` on an empty string.  All candidate rotations that
    start with the minimum character are now compared.
    """
    if k == 0 or not s:
        return s
    if k > 1:
        return "".join(sorted(s))
    # k == 1: only rotations starting with the minimum character can win.
    min_char = min(s)
    return min(s[i:] + s[:i] for i, c in enumerate(s) if c == min_char)
def test_smallest_string():
    """Spot-check smallest_string() against known input/output pairs."""
    cases = [
        (("daily", 1), "ailyd"),
        (("daily", 2), "adily"),
    ]
    for (text, k), expected in cases:
        assert smallest_string(text, k) == expected
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Jon Turney
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# mkgitoliteconf - creates a gitolite conf file fragment from cygwin-pkg-maint
#
from collections import defaultdict
import argparse
import sys
from . import common_constants
from . import maintainers
#
# transform username to charset acceptable to gitolite
#
def transform_username(name):
    """Return *name* mapped to a character set gitolite accepts.

    Dots are removed and spaces become underscores.
    """
    return name.replace('.', '').replace(' ', '_')
#
#
#
def do_main(args):
    """Print a gitolite config fragment for the package maintainer list.

    Reads the maintainer list named by ``args.pkglist``, inverts it into a
    per-package maintainer mapping, and writes to stdout: a global repo
    stanza followed by one stanza per package granting its maintainers
    write access.
    """
    # read maintainer list
    mlist = {}
    mlist = maintainers.add_packages(mlist, args.pkglist, getattr(args, 'orphanmaint', None))
    # make the list of all packages
    maintainers.all_packages(mlist)
    # invert to a per-package list of maintainers
    pkgs = defaultdict(list)
    # for each maintainer
    for m in mlist.values():
        # for each package
        for p in m.pkgs:
            # add the maintainer name
            pkgs[p].append(m.name)
    # header
    print("# automatically generated by mkgitoliteconf")
    # global configuration
    print('')
    print('@leads = %s' % ' '.join(map(transform_username, common_constants.ORPHANMAINT.split('/'))))
    print('')
    print('repo @all')
    print(' RW = @leads')
    print(' RW+ playground$ = @all')
    print('# anyone can create, push, rewind or delete the \'playground\' branch')
    print(' R = @all')
    print(' R = gitweb daemon')
    print(' config core.sharedrepository = all')
    print(' config uploadpack.allowReachableSHA1InWant = true')
    print(' config receive.advertisePushOptions = true')
    print(' - VREF/MAX_NEWBIN_SIZE/1024 = @all')
    print('# this rejects binary files over the size limit, text files of any size are still permiited')
    print(' - VREF/HIGHLANDER/cygport = @all')
    print('# this checks for trees which contain more than one .cygport file')
    print('')
    # for each package
    for p in sorted(pkgs):
        users = ' '.join(map(transform_username, pkgs[p]))
        owner = pkgs[p][0]  # first named maintainer
        print("repo git/cygwin-packages/%s" % (p))
        print("C = %s @leads" % (users))
        print("RW = %s" % (users))
        print("owner = %s" % (owner))
        print("")
#
#
#
def main():
    """Parse command-line options and emit the gitolite config fragment."""
    pkglist_default = common_constants.PKGMAINT
    parser = argparse.ArgumentParser(description='gitolite rules config generator')
    parser.add_argument('--pkglist', action='store', metavar='FILE',
                        help="package maintainer list (default: " + pkglist_default + ")",
                        default=pkglist_default)
    args = parser.parse_args()
    do_main(args)
    return 0
#
#
#
if __name__ == "__main__":
sys.exit(main())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program: diffusionEq2D_ForwardEulerV
Created: Aug 2020
@author: Ryan Clement (RRCC)
scisoft@outlook.com
Purpose: Solve the partial differential equation (PDE)
u_t = alpha * (u_xx + u_yy)
in (0,Lx)x(0,Ly) with vanishing boundary conditions or
u = 0 for x = 0, y in [0,Ly]
u = 0 for x = 1.0, y in [0,Ly]
u = 0 for y = 0, x in [0,Lx]
u = 0 for y = 1.0, x in [0,Lx]
and initial condition
u(x,y,0) = I(x,y) = A*sin(Pi*x/Lx)*sin(Pi*y/Ly)
The analytic solution for this problem is given by
u(x,y,t) = Ae**(-alpha*Pi**2*(Lx**-2 + Ly**-2)*t)*sin(Pi*x/Lx)*sin(Pi*y/Ly)
We will take A=Lx=Ly=1 for this simulation
"""
### IMPORTS
import numpy as np
import matplotlib.pyplot as plt
### FUNCTIONS
def initialCondition(xN, yN, X, Y):
    """Initial profile I(x,y) = sin(pi*x/Lx) * sin(pi*y/Ly) with A = 1."""
    sin_x = np.sin(np.pi * X / xN)
    sin_y = np.sin(np.pi * Y / yN)
    return sin_x * sin_y
def analSol(a, xN, yN, X, Y, t):
    """Analytic solution u(x,y,t) of the 2-D diffusion problem (A = 1)."""
    decay = np.exp(-a * np.pi ** 2 * (1.0 / xN ** 2 + 1.0 / yN ** 2) * t)
    return decay * np.sin(np.pi * X / xN) * np.sin(np.pi * Y / yN)
def plotInitialCondition(a,xN,yN,X,Y):
    """Show a 3-D surface plot of the initial condition on mesh (X, Y).

    *a* is unused here; kept for signature symmetry with the other helpers.
    """
    # fig = plt.figure()
    ax = plt.axes(projection="3d")
    Z = initialCondition(xN,yN,X,Y)
    # ax.plot_wireframe(X,Y,Z,color='green')
    ax.plot_surface(X,Y,Z,rstride=1,cstride=1,cmap='hsv',edgecolor='none')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('u(x,y)')
    ax.set_title('Initial Condition')
    ax.set_zlim(0,1)
    plt.show()
def plotSol(X,Y,u,tit):
    """Show a 3-D surface plot of field *u* with title *tit*."""
    ax = plt.axes(projection="3d")
    # ax.plot_wireframe(X,Y,u,color='green')
    ax.plot_surface(X,Y,u,rstride=1,cstride=1,cmap='hsv',edgecolor='none')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('u(x,y)')
    ax.set_title(tit)
    ax.set_zlim(0,1)
    plt.show()
def plotDiff(X,Y,u,tit):
    """Show a 3-D wireframe plot of the fractional-difference field *u*."""
    ax = plt.axes(projection="3d")
    ax.plot_wireframe(X,Y,u,color='green')
    # ax.plot_surface(X,Y,u,rstride=1,cstride=1,cmap='winter',edgecolor='none')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel(r'$\frac{\Delta u}{u}$')
    ax.set_title(tit)
    plt.show()
def setIC_Loop(a, x, y, u, xPts, yPts, xN, yN):
    """Fill *u* in place with the initial condition, point by point.

    Bug fix: initialCondition() takes (xN, yN, x, y); the previous code
    passed the diffusivity *a* as an extra first argument, which raised a
    TypeError whenever this helper was called.  *a* stays in the signature
    for backward compatibility but is unused.
    """
    for j in range(yPts):
        for i in range(xPts):
            u[i, j] = initialCondition(xN, yN, x[i], y[j])
def setIC(xN, yN, X, Y):
    """Return the initial condition on mesh (X, Y) with all edges zeroed.

    The sine product leaves round-off noise on the boundary, so the four
    Dirichlet edges are forced to exactly 0.
    """
    u = initialCondition(xN, yN, X, Y)
    # Clean up boundary: zero every edge view in place.
    for edge in (u[0, :], u[-1, :], u[:, 0], u[:, -1]):
        edge[:] = 0.0
    return u
def advanceN(a,xN,yN,u,n,xPts,yPts):
    """Advance the explicit (forward-Euler / FTCS) diffusion solve n steps.

    Parameters: a = diffusivity alpha, (xN, yN) = domain lengths,
    u = initial field (used as a work buffer), n = number of time steps,
    (xPts, yPts) = mesh points per axis.  Returns (field after n steps, dt).
    """
    ### VARIABLES
    x0 = 0.0  # X: Minimum value (Left boundary)
    dx = (xN - x0)/(xPts-1)  # X: Distance between mesh points
    dxs = dx**2
    y0 = 0.0  # Y: Minimum value (Bottom boundary)
    dy = (yN - y0)/(yPts-1)  # Y: Distance between mesh points
    dys = dy**2
    tsc = 10  # Time step control parameter (stability margin for FTCS)
    dtx = dxs/(tsc*a)  # X: Temporal mesh interval
    dty = dys/(tsc*a)  # Y: Temporal mesh interval
    # Use the more restrictive of the two stability limits.
    dt = min(dtx, dty)
    at = a*dt
    dpx = at/dxs  # Dimensionless parameter
    dpy = at/dys  # Dimensionless parameter
    uO = np.copy(u)  # Previous time-step values (independent copy, not a view)
    for k in range(n):
        # Five-point stencil update of all interior points.
        for j in range(1,yPts-1):
            for i in range(1,xPts-1):
                u[i,j] = uO[i,j] + \
                dpx*(uO[i+1,j] - 2.0*uO[i,j] + uO[i-1,j]) + \
                dpy*(uO[i,j+1] - 2.0*uO[i,j] + uO[i,j-1])
        # Boundary Condition (Dirichlet: u = 0 on all four edges)
        u[0,:] = 0.0
        u[-1,:] = 0.0
        u[:,0] = 0.0
        u[:,-1] = 0.0
        # Swap buffers: uO now holds the newest field, u becomes scratch.
        uO, u = u, uO
    return uO,dt
### MAIN
# Problem setup: unit square, alpha = 1, 51x51 mesh.
a = 1.0
xN = 1.0
yN = 1.0
xPts = 51
yPts = 51
x = np.linspace(0,xN,xPts)
y = np.linspace(0,yN,yPts)
X, Y = np.meshgrid(x,y)
u = setIC(xN,yN,X,Y)
plotSol(X,Y,u,'Initial Condition')
numSteps = 1000
uS,dt = advanceN(a,xN,yN,u,numSteps,xPts,yPts)
print('Time Step = {:0.5f} s'.format(dt))
time = numSteps*dt
print('Simulation End Time = {:0.5f} s'.format(time))
plotSol(X,Y,uS,'Simulation Time {:.5f} s'.format(time))
uA = analSol(a,xN,yN,X,Y,time)
plotSol(X,Y,uA,'Analytic Time {:.5f} s'.format(time))
# Pointwise fractional difference (analytic vs simulated); set to zero
# wherever either field is exactly zero (i.e. on the boundaries).
uFracDiff = np.copy(uA)
for j in range(yPts):
    for i in range(xPts):
        ua = uA[i,j]
        us = uS[i,j]
        if 0.0 == ua or 0.0 == us:
            uFracDiff[i,j] = 0.0
        else:
            uFracDiff[i,j] = ua/us - 1.0
plotDiff(X,Y,uFracDiff,'Fractional Difference')
|
# import the necessary packages
from .simplepreprocessor import SimplePreprocessor
from .imagetoarraypreprocessor import ImageToArrayPreprocessor |
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.common import debug
from ooflib.common.IO import parameter
from ooflib.engine import materialmanager
import types
#Interface branch
from ooflib.engine.IO import interfaceparameters
# MaterialParameter can be set to any existing Material name.
class MaterialParameter(parameter.StringParameter):
    """A StringParameter restricted to the names of existing Materials."""
    def checker(self, x):
        # Valid values are exactly the names known to the material manager.
        if x not in materialmanager.getMaterialNames():
            raise TypeError("Expected a Material name!")
# AnyMaterialParameter can be set to any existing Material name, or
# '<Any>' or '<None>'.
class AnyMaterialParameter(parameter.StringParameter):
    """A Material-name parameter that also accepts '<Any>' or '<None>'."""
    # Wildcard values allowed in addition to real Material names.
    extranames = ['<Any>', '<None>']
    def checker(self, x):
        if (x not in materialmanager.getMaterialNames() and
            x not in self.extranames):
            raise TypeError("Expected a Material name, or '<Any>' or '<None>'")
# ListOfMaterialsParameter can be set to a list of existing Material
# names.
class ListOfMaterialsParameter(parameter.ListOfStringsParameter):
    """A parameter whose value is a list of existing Material names."""
    def checker(self, x):
        # isinstance(x, list) replaces `type(x) is not types.ListType`:
        # types.ListType was removed in Python 3, and isinstance also
        # accepts list subclasses (backward-compatible generalization).
        if not isinstance(x, list):
            raise TypeError("Expected a list of Material names!")
        names = materialmanager.getMaterialNames()
        for n in x:
            if n not in names:
                raise TypeError("Expected a list of Material names!")
# MeshMaterialParameter can be set to any Material that's used in a
# Mesh. The Parameter class itself doesn't enforce this, but the
# associated Widget does.
class MeshMaterialParameter(MaterialParameter):
    """A Material used in a Mesh.

    The Parameter class itself doesn't enforce the "used in a Mesh"
    restriction; the associated GUI widget does.
    """
    pass
##########################################################################
# Interface branch
class InterfaceMaterialParameter(parameter.StringParameter):
    """A StringParameter restricted to existing Interface Material names."""
    def checker(self, x):
        if x not in materialmanager.getInterfaceMaterialNames():
            raise TypeError("Expected an Interface Material name!")
#This one has "<Any>" and "<None>" included in the list of
#names of interface materials.
class InterfaceAnyMaterialParameter(parameter.StringParameter):
    """Interface Material name parameter that also accepts '<Any>'/'<None>'."""
    extranames = ['<Any>', '<None>']
    def checker(self, x):
        # Accept real interface material names plus the wildcard values.
        valid = (x in materialmanager.getInterfaceMaterialNames()
                 or x in self.extranames)
        if not valid:
            raise TypeError("Expected an Interface Material name!")
#This one includes "<No material>" in the list
class BulkMaterialParameterExtra(parameter.StringParameter):
    """Bulk Material name parameter with extra interface placeholder values."""
    # Placeholder values accepted in addition to real bulk Material names.
    extranames=[interfaceparameters.NO_MATERIAL_STR,
                interfaceparameters.ANY_STR,
                interfaceparameters.NORTH_STR,
                interfaceparameters.SOUTH_STR,
                interfaceparameters.EAST_STR,
                interfaceparameters.WEST_STR]
    def checker(self, x):
        #The checker gets called before the BulkMaterialWidget gets initialized
        #when the parameter is part of a registeredclass.  Have to suppress
        #the exception in this case when the material gets deleted, which is
        #why unknown names pass silently instead of raising.
        if x in materialmanager.getMaterialNames():
            if x not in materialmanager.getBulkMaterialNames():
                raise TypeError("Expected a Bulk Material name!")
class BulkMaterialParameter(parameter.StringParameter):
    """A StringParameter restricted to bulk (non-interface) Material names."""
    def checker(self, x):
        #The checker gets called before the BulkMaterialWidget gets initialized
        #when the parameter is part of a registeredclass.  Have to suppress
        #the exception in this case when the material gets deleted, which is
        #why unknown names pass silently instead of raising.
        if x in materialmanager.getMaterialNames():
            if x not in materialmanager.getBulkMaterialNames():
                raise TypeError("Expected a Bulk Material name!")
|
"""Implements Load checkpoint."""
from abc import ABC
import torch
from torchflare.callbacks.callback import Callbacks
from torchflare.callbacks.states import CallbackOrder
class LoadCheckpoint(Callbacks, ABC):
    """Callback that restores model/optimizer/scheduler state before training.

    Runs with INTERNAL ordering so the restore happens before other
    callbacks observe the experiment.
    """

    def __init__(self, path_to_model: str = None):
        """Constructor method for LoadCheckpoint Class.

        Args:
            path_to_model: Path of the checkpoint file to load.
        """
        # Python 3 zero-argument super() replaces super(LoadCheckpoint, self).
        super().__init__(order=CallbackOrder.INTERNAL)
        self.path = path_to_model

    def on_experiment_start(self):
        """Load checkpoint before starting training."""
        # Map tensors onto the experiment's device (CPU/GPU) at load time.
        checkpoint = torch.load(self.path, map_location=torch.device(self.exp.device))
        self.exp.model.load_state_dict(checkpoint["model_state_dict"])
        self.exp.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        # Scheduler state is optional: restore only when a stepper exists.
        if self.exp.scheduler_stepper is not None:
            self.exp.scheduler_stepper.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
        print("Successfully loaded checkpoints.")
|
import pygame
import pyscroll
import pytmx.util_pygame
from player import Joueur
class Game:
    """Top-level game object: window, map, player, and the main loop."""

    def __init__(self):
        # Create the game window
        self.screen = pygame.display.set_mode((800,600))
        pygame.display.set_caption("Pygamon - Adventure")
        # Load the map (tmx)
        tmx_data = pytmx.util_pygame.load_pygame('assets/carte.tmx')
        map_data = pyscroll.data.TiledMapData(tmx_data)
        map_layer = pyscroll.orthographic.BufferedRenderer(map_data, self.screen.get_size())
        # Spawn the player at the "player" object defined in the map
        player_position = tmx_data.get_object_by_name("player")
        self.player = Joueur(player_position.x, player_position.y)
        # Build the list of collision rectangles from the map objects
        self.walls = []
        for obj in tmx_data.objects:
            if obj.type == "collision":
                self.walls.append(pygame.Rect(obj.x, obj.y, obj.width, obj.height))
        # Layered drawing group; the player is drawn on layer 5
        self.groupe = pyscroll.PyscrollGroup(map_layer=map_layer, default_layer=5)
        self.groupe.add(self.player)

    def handle_input(self):
        """Poll the keyboard and move the player in the pressed direction."""
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_UP]:
            print("haut")
            self.player.move_above()
        elif pressed[pygame.K_DOWN]:
            print("bas")
            self.player.move_below()
        elif pressed[pygame.K_LEFT]:
            print("gauche")
            self.player.move_left()
        elif pressed[pygame.K_RIGHT]:
            print("droite")
            self.player.move_right()

    def update(self):
        """Update all sprites and undo any move that hits a wall."""
        self.groupe.update()
        # Collision check: a sprite whose feet touch a wall moves back
        for sprite in self.groupe.sprites():
            if sprite.feet.collidelist(self.walls) > -1:
                sprite.move_back()

    def run(self):
        """Run the main game loop at 60 FPS until the window is closed."""
        clock = pygame.time.Clock()
        # Main game loop
        running = True
        while running:
            self.player.save_location()
            self.handle_input()
            self.update()
            self.groupe.center(self.player.rect)
            self.groupe.draw(self.screen)
            pygame.display.flip()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
            clock.tick(60)
        pygame.quit()
import string
from typing import List
# Words masked by default (Discord mass-mention triggers).
default_censor: List[str] = ["@everyone", "@here"]


def filter_words(original_message: str,
                 censored_words: List[str] = None) -> str:
    """Return *original_message* with every censored word masked by '#'s.

    Each occurrence of a censored word is replaced by a run of '#' of the
    same length.  Falls back to the module-level ``default_censor`` list
    when no explicit word list is supplied.
    """
    words = default_censor if censored_words is None else censored_words
    result = original_message
    for word in words:
        result = result.replace(word, "#" * len(word))
    return result
|
from django.db import migrations, transaction
from ultimatejobweb.models import Job, Company
class Migration(migrations.Migration):
    """Data migration that seeds sample Job rows for existing Companies."""

    dependencies = [
        ('ultimatejobweb', '0002_test_data_company'),
    ]

    def generate_data(apps, schema_editor):
        """Insert one Job per (company, title, url) tuple below.

        NOTE(review): the `+\\` sequences inside the URL string literals are
        backslash line-continuations *inside* the strings, so the stored
        URLs contain a literal '+' followed by the next line's leading
        whitespace -- these sample URLs are malformed.  Left as-is here
        because changing them alters stored data; confirm intent.
        """
        jobs = [('Facebook', 'Production Engineer', 'https://www.facebook.com/careers/v2/jobs/1672813472870915/'),
                ('Facebook', 'Offensive Security Engineer Intern, Red Team', 'https://www.facebook.com/careers+\
                /v2/jobs/712878282607325/'),
                ('Facebook', 'Internal Audit Manager – Infrastructure', 'https://www.facebook.com/careers/v2/+\
                jobs/420212419368641/'),
                ('Facebook', 'Offensive Security Engineer Intern', 'https://www.facebook.com/careers/v2/jobs/+\
                712878282607325/'),
                ('Facebook', 'Production Engineer', 'https://www.facebook.com/careers/+\
                v2/jobs/207984144060781/'),
                ('Facebook', 'Production Engineer', 'https://www.facebook.com/careers/v2/+\
                jobs/773785633436218/'),
                ('Facebook', 'Production Engineer', 'https://www.facebook.com/careers/v2/+\
                jobs/2597607417128369/'),
                ('Facebook', 'Production Engineer', 'https://www.facebook.com/careers/v2/+\
                jobs/536078547072736/'),
                ('Red Hat', '69710 - Software Development Internship - Red Hat Virtualization,+\
                Student Position', 'https://global-redhat.icims.com/jobs/69710/+\
                software-development-internship---red-hat-virtualization%2c-student+\
                -position/job?hub=7&in_iframe=1'),
                ('Red Hat', '80964 - Senior DevOps Engineer - TelCo 5G Integration', 'https://global-+\
                redhat.icims.com/jobs/80964/senior-devops-engineer---telco-5g-integration+\
                /job?hub=7&in_iframe=1'),
                ('Red Hat', '83546 - Senior Product Security Engineer - DevSecOps Managed+\
                Services', 'https://global-redhat.icims.com/jobs/83546/senior-product-security+\
                -engineer---devsecops-managed-services/job?hub=7&in_iframe=1'),
                ('Red Hat', '83285 - CI and DevOps Internship', 'https://global-redhat.icims.com/+\
                jobs/83285/ci-and-devops-internship/job?hub=7&in_iframe=1'),
                ('Red Hat', '69711 - Software Quality Engineering Internship - Local Student+\
                Position', 'https://global-redhat.icims.com/jobs/69711/software-quality+\
                -engineering-internship---local-student-position/job?hub=7&in_iframe=1'),
                ('Red Hat', '81168 - Software Quality Engineer - Storage Red Hat Virtualization', 'https://+\
                global-redhat.icims.com/jobs/81168/software-quality-engineer---storage+\
                -red-hat-virtualization/job?hub=7&in_iframe=1'),
                ('Red Hat', '70227 - Talent Acquisition Recruiter, Engineering Team - 12 month contract', 'https:+\
                //global-redhat.icims.com/jobs/70227/talent-acquisition-recruiter%2c-engineering+\
                -team---12-month-contract/job?hub=7&in_iframe=1'),
                ('Red Hat', '83236 - Principal Software Engineer - Object and Data Services (NooBaa)', 'https:+\
                //global-redhat.icims.com/jobs/83236/principal-software-engineer---object-+\
                and-data-services-%28noobaa%29/job?hub=7&in_iframe=1'),
                ('Red Hat', '82632 - Sales Renewals Representative', 'https://global-redhat.icims.com/jobs/+\
                82632/sales-renewals-representative/job?hub=7&in_iframe=1'),
                ('Red Hat', '81944 - Senior Technical Writer', 'https://global-redhat.icims.com/jobs/81944/+\
                senior-technical-writer/job?hub=7&in_iframe=1'), ]
        # All inserts succeed or none do.
        with transaction.atomic():
            for company_name_data, job_title, description_url in jobs:
                company_job = Company.objects.get(company_name=company_name_data)
                Job(company=company_job, job_title=job_title, description_url=description_url).save()

    operations = [
        migrations.RunPython(generate_data),
    ]
|
#!/usr/bin/env python3
import sys
import numpy as np
import zxntools as zxn
VERSION = "1.00.00"
DATE = "20210407"
NAME = 'imgtosl2'
def my_help(name):
    """Print version info and the usage text for *name* to stderr.

    Fixes: a missing "\\n" after the -p/--pal line ran the -p and -s help
    lines together; the -3/-6 descriptions said "x192" although those modes
    are 320x256 and 640x256; "resoultion" typos corrected.
    """
    version()
    sys.stderr.write(
        ("Usage: {} [<options>] [<infile>] [<outfile>]\n"
         "\toptions are\n"
         "\t-2\t--256x192\t256x192 resolution\n"
         "\t-3\t--320x256\t320x256 resolution\n"
         "\t-6\t--640x256\t640x256 resolution\n"
         "\t-d\t--dither\t\tuse dithering (dither)\n"
         "\t-h\t--help\t\tshow this help message\n"
         "\t-i\t--in\t\tinput file (stdin)\n"
         "\t-n\t--nodither\tdon't use dithering (dither)\n"
         "\t-o\t--out\t\toutput file (stdout)\n"
         "\t-p\t--pal\t\tpalette (P:NEXT)\n"
         "\t-s\t--scale\t\tscale image to fit\n"
         "\t-V\t--version\tget version information\n"
         "\t-v\t--verbose\tincrease verbosity\n"
         ).format(name))
def version():
    """Print this tool's name/version/date and the zxntools version to stderr."""
    sys.stderr.write("{} version {} {}\n".format(NAME, VERSION, DATE))
    zxn.version(True)
# Option table consumed by zxn.Options: getopt short/long specs plus the
# defaults for SL2 output.  'res' of (0, 0) means "not chosen"; the main
# block below defaults it to 256x192.
DEFAULTS = {'opts': "236dhi:no:p:rsVv",
            'long_opts': ['256', '256x192', '320', '320x256', '640', '640x256', 'dither', 'help', 'in=', 'nodither',
                          'out=', 'pal=', '16', 'radistan', 'scale', 'version', 'verbose'],
            'inks': None,
            'papers': None,
            'pal_type': None,
            'num_colors': 256,
            'tile_y': None,
            'res': (0, 0),
            'zxn_fmt': zxn.Options.SL2,
            'pal_first': False,
            'help': my_help
            }
def write_256(img, options):
    """Write 256x192 8-bpp pixel data to the output file in row order."""
    pixel_values = list(img.getdata())
    options.outfile.write(bytes(pixel_values))
def write_320(img, options):
    """Write 320x256 8-bpp pixel data, transposed to column-major order."""
    flat = np.asarray(img.getdata())
    columns = flat.reshape((256, 320)).transpose()
    options.outfile.write(bytes(columns.reshape(320 * 256).tolist()))
def write_640(img, options):
    """Write 640x256 4-bpp data: pack pixel pairs into bytes, then transpose."""
    nibbles = np.asarray(img.getdata()).reshape((256, 320, 2))
    packed = (nibbles * np.asarray([16, 1])).sum(axis=2)
    options.outfile.write(bytes(packed.transpose().reshape(320 * 256).tolist()))
def write_palette(img, options):
    """Write the image's palette when the output format calls for one."""
    # NXI output always embeds a palette; otherwise only when a palette was
    # auto-computed or explicitly supplied.
    if options.zxn_fmt == options.NXI or options.auto_palette or options.palette is not None:
        zxn.write_pal(img.getpalette(), options.num_colors, options.outfile)
def imgtosl2(img, options):
    """Quantize *img* for the selected resolution and write SL2 data/palette."""
    # The 640x256 mode is 4 bits per pixel (16 colours); others are 8 bpp.
    if options.res == (640, 256):
        options.num_colors = 16
    else:
        options.num_colors = 256
    # Pick the palette strategy: computed, Spectrum Next default, or given.
    if options.auto_palette:
        img = zxn.auto_palette(img, options)
    elif options.palette is None:
        img = zxn.next_palette(img, options)
    else:
        img = img.quantize(palette=options.palette, dither=options.dither)
    # Palette may precede or follow the pixel data depending on pal_first.
    if options.pal_first:
        write_palette(img, options)
    if options.res == (256, 192):
        write_256(img, options)
    elif options.res == (320, 256):
        write_320(img, options)
    else:
        write_640(img, options)
    if not options.pal_first:
        write_palette(img, options)
if __name__ == "__main__":
options = zxn.Options(sys.argv, DEFAULTS)
if options.res == (0, 0):
options.res = (256, 192)
img = zxn.get_image(options)
imgtosl2(img, options)
options.infile.close()
options.outfile.close()
|
#!/usr/bin/python3
# Read four grades and print their arithmetic mean.
# Bug fix: all four prompts previously said "primeira nota" (first grade)
# due to copy-paste; each prompt now names the correct grade.
nota1 = input("Digite a primeira nota: ")
nota2 = input("Digite a segunda nota: ")
nota3 = input("Digite a terceira nota: ")
nota4 = input("Digite a quarta nota: ")
# Convert the raw input strings to floats before averaging.
a = float(nota1)
b = float(nota2)
c = float(nota3)
d = float(nota4)
soma = a + b + c + d
print("A média aritmética é: ", soma / 4)
|
# Django-style choices for a post's publication status.
STATUS_CHOICES = [
    # first: Stored value, second: Displayed value
    ('draft', 'Draft'),
    ('published', 'Published'),
]
def upload_cover_photo_to(instance, filename):
    """Build the storage path for a post's cover photo upload.

    Files go under ``post-photos/<author username>/<original filename>``.
    Bug fix: the previous version ignored the *filename* parameter and
    emitted a literal "(unknown)" path segment.
    """
    return f"post-photos/{instance.author.username}/{filename}"
#! /usr/bin/env python
########################################################################
#
# Copyright 2018 cloudmesh.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# License: Apache 2.0
#
########################################################################
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import oyaml as yaml
import os
class Driver(object):
    """Factory for libcloud drivers configured from cloudmesh4.yaml."""

    def __init__(self):
        # Parsed configuration; empty until config() is called.
        self._conf = {}

    def config(self, name="~/.cloudmesh/cloudmesh4.yaml"):
        """Load the cloudmesh configuration from *name* (``~`` expanded)."""
        name = os.path.expanduser(name)
        # reads in the yaml file
        with open(name, "r") as stream:
            # safe_load never constructs arbitrary Python objects from YAML
            # tags and avoids the yaml.load() "no Loader" deprecation.
            self._conf = yaml.safe_load(stream)
        print(yaml.dump(self._conf))

    # noinspection PyPep8Naming
    def get(self, cloudname=None):
        """Return a libcloud connection for *cloudname*.

        Falls back to the configured default cloud when *cloudname* is None.
        Only 'azure' and 'aws' are recognized; anything else returns None.
        """
        # if cloudname=none get the default cloud
        # credentials = ….
        # return the driver for that cloud
        # now if you do that right you cans implify libcloud use with
        if cloudname is None:
            cloudname = self._conf.get('cloudmesh').get('default').get('cloud')
        conn = None
        if cloudname == 'azure':
            AZURE_SUBSCRIPTION_ID = self._conf.get('cloudmesh').get('cloud').get('azure').get('credentials').get(
                'AZURE_SUBSCRIPTION_ID')
            AZURE_MANAGEMENT_CERT_PATH = self._conf.get('cloudmesh').get('cloud').get('azure').get('credentials').get(
                'AZURE_MANAGEMENT_CERT_PATH')
            AZDriver = get_driver(Provider.AZURE)
            conn = AZDriver(subscription_id=AZURE_SUBSCRIPTION_ID, key_file=AZURE_MANAGEMENT_CERT_PATH)
        elif cloudname == 'aws':
            EC2_ACCESS_ID = self._conf.get('cloudmesh').get('cloud').get('aws').get('credentials').get('EC2_ACCESS_ID')
            EC2_SECRET_KEY = self._conf.get('cloudmesh').get('cloud').get('aws').get('credentials').get(
                'EC2_SECRET_KEY')
            EC2Driver = get_driver(Provider.EC2)
            conn = EC2Driver(EC2_ACCESS_ID, EC2_SECRET_KEY)
        return conn
if __name__ == '__main__':
    # Demo: load the user's cloudmesh config and build an AWS driver.
    cm = Driver()
    cm.config()
    driver = cm.get("aws")
    print("driver=", driver)
    # connection = cm.get_driver("azure")
    # retrieve available images and sizes
    # images = connection.list_images()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 14:05:29 2018
@author: amolsingh
"""
import cv2
import VideoPrep
import numpy as np
import time, os
import matplotlib.pyplot as plt
#%% Main
def stitch(folder):
    """Split the video at *folder* into frames and stitch them into one image.

    Writes QuadStitched.jpg into the QuadOutputFrames directory created next
    to the video and prints the elapsed wall-clock time.
    """
    time1 = time.time()
    # Extract frames from the video into <video dir>/QuadOutputFrames.
    VideoPrep.vid2imgs(folder)
    dir_path = os.path.dirname(folder)
    newpath = os.path.join(dir_path, "QuadOutputFrames")
    imgs = VideoPrep.findFrames(newpath)
    # intimgs = VideoPrep.chooseFrame(newpath)
    # imgs=[]
    # print(str(len(intimgs)))
    # for i in range (0, len(intimgs)):
    # paus = intimgs[i]
    # img = np.array(paus)
    # imgs.append(img)
    # for i in range(0, len(imgs)):
    # print("count: " + str(i))
    # plt.imshow(imgs[i])
    # plt.show()
    # NOTE(review): cv2.createStitcher was renamed Stitcher_create in
    # OpenCV >= 4 -- confirm the pinned OpenCV version.
    stitcher = cv2.createStitcher(False)
    result = np.empty(shape=[2048, 2048])
    ret, result = stitcher.stitch(imgs, result)
    # cv2.imwrite("/Users/amolsingh/Documents/OhgamiLab/Videos/QuadOutputFrames/QuadStitched.jpg", result)
    cv2.imwrite(os.path.join(newpath, "QuadStitched.jpg"), result)
    time2 = time.time()
    fintime = time2 - time1
    print("Time: " + str(fintime))
#%%
#stitch("/Users/amolsingh/Documents/OhgamiLab/Videos/slovid27-12.MOV")
#stitch("/Users/amolsingh/Documents/OhgamiLab/Videos/normvid17-12.MOV")
|
import tflearn
import tensorflow as tf
import cb_utils
import json
# Vocabulary and intent-class lists produced at training time; the network
# input/output sizes below are derived from them, so they must match the
# files used when the model was trained.
words = cb_utils.loadFromFile('words.csv')
classes = cb_utils.loadFromFile('classes.csv')
# reset underlying graph data
tf.reset_default_graph()
# Build neural network (must be identical to the training architecture
# for model.load to succeed)
net = tflearn.input_data(shape=[None, len(words)])
net = tflearn.fully_connected(net, cb_utils.nn_width)
net = tflearn.fully_connected(net, cb_utils.nn_width)
net = tflearn.fully_connected(net, len(classes), activation='softmax')
net = tflearn.regression(net)
# Define model and setup tensorboard
model = tflearn.DNN(net)
model.load('models/airline_chatbot_model.tflearn')
# Spot-check the restored model on a range of sample utterances,
# including a scrambled sentence to probe bag-of-words behaviour.
cb_utils.predictThis(model, 'I would like to buy a ticket to London')
cb_utils.predictThis(model, 'what time is it')
cb_utils.predictThis(model, 'I need some help')
cb_utils.predictThis(model, 'whats the flight schedule between London and Manchester')
cb_utils.predictThis(model, 'When does the first flight depart')
cb_utils.predictThis(model, 'What time does the flight from London arrive')
cb_utils.predictThis(model, 'I would like a flight to New York please.')
cb_utils.predictThis(model, 'like York flight I would a to New please.')
cb_utils.predictThis(model, 'Thanks for you help.')
cb_utils.predictThis(model, 'Can I pay with a credit card ?')
import numpy as np
from sklearn.datasets import load_wine
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
# Set random seed for reproducibility
np.random.seed(1000)
if __name__ == "__main__":
    # Load the dataset
    wine = load_wine()
    X, Y = shuffle(wine['data'], wine['target'], random_state=1000)
    nb_samples = X.shape[0]
    # Only the first 20 samples start out labeled; the rest are treated
    # as unlabeled and get pseudo-labeled by co-training.
    nb_labeled = 20
    nb_unlabeled = nb_samples - nb_labeled
    # High-confidence samples each view labels per iteration
    nb_unlabeled_samples = 2
    # Column index splitting the features into the two co-training views
    feature_cut = 7
    X_unlabeled = X[-nb_unlabeled:]
    X_labeled = X[:nb_labeled]
    Y_labeled = Y[:nb_labeled]
    # View 1: first feature_cut features; view 2: the remainder
    X_labeled_1 = X_labeled[:, 0:feature_cut]
    X_labeled_2 = X_labeled[:, feature_cut:]
    # Train a test Naive-Bayes classifier (baseline on labeled data only)
    nb0 = GaussianNB()
    nb0.fit(X_labeled, Y_labeled)
    # Single NB classification report
    print(classification_report(Y, nb0.predict(X), target_names=wine['target_names']))
    # Perform the Cotraining procedure
    nb1 = None
    nb2 = None
    while X_labeled_1.shape[0] <= nb_samples:
        # Retrain both view-specific classifiers on the current labeled pool
        nb1 = GaussianNB()
        nb1.fit(X_labeled_1, Y_labeled)
        nb2 = GaussianNB()
        nb2.fit(X_labeled_2, Y_labeled)
        if X_labeled_1.shape[0] == nb_samples:
            break
        # Each view ranks the unlabeled pool by its own max class
        # probability and picks its most confident samples.
        probs1 = nb1.predict_proba(X_unlabeled[:, 0:feature_cut])
        top_confidence_idxs1 = np.argsort(np.max(probs1, axis=1))[::-1]
        selected_idxs1 = top_confidence_idxs1[0:nb_unlabeled_samples]
        probs2 = nb2.predict_proba(X_unlabeled[:, feature_cut:])
        top_confidence_idxs2 = np.argsort(np.max(probs2, axis=1))[::-1]
        selected_idxs2 = top_confidence_idxs2[0:nb_unlabeled_samples]
        selected_idxs = list(selected_idxs1) + list(selected_idxs2)
        X_new_labeled = X_unlabeled[selected_idxs]
        X_new_labeled_1 = X_unlabeled[selected_idxs1, 0:feature_cut]
        X_new_labeled_2 = X_unlabeled[selected_idxs2, feature_cut:]
        # Pseudo-labels come from the view that selected each sample
        Y_new_labeled_1 = nb1.predict(X_new_labeled_1)
        Y_new_labeled_2 = nb2.predict(X_new_labeled_2)
        X_labeled_1 = np.concatenate((X_labeled_1, X_new_labeled[:, 0:feature_cut]), axis=0)
        X_labeled_2 = np.concatenate((X_labeled_2, X_new_labeled[:, feature_cut:]), axis=0)
        Y_labeled = np.concatenate((Y_labeled, Y_new_labeled_1, Y_new_labeled_2), axis=0)
        X_unlabeled = np.delete(X_unlabeled, selected_idxs, axis=0)
    # Print the Cotraining classification reports (evaluated on all of X,
    # including the originally labeled rows)
    print(classification_report(Y, nb1.predict(X[:, 0:feature_cut]), target_names=wine['target_names']))
    print(classification_report(Y, nb2.predict(X[:, feature_cut:]), target_names=wine['target_names']))
|
"""
Summary:
Factory class for building the AUnits from an ISIS data file.
This is used to read and build the parts of the ISIS dat file.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
There are a few functions in here that should be made protected. This
doesn't really make much difference in Python in terms of encapsulation,
but it makes it a bit clearer to any calling scripts that they might be
messing with something that they probably shouldn't be messing with.
Comments are a bit over the top in this file. Need to go through and
decide what is helpful and what is just getting in the way.
Updates:
"""
from __future__ import unicode_literals
import os
from ship.utils.atool import ATool
from ship.utils.fileloaders.loader import ALoader
from ship.utils import filetools as ftools
from ship.fmp.fmpunitfactory import FmpUnitFactory
from ship.utils import utilfunctions as uf
from ship.fmp.datunits.isisunit import UnknownUnit
from ship.fmp.datcollection import DatCollection
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class DatLoader(ATool, ALoader):
    """
    Isis data file (.DAT) I/O methods.
    Factory for creating the .DAT file objects.
    Identifies different section of the .DAT file and creates objects of
    the different units. Also saves updated file.
    All unknown data within the file is contained within UnkownSection units.
    These read in the text as found and write out as found, with no knowledge
    of the contents. Effectively bypassing the need to worry about parts that
    aren't being used yet.
    """

    def __init__(self):
        """Constructor."""
        super(DatLoader, self).__init__()
        logger.debug('Instantiating DatLoader')
        self.cur_no_of_units = 0       # Running count of units added so far
        self.contents = []             # Contents of dat file
        self.temp_unit = None          # AUnit
        self.is_ied = False            # If used to load an .ied file
        self._ic_name_types = {}
        # reach_info dictionary. Keeps track of the information needed to identify
        # reach status. Contains:
        # [0] = counter - iterated every time a new reach is started.
        # [1] = same reach status - keeps track of whether it's in an existing
        # reach or starting a new one.
        self.reach_info = {'reach_number': 0, 'same_reach': False}

    def loadFile(self, file_path, arg_dict={}):
        """Loads the ISIS .DAT file.
        Splits it into objects for each unit type, initial conditions etc.
        This is an epic if-else section for each unit type currently
        represented.
        Needs cleaning up and writing with a bit more style.
        Easy to add another unit type, if it's not currently covered then it
        will just be collected in the universal 'UnknownUnit' and printed
        back out the same as it came in.
        Args:
            file_path (str): path to the .dat file to load.
        Returns:
            units - UnitCollection containing the dat file units or False if
                they couldn't be loaded.
        Raises:
            IOError: If the file cannot be loaded or is empty.
            AttributeError: if the file is not of an expected type (.dat/.ief).
        See Also:
            IsisUnitCollection
            FactoryClasses
        TODO: Decide if the observer style calls are ever going to be needed.
            If they aren't then remove them rather than have them
            cluttering up the file.
        """
        # NOTE(review): arg_dict={} is a mutable default argument; safe here
        # only because it is never mutated — confirm before changing.
        line = ''
        # Used to populate the data for the UnknownUnit
        self.unknown_data = []
        # Composite for all dat units
        path_holder = ftools.PathHolder(file_path)
        self.units = DatCollection(path_holder)
        # self.units.file_dir, self.units.filename = os.path.split(file_path)
        # self.units.filename = os.path.splitext(self.units.filename)[0]
        if not uf.checkFileType(file_path, ext=['.dat', '.DAT']):
            if not uf.checkFileType(file_path, ext=['.ied', '.IED']):
                logger.error('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
                raise AttributeError('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
            else:
                self.is_ied = True
        contents = self.__loadFile(file_path)
        if(contents == False):
            raise IOError('Unable to load file at: ' + file_path)
        return self.buildDat(contents, arg_dict)

    def buildDat(self, contents, arg_dict={}):
        """Construct the DatCollection from the raw file lines.

        Walks the file line by line, handing recognised unit keywords to
        the FmpUnitFactory and collecting everything else into
        UnknownUnit chunks so it can be written back out unchanged.

        Args:
            contents (list): the raw lines of the .dat/.ied file.
        Returns:
            DatCollection with one entry per identified/unknown unit.
        """
        self.contents = contents
        # Counter for the number of rows that have been read from the
        # file contents list.
        i = 0
        # Get an instance of the unit factory with the number of nodes in the file.
        unit_factory = FmpUnitFactory()
        # Dictionary containing the keys to identify units in the dat file
        unit_vars = unit_factory.getUnitIdentifiers()
        # Create a unit from the header data in the first few lines of the dat file.
        if not self.is_ied:
            i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, 0, 'HEADER', 0)
            in_unknown_section = False
            # Now we can update the HeaderUnit subContents
            self.updateSubContents()
        in_unknown_section = False
        while i < len(self.contents):
            # Get the line and then split it to retrieve the first word.
            # Check this word against the # unit_type keys we set above to see
            line = self.contents[i]
            temp_line = line.strip()
            if temp_line:
                first_word = line.split()[0].strip()
            else:
                # Blank lines get a sentinel word that never matches a unit key
                first_word = 'Nothing'
            if first_word in unit_vars:
                # If building an UnknownUnit then create and reset
                if(in_unknown_section == True):
                    self.createUnknownSection()
                    self.updateSubContents()
                    # Reset the reach for the UnknownUnit
                    unit_factory.same_reach = False
                '''Call the unit creator function and get back the unit and the
                updated contents list index.
                Most of these variables are self explanatory, but
                unit_vars[first_word] is the key for the unit type to make.
                '''
                # i, self.temp_unit = unit_factory.createUnit(self.contents, i,
                #                                unit_vars[first_word], self.cur_no_of_units)
                i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, i,
                                                                    first_word,
                                                                    self.cur_no_of_units)
                '''In case we got in but found something wasn't supported.
                it's i-1 because we can't return onto the same line that was
                read or it will loop forever, so store it here and move on
                '''
                if self.temp_unit == False:
                    self.unknown_data.append(self.contents[i].rstrip('\n'))
                    i += 1
                    self.unknown_data.append(self.contents[i].rstrip('\n'))
                    in_unknown_section = True
                else:
                    self.updateSubContents()
                    in_unknown_section = False
            else:
                # Unrecognised line: accumulate it for the next UnknownUnit
                in_unknown_section = True
                self.unknown_data.append(self.contents[i].rstrip('\n'))
                i += 1
        line = None
        del self.unknown_data
        return self.units

    def createUnknownSection(self):
        """Builds unidentified sections from the .DAT file.
        All currently un-dealt-with sections of the .DAT file are
        incorporated into this.
        Loads in chunks of the file 'as-is' and prints them out the same way.
        """
        # logger.debug('Creating UnknownUnit - Unit No: ' + str(self.cur_no_of_units))
        self.temp_unit = UnknownUnit()
        self.temp_unit.readUnitData(self.unknown_data)

    def getUnits(self):
        """Getter for imported units
        Note:
            Deprecated: Will be removed. Please use self.units directly.
        Returns:
            IsisUnitCollection - The units loaded from the dat file.
        """
        return self.units

    def updateSubContents(self):
        """Updates the self.units.
        Appends the new temp_unit to list of units and resets all the
        variables.
        """
        #logger.debug('In updateSubContents')
        # Don't update node count here as we aren't adding any 'new' nodes
        self.units.addUnit(self.temp_unit, update_node_count=False, no_copy=True)
        self.cur_no_of_units += 1
        del self.temp_unit
        self.unknown_data = []

    def __loadFile(self, filepath):
        """Load the .dat file into the contents list.
        Args:
            filepath: Path to the required DAT file.
        Returns:
            True if loaded ok, False otherwise.
        """
        logger.info('loading File: ' + filepath)
        contents = []
        try:
            contents = ftools.getFile(filepath)
        except IOError:
            logger.error('IOError - Unable to load file')
            return False
        if(contents == None):
            logger.error('.DAT file is empty at: ' + filepath)
            return False
        return contents
|
import os
import pprint
import sys
sys.path.append("src/")
import hydra
import numpy as np
import pandas as pd
import tqdm
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
@hydra.main(config_path="../../config", config_name="default")
def main(config: DictConfig) -> None:
    """Assign a cross-validation fold index to every training row.

    Reads the competition training CSV, splits it with the fold object
    declared in the Hydra config, writes the fold number into a new
    'Fold' column and saves the result under ``config.input_dir``.
    """
    print("-------------------------------------------------------------------")
    # Echo the fully-resolved config for reproducibility of the split.
    pprint.PrettyPrinter(indent=2).pprint(OmegaConf.to_container(config, resolve=True))
    train = pd.read_csv(config.competition.train_path)
    y = train[config.competition.target_column]
    # split
    train["Fold"] = 0
    # config.fold.fold is instantiated into a splitter object — presumably
    # a sklearn KFold-style class with a .split(X, y) method; only the
    # validation indices are needed to tag each row's fold.
    kfold = instantiate(config.fold.fold)
    for f, (_, valid_index) in enumerate(kfold.split(train, y)):
        train.loc[valid_index, "Fold"] = f
    path = os.path.join(config.input_dir, config.fold.csv_filename)
    train.to_csv(path, index=False)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
'''
enrich_plot: contains a number of functions that use matplotlib to generate visualizations of enrich data. This module is intended to be extensible, containing all enrich-related visuals
'''
__author__ = "Douglas M. Fowler"
__copyright__ = "Copyright 2011"
__credits__ = ["Douglas M Fowler", "Carlos L. Araya"]
__license__ = "FreeBSD"
__version__ = "0.2"
__maintainer__ = "Douglas M. Fowler"
__email__ = "dfowler@uw.edu"
import sys, os, time, math
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as pylab
import matplotlib.cm as cm
import matplotlib.pyplot as plt
except:
print 'Error: Matplotlib not present. Cannot create visuals'
try:
import numpy as np
except:
print 'Error: Numpy not present. Cannot create visuals'
def freqplot(path, infile):
    '''
    freqplot: takes a mapunlink file and produces a heat map for visualizing diversity

    Returns 0 on success, 1 on any I/O or dependency failure.
    The plot is written to <path>/plots/<infile>_diversity.pdf.
    '''
    '''
    Basic I/O checks - making sure files exist, etc.
    '''
    try:
        # Check to make sure the input file exists
        f_infile = open(path + 'data/output/' + infile, 'U')
        f_infile.close()
    except:
        print 'Error: could not open input file'
        return 1
    try:
        # Verify matplotlib/numpy actually imported at module load time
        plt_test = plt
        np_test = np.array
    except: # already warned the user about this above, so no message
        return 1
    f_infile = open(path + 'data/output/' + infile, 'U')
    header_line = f_infile.readline().split()
    # Make sure we can locate the position column
    try:
        pos_index = header_line.index('position')
    except:
        print 'Could not find the position index for plotting. Was the input file labeled properly?'
        return 1
    '''
    Reading in the data
    xs: the positions
    ys: the AA substitutions/PRO substitutions
    zs: frequencies
    values: a len(y) x len(x) matrix containing all the z values.
    Necessary for plotting the data as a heat map.
    '''
    xs = []
    ys = [index for index in xrange(len(header_line)-1)]
    zs = []
    # Grab all the data from the file and dump it into xs and zs
    for line in f_infile:
        line = line.split()
        xs.append(int(line[pos_index]))
        # NOTE(review): 'is not' between ints relies on small-int caching;
        # works here only while pos_index stays small — should be '!='
        zs.extend([float(line[index]) for index in xrange(len(line)) if index is not pos_index])
    values = np.array(zs)
    values = values.reshape(len(xs), len(ys))
    values = values.transpose()
    '''
    Plotting the data
    Setting up graphical parameters for pyplot
    '''
    # Labels and label fonts
    xlab = 'Position'
    if 'PRO' in infile:
        ylab = 'Amino acid'
    elif 'DNA' in infile:
        ylab = 'Nucleotide'
    else:
        print 'Could not determine type of data present for plotting a heat map. Is the file name appropriately labeled?'
        return 1
    zlab = 'Frequency'
    title_font = {'fontsize': 18}
    # Tick marks - the padding is necessary so heat map boxes are not cut off
    plotys = ['']
    plotys.extend([header_line[index] for index in xrange(len(header_line)) if index != pos_index])
    plotys.append('')
    # Plotting the actual data
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(values, cmap=cm.Blues)
    # Resizing the figure to be slightly larger
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0]*1.5, DefaultSize[1]*1.5) )
    # Set the labels for the axes
    cax.axes.xaxis.set_label_text(xlab, **title_font)
    cax.axes.yaxis.set_label_text(ylab, **title_font)
    # Plotting the tick marks - the padding is necessary so boxes are not cut off
    len_y = [-1]
    len_y.extend(range(len(plotys)))
    len_y.append(len(plotys))
    pylab.yticks(len_y, plotys)
    # Drawing the color bar an appropriate size
    colorbar_args = {'shrink': 0.62}
    pylab.colorbar(cax, **colorbar_args)
    '''
    Saving
    Save the graph out as a pdf to disk
    '''
    fileout = infile + '_diversity.pdf'
    fileout_total = path + 'plots/' + fileout
    plt.title(fileout)
    pylab.savefig(fileout_total, dpi=100)
    return 0
def ratioplot(path, infile1, infile2):
    '''
    ratioplot: takes two mapunlink files and calculates the position-averaged ratio of the frequency of mutation between them

    The ratio selected/input is drawn as a stem plot (red above 1, blue at
    or below 1) and saved to <path>/plots/. Returns 0 on success, 1 on
    any I/O or dependency failure.
    '''
    '''
    Basic I/O checks - making sure files exist, etc.
    '''
    try: #check to make sure the input file exists
        f_infile = open(path + 'data/output/' + infile1, 'U')
        f_infile.close()
        f_infile = open(path + 'data/output/' + infile2, 'U')
        f_infile.close()
    except:
        print 'Error: could not open input file'
        return 1
    try:
        plt_test = plt
        np_test = np.array
    except: # already warned the user about this above, so no message
        return 1
    '''
    Reading the data: Calculate the ratio of frequencies for both files.
    Assign the ratios appropriate colors based on whether the
    ratio is above 1 or <= 1.
    file1_values/file2_values: dictionaries mapping {'position': frequency}
    '''
    f_infile = open(path + 'data/output/' + infile1, 'U')
    file1_values = read_position_file(f_infile)
    f_infile = open(path + 'data/output/' + infile2, 'U')
    file2_values = read_position_file(f_infile)
    ratios = []
    colors = []
    positions = []
    for position in file1_values:
        x = float(sum(file1_values[position]))
        y = float(sum(file2_values[position]))
        #rat = (x - position) / (y - position) WHY?
        if y != 0:
            rat = x / y
        else:
            rat = 1 #this is highly unsatisfactory, there should be some indication that this data is missing
        ratios.append(rat)
        positions.append(position)
        if (rat <= 1):
            colors.append('b')
        else:
            colors.append('r')
    '''
    Plotting the data: plot the ratios
    '''
    fig = plt.figure()
    fig.add_subplot(111)
    # Enriched positions (ratio > 1) in red, depleted in blue
    ax = plt.plot( [positions[index] for index in xrange(len(ratios)) if ratios[index] > 1],
                   [ratios[index] for index in xrange(len(ratios)) if ratios[index] > 1], 'ro')
    ax = plt.plot( [positions[index] for index in xrange(len(ratios)) if ratios[index] <= 1],
                   [ratios[index] for index in xrange(len(ratios)) if ratios[index] <= 1], 'bo')
    # Set the labels and other graphical parameters
    ylab = 'Position-averaged mutation frequency in selected/input'
    xlab = 'Position'
    plt.gca().set_xlim([min(positions) - 1, max(positions) + 1])
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    # Draw the vertical lines that connect the baseline to each point
    plt.vlines(positions, [1] * len(positions), ratios, color=colors, linestyles='solid', lw=2)
    plt.axhline(y=1, color='gray')
    # Resize the image to be a little larger
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0]*1.5, DefaultSize[1]*1.5) )
    # Draw the title manually because otherwise the default offset doesn't work
    fileout = infile1 + "_" + infile2 + '_position_enrichment.pdf'
    fig.text(0.52, 0.97, fileout, ha="center", va="center", size="medium")
    '''
    Saving to disk
    '''
    fileout_total = path + 'plots/' + fileout
    pylab.savefig(fileout_total, dpi=100)
    return 0
def read_position_file(f_infile):
    '''
    Read a position-indexed frequency file into a dictionary.

    The first line is a whitespace-separated header containing a
    'position' column; every following row is keyed by its integer
    position and mapped to the list of float frequencies from all the
    other columns. The file handle is closed before returning.

    Args:
        f_infile: an open, readable file-like object.
    Returns:
        dict mapping int position -> list of float frequencies.
    '''
    header = f_infile.readline().split()
    position_index = header.index('position')
    file_values = {}
    for line in f_infile:
        fields = line.split()
        # BUG FIX: the original compared column indices with 'is not',
        # which is an identity test that only works for cached small ints.
        file_values[int(fields[position_index])] = [
            float(value) for index, value in enumerate(fields)
            if index != position_index
        ]
    f_infile.close()
    return file_values
def read_substitution_file(f_infile):
    '''
    Read a substitution-indexed frequency file.

    The first line is a whitespace-separated header with a 'position'
    column; the other columns name substitutions. Returns a dict with a
    'positions' list plus one {'values': [...]} entry per substitution
    column holding its per-row frequencies. Closes the handle.

    Args:
        f_infile: an open, readable file-like object.
    Returns:
        dict: {'positions': [int, ...], <substitution>: {'values': [float, ...]}, ...}
    '''
    header = f_infile.readline().split()
    position_index = header.index('position')
    file_values = {'positions': []}
    # BUG FIX: index comparisons now use '!=' instead of 'is not' — the
    # identity test on ints only works for cached small integers.
    for index in range(len(header)):
        if index != position_index:
            file_values[header[index]] = {'values': []}
    for line in f_infile:
        fields = line.split()
        file_values['positions'].append(int(fields[position_index]))
        # Plain loop instead of the original's side-effecting list
        # comprehension; behaviour is identical.
        for index in range(len(fields)):
            if index != position_index:
                file_values[header[index]]['values'].append(float(fields[index]))
    f_infile.close()
    return file_values
def separate_data(substitution, positions, ratio, wt_data, axis_num, cols, rows):
    '''
    Another small helper function to follow DRY principles with the AA plot below.

    Splits one substitution's ratio values into three categories —
    wildtype residues (ratio 0 and matching the wildtype variable),
    missing data (ratio 0 but not wildtype) and everything else — then
    draws them into subplot *axis_num* of a rows x cols lattice.
    '''
    values = ratio[substitution]
    xr = xrange(len(values))
    # Separate out the values based on their category
    # (a ratio of 0 is the sentinel for "wildtype or missing" upstream)
    wt_x = [positions[index] for index in xr if (values[index] == 0 and wt_data['variable'][index] == substitution)]
    wt_y = [values[index] for index in xr if (values[index] == 0 and wt_data['variable'][index] == substitution)]
    nan_x = [positions[index] for index in xr if (values[index] == 0 and wt_data['variable'][index] != substitution)]
    nan_y = [values[index] for index in xr if (values[index] == 0 and wt_data['variable'][index] != substitution)]
    rest_x = [positions[index] for index in xr if values[index] != 0]
    rest_y = [values[index] for index in xr if values[index] != 0]
    ax = plt.subplot(rows, cols, axis_num)
    # Four evenly spaced x ticks across the value range
    header_tick_values = range(0, len(values), (len(values)/4))
    all_residue_plot_data(wt_x, wt_y, nan_x, nan_y, rest_x, rest_y)
    adjust_axes(ax, str(substitution), header_tick_values, header_tick_values, axis_num, cols, 18)
def AAPlot(wt_data, ratio, axis_num, positions):
    '''
    Takes in ratios and maps how values should be plotted. Residues are
    plotted based on 3 categories: WT residues, residues with ratios of 1,
    and all other residues.
    Since this is a faux lattice plot, each substitution must be iterated through.

    The stop codon ('*') is deliberately drawn last so it appears in the
    final subplot of the lattice.
    '''
    # 5 columns; enough rows to hold one subplot per substitution
    rows = int(math.ceil(float(len(ratio.keys())) / 5.0))
    cols = 5
    keys = ratio.keys()
    keys = sorted(keys)
    for substitution in keys:
        if (substitution != '*'): # Save the stop index for last
            axis_num += 1
            # a : {0, 1, 2, 0, ... }
            separate_data(substitution, positions, ratio, wt_data, axis_num, cols, rows)
    '''
    Plotting the final substitution, the stop codon/index
    '''
    substitution = '*'
    axis_num += 1
    separate_data(substitution, positions, ratio, wt_data, axis_num, cols, rows)
def PosPlot(wt_data, ratio, axis_num, positions):
    '''
    Takes in ratios and maps how values should be plotted. Positions are
    plotted based on 3 categories: WT residues, residues with ratios of 1,
    and all other residues.
    Rather than re-read the data based on positions instead of substitutions, it makes sense to just iterate through the substitution dictionary and gather the necessary data.
    '''
    # position_d: {'header': [substitutions in plot order],
    #              <row index>: [that row's ratio per substitution]}
    position_d = {'header' : []}
    '''
    Dealing with the stop position/codon, as well as mapping positions -> frequencies
    '''
    # Sort substitutions but move '*' (stop) to the end of the ordering.
    # NOTE: .keys()/.sort() here assume Python 2 list semantics.
    aa_sorted = ratio.keys()
    aa_sorted.sort()
    aa_sorted.pop(0)
    aa_sorted.append('*')
    for substitution in aa_sorted:
        if (substitution != '*'):
            position_d['header'].append(substitution)
            for index in xrange(len(ratio[substitution])):
                if position_d.get(index,None) == None:
                    position_d[index] = []
                position_d[index].append(ratio[substitution][index])
    position_d['header'].append('*')
    for index in xrange(len(ratio['*'])):
        if position_d.get(index, None) == None:
            position_d[index] = []
        position_d[index].append(ratio['*'][index])
    rows = int(math.ceil(float(len(position_d.keys())) / 5.0))
    cols = 5
    keys = [key for key in position_d if key != 'header']
    keys.sort()
    for position in keys:
        axis_num += 1
        # 1: {0.1, ...}
        values = position_d[position]
        xr = xrange(len(values))
        '''
        Separate categorizing from the AA plot above because we're indexing on position.
        '''
        wt_x = [positions[index] for index in xr if (values[index] == 0 and wt_data['variable'][position] == position_d['header'][index])]
        wt_y = [values[index] for index in xr if (values[index] == 0 and wt_data['variable'][position] == position_d['header'][index])]
        nan_x = [positions[index] for index in xr if (values[index] == 0 and wt_data['variable'][position] != position_d['header'][index])]
        nan_y = [values[index] for index in xr if (values[index] == 0 and wt_data['variable'][position] != position_d['header'][index])]
        rest_x = [positions[index] for index in xr if values[index] != 0]
        rest_y = [values[index] for index in xr if values[index] != 0]
        ax = plt.subplot(rows, cols, axis_num)
        all_residue_plot_data(wt_x, wt_y, nan_x, nan_y, rest_x, rest_y)
        adjust_axes(ax, str(position), range(len(position_d['header'])), position_d['header'], axis_num, cols, 13)
def adjust_axes(ax, title, header_ticks, header_list, axis_num, cols, axesfontsize):
    '''
    A small helper function that adjusts the axes on the plots

    To keep the faux-lattice readable, x tick labels are drawn only on
    the top of every second subplot in the first row, and y tick labels
    only in the leftmost column of every second row; all other subplots
    hide their axes entirely.
    '''
    frame = plt.gca()
    axes_font = {'fontsize':axesfontsize}
    title_font = {'fontsize':18}
    plt.text(0.1,3, title, **title_font)
    plt.axhline()
    if (axis_num <= cols) and (axis_num % 2 == 0):
        # Top row, even column: show x labels along the top edge
        frame.xaxis.set_ticks_position('top')
        frame.xaxis.set_label_position('top')
        frame.xaxis.tick_top()
        ax.set_xticks(header_ticks)
        ax.set_xticklabels(header_list, **axes_font)
    else:
        frame.axes.get_xaxis().set_visible(False)
    if (((axis_num - 1) / cols) % 2 == 0) and axis_num % cols == 1:
        # First column of every other row: fixed y scale labels
        ax.set_yticks((-4,-2,0,2,4))
        ax.set_yticklabels([-4,-2,0,2,4], **axes_font)
    else:
        frame.axes.get_yaxis().set_visible(False)
def all_residue_plot_data(wt_x, wt_y, nan_x, nan_y, rest_x, rest_y):
    '''
    Another small helper function that assists with plotting the actual data.

    Draws wildtype residues as red squares, missing/NaN residues as gray
    squares and all remaining residues as blue circles on the current
    axes, then clamps the y axis to [-4, 4] and the x axis to the data
    range plus one unit of padding on each side.
    '''
    plt.plot(wt_x, wt_y, "rs")
    plt.plot(nan_x, nan_y, color="gray", marker="s")
    plt.plot(rest_x, rest_y, "bo")
    plt.ylim(-4, 4)
    # BUG FIX: build the combined x list on a copy — the original did
    # `xs = wt_x` and then extended it, silently appending nan_x/rest_x
    # onto the caller's wt_x list.
    xs = list(wt_x)
    xs.extend(nan_x)
    xs.extend(rest_x)
    plt.xlim(min(xs) - 1, max(xs) + 1)
def all_residue_plot(path, infile1, infile2, wtseq, mode):
'''
all_residue_plot: takes two mapunlink files, calculates and calculates the ratio of the frequency of mutation between them for each mutation-position combination
'''
'''
Basic I/O checks - check that files exist, etc.
'''
try:
# Check to make sure the input file exists
f_infile = open(path + 'data/output/' + infile1, 'U')
f_infile.close()
f_infile = open(path + 'data/output/' + infile2, 'U')
f_infile.close()
except:
print 'Error: could not open input file'
return 1
try:
plt_test = plt
np_test = np.array
except: # already warned the user about this above, so no message
return 1
'''
Read in the data
file1_values/file2_values: a dictionary maaping {'position': 'frequency'}
wt_data: a dictionary containing the wildtype positions and AA/proteins
'''
f_infile = open(path + 'data/output/' + infile1, 'U')
file1_values = read_substitution_file(f_infile)
f_infile = open(path + 'data/output/' + infile2, 'U')
file2_values = read_substitution_file(f_infile)
# Obtain wildtype residues and set at the enrichment ratio for the wildtype sequence:
wt_data = {'position': [], 'variable': []}
for position in xrange(len(wtseq)):
wt_data['position'].append(position)
wt_data['variable'].append(wtseq[position])
'''
Calculating the log2 enrichment ratio for each mutation
'''
ratio = {}
positions = file1_values['positions']
for substitution in file1_values:
if substitution != 'positions':
f1p = file1_values[substitution] # Caching for faster lookup
f2p = file2_values[substitution]
if (ratio.get(substitution, None) == None):
ratio[substitution] = []
for index in xrange(len(f1p['values'])):
if f2p['values'][index] != 0.0 and f1p['values'][index] != 0.0:
ratio[substitution].append(math.log(f1p['values'][index] / f2p['values'][index],2))
else:
ratio[substitution].append(0)
'''
Setting up the graph to accomodate multiple sub plots and look similar
to a lattice plot
'''
axis_num = 0
fig = pylab.figure()
fig.subplots_adjust(wspace=0.0001, hspace=0.0001)
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2.5, DefaultSize[1]*2.5) )
'''
Plot the data - see individual functions for more details
'''
ylab = ""
if mode is 'AA':
AAPlot(wt_data, ratio, axis_num, positions)
ylab = "Position"
elif mode is 'Pos':
PosPlot(wt_data, ratio, axis_num, positions)
ylab = "Amino Acid Substitution"
# Drawing axis labels and other titles
fileout = infile1 + "_" + infile2 + '_all_residue_by_' + mode + '.pdf'
fileout_total = path + 'plots/' + fileout
fig.text(0.1, 0.52, "log2(Mutation frequency in selected/input)", ha="right", va="center", size="xx-large", rotation="vertical")
fig.text(0.52, 0.08, ylab, ha="center", va="bottom", size="xx-large")
fig.text(0.52, 0.99, fileout, ha="center", va="top", size="xx-large")
'''
Saving to disk
'''
pylab.savefig(fileout_total, dpi=100)
return 0
|
import os
import uuid
from django.db.models.fields.files import FileField, ImageField, ImageFieldFile, FieldFile
try:
from django.urls import reverse_lazy
except ImportError:
from django.core.urlresolvers import reverse_lazy
PROTECTION_METHODS = ['basic', 'nginx', 'lighttpd', 'apache']
class PrivateFieldFile(FieldFile):
    # FieldFile whose URL routes through a protected Django view
    # ('private_files-file') instead of exposing the storage backend,
    # optionally guarded by a single-use access key.
    def _get_url(self):
        """Build the protected-download URL for this file.

        The URL resolves to the 'private_files-file' view with the
        (app_label, model, field, pk, filename) identity of this file.
        When the field is single-use, a random access key mapping back to
        that identity is cached for 3600 seconds and appended as an
        'access-key' query parameter.
        """
        self._require_file()
        app_label = self.instance._meta.app_label
        model_name = self.instance._meta.object_name.lower()
        field_name = self.field.name
        pk = self.instance.pk
        filename = os.path.basename(self.path)
        url = reverse_lazy('private_files-file', args=[app_label, model_name, field_name, pk, filename])
        if self.field.single_use:
            # Imported lazily so the cache backend is only touched when
            # single-use links are actually generated.
            from django.core.cache import cache
            access_key = uuid.uuid4().hex
            cache.set(access_key, '%s-%s-%s-%s-%s' % (app_label, model_name, field_name, pk, filename), 3600)
            url += '?access-key=' + access_key
        return url
    url = property(_get_url)
    def _get_contidion(self):
        # NOTE(review): method name is a typo of "_get_condition"; kept
        # as-is since external code could reference it by name.
        return self.field.condition
    condition = property(_get_contidion)
    def _get_attachment(self):
        # Whether the serving view should force a download (see field).
        return self.field.attachment
    attachment = property(_get_attachment)
    def _get_single_use(self):
        # Whether URLs carry a one-time cached access key.
        return self.field.single_use
    single_use = property(_get_single_use)
def is_user_authenticated(request, instance):
    """Default access condition: allow only logged-in, non-anonymous users.

    *instance* (the model instance owning the file) is accepted for
    signature compatibility with custom conditions but not consulted.
    """
    user = request.user
    if user.is_anonymous:
        return False
    return user.is_authenticated
class PrivateFileField(FileField):
    # FileField variant whose files are served through an
    # access-controlled view rather than directly from storage.
    attr_class = PrivateFieldFile
    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, condition=is_user_authenticated,
                 attachment=True, single_use=False, **kwargs):
        """Extend FileField with access-control options.

        Args:
            condition: callable (request, instance) -> bool deciding
                whether the requester may download the file; defaults to
                requiring an authenticated user.
            attachment: serve the file as a download rather than inline.
            single_use: append a one-time cached access key to each URL.
        Remaining arguments are passed through to FileField unchanged.
        """
        super(PrivateFileField, self).__init__(verbose_name, name, upload_to, storage, **kwargs)
        self.condition = condition
        self.attachment = attachment
        self.single_use = single_use
|
# Package init: re-export the submodules so callers can write
# `from <package> import temperature_manager, temperature_monitor`.
from . import temperature_manager
from . import temperature_monitor
__all__ = ['temperature_manager', 'temperature_monitor']
|
# Generated by Django 3.1 on 2020-11-20 09:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a nullable one-to-one link from DistrictMinorWork to its
    # metadata record; null=True means existing rows need no backfill.
    dependencies = [
        ('sample', '0009_auto_20201120_0845'),
    ]
    operations = [
        migrations.AddField(
            model_name='districtminorwork',
            name='metadata',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='sample.districtminorworkmetadata'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Copyright (C) 1999-2002 Joel Rosdahl
# Copyright © 2011-2013 Jason R. Coombs
"""
Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol at a quite
low level. It provides an event-driven IRC client framework. It has
fairly thorough support for the basic IRC protocol, CTCP, and DCC chat,
but DCC file transfers are not yet supported.
In order to understand how to make an IRC client, I'm afraid you more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
* Messages from an IRC server triggers events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* The IRC protocol shines through the abstraction a bit too much.
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
* There is no support for DCC file transfers.
* The author hasn't even read RFCs 2810, 2811, 2812 and 2813.
* Like most projects, documentation is lacking...
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
from __future__ import absolute_import, division
import bisect
import re
import select
import socket
import string
import time
import struct
import logging
import threading
import abc
import collections
import functools
import itertools
import six
try:
import pkg_resources
except ImportError:
pass
from . import connection
from . import events
from . import functools as irc_functools
from . import strings
from . import util
from . import buffer
from . import schedule
from . import features
log = logging.getLogger(__name__)
# set the version tuple from the installed package metadata
try:
    VERSION_STRING = pkg_resources.require('irc')[0].version
    # Raw string: '\d' in a plain string literal is an invalid escape
    # sequence (DeprecationWarning, later SyntaxWarning) on modern Pythons.
    VERSION = tuple(int(res) for res in re.findall(r'\d+', VERSION_STRING))
except Exception:
    # pkg_resources missing or the package not installed: fall back to
    # harmless placeholders rather than failing at import time.
    VERSION_STRING = 'unknown'
    VERSION = ()
# TODO
# ----
# (maybe) color parser convenience functions
# documentation (including all event types)
# (maybe) add awareness of different types of ircds
# send data asynchronously to the server (and DCC connections)
# (maybe) automatically close unused, passive DCC connections after a while
# NOTES
# -----
# connection.quit() only sends QUIT to the server.
# ERROR from the server triggers the error event and the disconnect event.
# dropping of the connection triggers the disconnect event.
class IRCError(Exception):
    """Base exception for errors raised by this IRC library."""
class InvalidCharacters(ValueError):
    """Raised when a message contains characters that may not be sent."""
class MessageTooLong(ValueError):
    """Raised when a message exceeds the allowed protocol length."""
class PrioritizedHandler(
        collections.namedtuple('Base', ('priority', 'callback'))):
    """An event handler paired with its priority (lower sorts first)."""

    def __lt__(self, other):
        """Order handlers by priority only; the callback never participates."""
        return self.priority < other.priority
class IRC(object):
    """Class that handles one or several IRC server connections.

    When an IRC object has been instantiated, it can be used to create
    Connection objects that represent the IRC connections.  The
    responsibility of the IRC object is to provide an event-driven
    framework for the connections and to keep the connections alive.
    It runs a select loop to poll each connection's TCP socket and
    hands over the sockets with incoming data for processing by the
    corresponding connection.

    The methods of most interest for an IRC client writer are server,
    add_global_handler, remove_global_handler, execute_at,
    execute_delayed, execute_every, process_once, and process_forever.

    Here is an example:

        client = irc.client.IRC()
        server = client.server()
        server.connect("irc.some.where", 6667, "my_nickname")
        server.privmsg("a_nickname", "Hi there!")
        client.process_forever()

    This will connect to the IRC server irc.some.where on port 6667
    using the nickname my_nickname and send the message "Hi there!"
    to the nickname a_nickname.

    The methods of this class are thread-safe; accesses to and modifications
    of its internal lists of connections, handlers, and delayed commands
    are guarded by a mutex.
    """

    def __do_nothing(*args, **kwargs):
        pass

    def __init__(self, on_connect=__do_nothing, on_disconnect=__do_nothing,
                 on_schedule=__do_nothing):
        """Constructor for IRC objects.

        on_connect: optional callback invoked when a new connection
        is made.

        on_disconnect: optional callback invoked when a socket is
        disconnected.

        on_schedule: optional callback, usually supplied by an external
        event loop, to indicate in float seconds that the client needs to
        process events that many seconds in the future. An external event
        loop will implement this callback to schedule a call to
        process_timeout.

        The three arguments mainly exist to be able to use an external
        main loop (for example Tkinter's or PyGTK's main app loop)
        instead of calling the process_forever method.

        An alternative is to just call ServerConnection.process_once()
        once in a while.
        """
        self._on_connect = on_connect
        self._on_disconnect = on_disconnect
        self._on_schedule = on_schedule
        self.connections = []
        self.handlers = {}
        self.delayed_commands = []  # list of DelayedCommands
        # Modifications to these shared lists and dict need to be thread-safe
        self.mutex = threading.RLock()
        # Answer server PINGs automatically, at a very high priority.
        self.add_global_handler("ping", _ping_ponger, -42)

    def server(self):
        """Creates and returns a ServerConnection object."""
        c = ServerConnection(self)
        with self.mutex:
            self.connections.append(c)
        return c

    def process_data(self, sockets):
        """Called when there is more data to read on connection sockets.

        Arguments:
        sockets -- A list of socket objects.

        See documentation for IRC.__init__.
        """
        with self.mutex:
            log.log(logging.DEBUG - 2, "process_data()")
            # Match each ready socket to the connection that owns it.
            for s, c in itertools.product(sockets, self.connections):
                if s == c.socket:
                    c.process_data()

    def process_timeout(self):
        """Called when a timeout notification is due.

        See documentation for IRC.__init__.
        """
        with self.mutex:
            # delayed_commands is kept sorted by due time, so we can stop
            # at the first command that is not yet due.
            while self.delayed_commands:
                command = self.delayed_commands[0]
                if not command.due():
                    break
                command.function()
                if isinstance(command, schedule.PeriodicCommand):
                    # Re-queue the next occurrence of a periodic command.
                    self._schedule_command(command.next())
                del self.delayed_commands[0]

    def process_once(self, timeout=0):
        """Process data from connections once.

        Arguments:
        timeout -- How long the select() call should wait if no
        data is available.

        This method should be called periodically to check and process
        incoming data, if there are any. If that seems boring, look
        at the process_forever method.
        """
        with self.mutex:
            log.log(logging.DEBUG - 2, "process_once()")
            sockets = [x.socket for x in self.connections if x is not None]
            sockets = [x for x in sockets if x is not None]
            if sockets:
                (i, o, e) = select.select(sockets, [], [], timeout)
                self.process_data(i)
            else:
                # No sockets to watch; just honor the timeout.
                time.sleep(timeout)
            self.process_timeout()

    def process_forever(self, timeout=0.2):
        """Run an infinite loop, processing data from connections.

        This method repeatedly calls process_once.

        Arguments:
        timeout -- Parameter to pass to process_once.
        """
        # This loop should specifically *not* be mutex-locked.
        # Otherwise no other thread would ever be able to change
        # the shared state of an IRC object running this function.
        log.debug("process_forever(timeout=%s)", timeout)
        while True:
            self.process_once(timeout)

    def disconnect_all(self, message=""):
        """Disconnects all connections."""
        with self.mutex:
            for c in self.connections:
                c.disconnect(message)

    def add_global_handler(self, event, handler, priority=0):
        """Adds a global handler function for a specific event type.

        Arguments:
        event -- Event type (a string). Check the values of
        numeric_events for possible event types.
        handler -- Callback function taking 'connection' and 'event'
        parameters.
        priority -- A number (the lower number, the higher priority).

        The handler function is called whenever the specified event is
        triggered in any of the connections. See documentation for
        the Event class.

        The handler functions are called in priority order (lowest
        number is highest priority). If a handler function returns
        "NO MORE", no more handlers will be called.
        """
        handler = PrioritizedHandler(priority, handler)
        with self.mutex:
            event_handlers = self.handlers.setdefault(event, [])
            # insort keeps the handler list sorted by priority.
            bisect.insort(event_handlers, handler)

    def remove_global_handler(self, event, handler):
        """Removes a global handler function.

        Arguments:
        event -- Event type (a string).
        handler -- Callback function.

        Returns 1 on success, otherwise 0.
        """
        with self.mutex:
            if event not in self.handlers:
                return 0
            # Iterate over a copy: removing from the list being iterated
            # would silently skip elements when several registered
            # handlers share the same callback.
            for h in list(self.handlers[event]):
                if handler == h.callback:
                    self.handlers[event].remove(h)
            return 1

    def execute_at(self, at, function, arguments=()):
        """Execute a function at a specified time.

        Arguments:
        at -- Execute at this time (standard "time_t" time).
        function -- Function to call.
        arguments -- Arguments to give the function.
        """
        function = functools.partial(function, *arguments)
        command = schedule.DelayedCommand.at_time(at, function)
        self._schedule_command(command)

    def execute_delayed(self, delay, function, arguments=()):
        """
        Execute a function after a specified time.

        delay -- How many seconds to wait.
        function -- Function to call.
        arguments -- Arguments to give the function.
        """
        function = functools.partial(function, *arguments)
        command = schedule.DelayedCommand.after(delay, function)
        self._schedule_command(command)

    def execute_every(self, period, function, arguments=()):
        """
        Execute a function every 'period' seconds.

        period -- How often to run (always waits this long for first).
        function -- Function to call.
        arguments -- Arguments to give the function.
        """
        function = functools.partial(function, *arguments)
        command = schedule.PeriodicCommand.after(period, function)
        self._schedule_command(command)

    def _schedule_command(self, command):
        # Keep the queue sorted by due time and notify any external loop
        # of the next wake-up delay.
        with self.mutex:
            bisect.insort(self.delayed_commands, command)
            self._on_schedule(util.total_seconds(command.delay))

    def dcc(self, dcctype="chat"):
        """Creates and returns a DCCConnection object.

        Arguments:
        dcctype -- "chat" for DCC CHAT connections or "raw" for
        DCC SEND (or other DCC types). If "chat",
        incoming data will be split in newline-separated
        chunks. If "raw", incoming data is not touched.
        """
        with self.mutex:
            c = DCCConnection(self, dcctype)
            self.connections.append(c)
        return c

    def _handle_event(self, connection, event):
        """
        Handle an Event event incoming on ServerConnection connection.
        """
        with self.mutex:
            h = self.handlers
            # "all_events" handlers see everything, merged in priority
            # order with the handlers for this specific event type.
            matching_handlers = sorted(
                h.get("all_events", []) +
                h.get(event.type, [])
            )
            for handler in matching_handlers:
                result = handler.callback(connection, event)
                if result == "NO MORE":
                    return

    def _remove_connection(self, connection):
        """[Internal]"""
        with self.mutex:
            self.connections.remove(connection)
            self._on_disconnect(connection.socket)
# RFC 1459 message grammar: an optional ':<prefix> ' part, a mandatory
# command word, then an optional trailing argument section.  The pattern
# contains no backslash escapes, so the non-raw string literal is harmless.
_rfc_1459_command_regexp = re.compile("^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?")
class Connection(object):
    """
    Base class for IRC connections.
    """
    # NOTE(review): on Python 3 a ``__metaclass__`` class attribute is
    # ignored (metaclasses must be declared in the class header), so the
    # ABCMeta machinery is only effective under Python 2 -- confirm which
    # interpreters this file still targets before relying on it.
    __metaclass__ = abc.ABCMeta

    @abc.abstractproperty
    def socket(self):
        "The socket for this connection"

    def __init__(self, irclibobj):
        # The owning IRC object; provides the event loop and scheduling.
        self.irclibobj = irclibobj

    ##############################
    ### Convenience wrappers.

    def execute_at(self, at, function, arguments=()):
        # Delegate to the owning IRC object's scheduler.
        self.irclibobj.execute_at(at, function, arguments)

    def execute_delayed(self, delay, function, arguments=()):
        self.irclibobj.execute_delayed(delay, function, arguments)

    def execute_every(self, period, function, arguments=()):
        self.irclibobj.execute_every(period, function, arguments)
class ServerConnectionError(IRCError):
    """Raised when establishing or using a server connection fails."""
class ServerNotConnectedError(ServerConnectionError):
    """Raised when an operation requires a connection that is not open."""
class ServerConnection(Connection):
    """
    An IRC server connection.

    ServerConnection objects are instantiated by calling the server
    method on an IRC object.
    """
    # Class used to accumulate and decode incoming byte stream into lines.
    buffer_class = buffer.DecodingLineBuffer
    socket = None

    def __init__(self, irclibobj):
        super(ServerConnection, self).__init__(irclibobj)
        self.connected = False
        self.features = features.FeatureSet()

    # save the method args to allow for easier reconnection.
    @irc_functools.save_method_args
    def connect(self, server, port, nickname, password=None, username=None,
            ircname=None, connect_factory=connection.Factory()):
        """Connect/reconnect to a server.

        Arguments:
        server -- Server name.
        port -- Port number.
        nickname -- The nickname.
        password -- Password (if any).
        username -- The username.
        ircname -- The IRC name ("realname").
        server_address -- The remote host/port of the server.
        connect_factory -- A callable that takes the server address and
        returns a connection (with a socket interface).

        This function can be called to reconnect a closed connection.

        Returns the ServerConnection object.
        """
        # NOTE(review): the default connect_factory is a single instance
        # created at import time and shared by every ServerConnection that
        # does not pass its own -- confirm Factory instances are stateless.
        log.debug("connect(server=%r, port=%r, nickname=%r, ...)", server,
            port, nickname)
        if self.connected:
            self.disconnect("Changing servers")
        self.buffer = self.buffer_class()
        self.handlers = {}
        self.real_server_name = ""
        self.real_nickname = nickname
        self.server = server
        self.port = port
        self.server_address = (server, port)
        self.nickname = nickname
        self.username = username or nickname
        self.ircname = ircname or nickname
        self.password = password
        self.connect_factory = connect_factory
        try:
            self.socket = self.connect_factory(self.server_address)
        except socket.error as err:
            raise ServerConnectionError("Couldn't connect to socket: %s" % err)
        self.connected = True
        self.irclibobj._on_connect(self.socket)
        # Log on...
        if self.password:
            self.pass_(self.password)
        self.nick(self.nickname)
        self.user(self.username, self.ircname)
        return self

    def reconnect(self):
        """
        Reconnect with the last arguments passed to self.connect()
        """
        # _saved_connect is populated by the save_method_args decorator.
        self.connect(*self._saved_connect.args, **self._saved_connect.kwargs)

    def close(self):
        """Close the connection.

        This method closes the connection permanently; after it has
        been called, the object is unusable.
        """
        # Without this thread lock, there is a window during which
        # select() can find a closed socket, leading to an EBADF error.
        with self.irclibobj.mutex:
            self.disconnect("Closing object")
            self.irclibobj._remove_connection(self)

    def get_server_name(self):
        """Get the (real) server name.

        This method returns the (real) server name, or, more
        specifically, what the server calls itself.
        """
        if self.real_server_name:
            return self.real_server_name
        else:
            return ""

    def get_nickname(self):
        """Get the (real) nick name.

        This method returns the (real) nickname. The library keeps
        track of nick changes, so it might not be the nick name that
        was passed to the connect() method. """
        return self.real_nickname

    def process_data(self):
        "read and process input from self.socket"
        try:
            # Prefer a 'read' method when the socket object provides one
            # (presumably for wrapped/SSL sockets -- confirm); plain
            # sockets fall back to recv.
            reader = getattr(self.socket, 'read', self.socket.recv)
            new_data = reader(2 ** 14)
        except socket.error:
            # The server hung up.
            self.disconnect("Connection reset by peer")
            return
        if not new_data:
            # Read nothing: connection must be down.
            self.disconnect("Connection reset by peer")
            return

        self.buffer.feed(new_data)

        # process each non-empty line after logging all lines
        for line in self.buffer:
            log.debug("FROM SERVER: %s", line)
            if not line: continue
            self._process_line(line)

    def _process_line(self, line):
        # Parse one complete server line into (prefix, command, arguments)
        # and dispatch the corresponding events.
        prefix = None
        command = None
        arguments = None
        # Give "all_raw_messages" handlers the untouched line first.
        self._handle_event(Event("all_raw_messages",
                                 self.get_server_name(),
                                 None,
                                 [line]))

        # NOTE(review): the match result is used without a None check.
        # process_data() skips empty lines and the pattern's command group
        # matches any non-empty line, so this appears safe -- confirm.
        m = _rfc_1459_command_regexp.match(line)
        if m.group("prefix"):
            prefix = m.group("prefix")
            # The first prefix seen is taken to be the server's own name.
            if not self.real_server_name:
                self.real_server_name = prefix

        if m.group("command"):
            command = m.group("command").lower()

        if m.group("argument"):
            # Split off the trailing parameter (after " :"); everything
            # before it is whitespace-separated middle parameters.
            a = m.group("argument").split(" :", 1)
            arguments = a[0].split()
            if len(a) == 2:
                arguments.append(a[1])

        # Translate numerics into more readable strings.
        command = events.numeric.get(command, command)

        if command == "nick":
            # Track our own nick changes.
            if NickMask(prefix).nick == self.real_nickname:
                self.real_nickname = arguments[0]
        elif command == "welcome":
            # Record the nickname in case the client changed nick
            # in a nicknameinuse callback.
            self.real_nickname = arguments[0]
        elif command == "featurelist":
            self.features.load(arguments)

        if command in ["privmsg", "notice"]:
            target, message = arguments[0], arguments[1]
            messages = _ctcp_dequote(message)

            if command == "privmsg":
                if is_channel(target):
                    command = "pubmsg"
            else:
                if is_channel(target):
                    command = "pubnotice"
                else:
                    command = "privnotice"

            # NOTE: the loop variable m shadows the regex match object
            # above; the match is no longer needed at this point.
            for m in messages:
                if isinstance(m, tuple):
                    # Tuples from _ctcp_dequote are CTCP-tagged messages.
                    if command in ["privmsg", "pubmsg"]:
                        command = "ctcp"
                    else:
                        command = "ctcpreply"

                    m = list(m)
                    log.debug("command: %s, source: %s, target: %s, "
                        "arguments: %s", command, prefix, target, m)
                    self._handle_event(Event(command, NickMask(prefix), target, m))
                    if command == "ctcp" and m[0] == "ACTION":
                        self._handle_event(Event("action", prefix, target, m[1:]))
                else:
                    log.debug("command: %s, source: %s, target: %s, "
                        "arguments: %s", command, prefix, target, [m])
                    self._handle_event(Event(command, NickMask(prefix), target, [m]))
        else:
            target = None

            if command == "quit":
                arguments = [arguments[0]]
            elif command == "ping":
                target = arguments[0]
            else:
                target = arguments[0]
                arguments = arguments[1:]

            if command == "mode":
                if not is_channel(target):
                    command = "umode"

            log.debug("command: %s, source: %s, target: %s, "
                "arguments: %s", command, prefix, target, arguments)
            self._handle_event(Event(command, NickMask(prefix), target, arguments))

    def _handle_event(self, event):
        """[Internal]"""
        # Global handlers first, then this connection's own handlers.
        self.irclibobj._handle_event(self, event)
        if event.type in self.handlers:
            for fn in self.handlers[event.type]:
                fn(self, event)

    def is_connected(self):
        """Return connection status.

        Returns true if connected, otherwise false.
        """
        return self.connected

    def add_global_handler(self, *args):
        """Add global handler.

        See documentation for IRC.add_global_handler.
        """
        self.irclibobj.add_global_handler(*args)

    def remove_global_handler(self, *args):
        """Remove global handler.

        See documentation for IRC.remove_global_handler.
        """
        self.irclibobj.remove_global_handler(*args)

    def action(self, target, action):
        """Send a CTCP ACTION command."""
        self.ctcp("ACTION", target, action)

    def admin(self, server=""):
        """Send an ADMIN command."""
        self.send_raw(" ".join(["ADMIN", server]).strip())

    def cap(self, subcommand, *args):
        """
        Send a CAP command according to `the spec
        <http://ircv3.atheme.org/specification/capability-negotiation-3.1>`_.

        Arguments:

            subcommand -- LS, LIST, REQ, ACK, CLEAR, END
            args -- capabilities, if required for given subcommand

        Example:

            .cap('LS')
            .cap('REQ', 'multi-prefix', 'sasl')
            .cap('END')
        """
        cap_subcommands = set('LS LIST REQ ACK NAK CLEAR END'.split())
        # NOTE(review): set('NAK') is the set of characters
        # {'N', 'A', 'K'}, not {'NAK'}, so NAK is never actually removed
        # here -- presumably a bug; confirm the intent was set(['NAK']).
        client_subcommands = set(cap_subcommands) - set('NAK')
        assert subcommand in client_subcommands, "invalid subcommand"

        def _multi_parameter(args):
            """
            According to the spec::

                If more than one capability is named, the RFC1459 designated
                sentinel (:) for a multi-parameter argument must be present.

            It's not obvious where the sentinel should be present or if it must
            be omitted for a single parameter, so follow convention and only
            include the sentinel prefixed to the first parameter if more than
            one parameter is present.
            """
            if len(args) > 1:
                return (':' + args[0],) + args[1:]
            return args

        args = _multi_parameter(args)
        self.send_raw(' '.join(('CAP', subcommand) + args))

    def ctcp(self, ctcptype, target, parameter=""):
        """Send a CTCP command."""
        ctcptype = ctcptype.upper()
        # \001 delimits a CTCP-tagged message.
        self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or ""))

    def ctcp_reply(self, target, parameter):
        """Send a CTCP REPLY command."""
        self.notice(target, "\001%s\001" % parameter)

    def disconnect(self, message=""):
        """Hang up the connection.

        Arguments:
        message -- Quit message.
        """
        if not self.connected:
            return

        # Set to the int 0 here, whereas connect() assigns True; both are
        # only ever tested for truthiness.
        self.connected = 0

        self.quit(message)

        try:
            self.socket.shutdown(socket.SHUT_WR)
            self.socket.close()
        except socket.error:
            pass
        # After this del, attribute lookup falls back to the class-level
        # ``socket = None``.
        del self.socket
        self._handle_event(Event("disconnect", self.server, "", [message]))

    def globops(self, text):
        """Send a GLOBOPS command."""
        self.send_raw("GLOBOPS :" + text)

    def info(self, server=""):
        """Send an INFO command."""
        self.send_raw(" ".join(["INFO", server]).strip())

    def invite(self, nick, channel):
        """Send an INVITE command."""
        self.send_raw(" ".join(["INVITE", nick, channel]).strip())

    def ison(self, nicks):
        """Send an ISON command.

        Arguments:
        nicks -- List of nicks.
        """
        self.send_raw("ISON " + " ".join(nicks))

    def join(self, channel, key=""):
        """Send a JOIN command."""
        self.send_raw("JOIN %s%s" % (channel, (key and (" " + key))))

    def kick(self, channel, nick, comment=""):
        """Send a KICK command."""
        self.send_raw("KICK %s %s%s" % (channel, nick, (comment and (" :" + comment))))

    def links(self, remote_server="", server_mask=""):
        """Send a LINKS command."""
        command = "LINKS"
        if remote_server:
            command = command + " " + remote_server
        if server_mask:
            command = command + " " + server_mask
        self.send_raw(command)

    def list(self, channels=None, server=""):
        """Send a LIST command."""
        command = "LIST"
        if channels:
            command = command + " " + ",".join(channels)
        if server:
            command = command + " " + server
        self.send_raw(command)

    def lusers(self, server=""):
        """Send a LUSERS command."""
        self.send_raw("LUSERS" + (server and (" " + server)))

    def mode(self, target, command):
        """Send a MODE command."""
        self.send_raw("MODE %s %s" % (target, command))

    def motd(self, server=""):
        """Send an MOTD command."""
        self.send_raw("MOTD" + (server and (" " + server)))

    def names(self, channels=None):
        """Send a NAMES command."""
        self.send_raw("NAMES" + (channels and (" " + ",".join(channels)) or ""))

    def nick(self, newnick):
        """Send a NICK command."""
        self.send_raw("NICK " + newnick)

    def notice(self, target, text):
        """Send a NOTICE command."""
        # Should limit len(text) here!
        self.send_raw("NOTICE %s :%s" % (target, text))

    def oper(self, nick, password):
        """Send an OPER command."""
        self.send_raw("OPER %s %s" % (nick, password))

    def part(self, channels, message=""):
        """Send a PART command."""
        # always_iterable lets callers pass a single channel name or a list.
        channels = util.always_iterable(channels)
        cmd_parts = [
            'PART',
            ','.join(channels),
        ]
        if message: cmd_parts.append(message)
        self.send_raw(' '.join(cmd_parts))

    def pass_(self, password):
        """Send a PASS command."""
        self.send_raw("PASS " + password)

    def ping(self, target, target2=""):
        """Send a PING command."""
        self.send_raw("PING %s%s" % (target, target2 and (" " + target2)))

    def pong(self, target, target2=""):
        """Send a PONG command."""
        self.send_raw("PONG %s%s" % (target, target2 and (" " + target2)))

    def privmsg(self, target, text):
        """Send a PRIVMSG command."""
        self.send_raw("PRIVMSG %s :%s" % (target, text))

    def privmsg_many(self, targets, text):
        """Send a PRIVMSG command to multiple targets."""
        target = ','.join(targets)
        return self.privmsg(target, text)

    def quit(self, message=""):
        """Send a QUIT command."""
        # Note that many IRC servers don't use your QUIT message
        # unless you've been connected for at least 5 minutes!
        self.send_raw("QUIT" + (message and (" :" + message)))

    def send_raw(self, string):
        """Send raw string to the server.

        The string will be padded with appropriate CR LF.
        """
        # The string should not contain any carriage return other than the
        # one added here.
        # NOTE(review): only '\n' is rejected below even though the error
        # message talks about carriage returns; a lone '\r' passes through.
        if '\n' in string:
            raise InvalidCharacters(
                "Carriage returns not allowed in privmsg(text)")
        bytes = string.encode('utf-8') + b'\r\n'
        # According to the RFC http://tools.ietf.org/html/rfc2812#page-6,
        # clients should not transmit more than 512 bytes.
        if len(bytes) > 512:
            raise MessageTooLong(
                "Messages limited to 512 bytes including CR/LF")
        if self.socket is None:
            raise ServerNotConnectedError("Not connected.")
        # Prefer a 'write' method when available (mirrors the 'read'
        # preference in process_data); plain sockets use send.
        sender = getattr(self.socket, 'write', self.socket.send)
        try:
            sender(bytes)
            log.debug("TO SERVER: %s", string)
        except socket.error:
            # Ouch!
            self.disconnect("Connection reset by peer.")

    def squit(self, server, comment=""):
        """Send an SQUIT command."""
        self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment)))

    def stats(self, statstype, server=""):
        """Send a STATS command."""
        self.send_raw("STATS %s%s" % (statstype, server and (" " + server)))

    def time(self, server=""):
        """Send a TIME command."""
        self.send_raw("TIME" + (server and (" " + server)))

    def topic(self, channel, new_topic=None):
        """Send a TOPIC command."""
        if new_topic is None:
            self.send_raw("TOPIC " + channel)
        else:
            self.send_raw("TOPIC %s :%s" % (channel, new_topic))

    def trace(self, target=""):
        """Send a TRACE command."""
        self.send_raw("TRACE" + (target and (" " + target)))

    def user(self, username, realname):
        """Send a USER command."""
        self.send_raw("USER %s 0 * :%s" % (username, realname))

    def userhost(self, nicks):
        """Send a USERHOST command."""
        self.send_raw("USERHOST " + ",".join(nicks))

    def users(self, server=""):
        """Send a USERS command."""
        self.send_raw("USERS" + (server and (" " + server)))

    def version(self, server=""):
        """Send a VERSION command."""
        self.send_raw("VERSION" + (server and (" " + server)))

    def wallops(self, text):
        """Send a WALLOPS command."""
        self.send_raw("WALLOPS :" + text)

    def who(self, target="", op=""):
        """Send a WHO command."""
        self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o")))

    def whois(self, targets):
        """Send a WHOIS command."""
        self.send_raw("WHOIS " + ",".join(targets))

    def whowas(self, nick, max="", server=""):
        """Send a WHOWAS command."""
        self.send_raw("WHOWAS %s%s%s" % (nick,
            max and (" " + max),
            server and (" " + server)))

    def set_rate_limit(self, frequency):
        """
        Set a `frequency` limit (messages per second) for this connection.
        Any attempts to send faster than this rate will block.
        """
        # Wraps (and replaces) send_raw on this instance only.
        self.send_raw = Throttler(self.send_raw, frequency)

    def set_keepalive(self, interval):
        """
        Set a keepalive to occur every ``interval`` on this connection.
        """
        pinger = functools.partial(self.ping, 'keep-alive')
        self.irclibobj.execute_every(period=interval, function=pinger)
class Throttler(object):
    """
    Rate-limit a function (or other callable).

    Calls are allowed at most ``max_rate`` times per second; faster
    attempts block in ``sleep`` until the minimum gap has elapsed.
    """

    def __init__(self, func, max_rate=float('Inf')):
        # Unwrap an already-throttled callable so nesting never stacks.
        if isinstance(func, Throttler):
            func = func.func
        self.func = func
        self.max_rate = max_rate
        self.reset()

    def reset(self):
        # Epoch 0 guarantees the first call proceeds immediately.
        self.last_called = 0

    def __call__(self, *args, **kwargs):
        # Enforce a minimum gap of 1/max_rate seconds between calls.
        minimum_gap = 1 / self.max_rate
        elapsed = time.time() - self.last_called
        if elapsed < minimum_gap:
            time.sleep(minimum_gap - elapsed)
        self.last_called = time.time()
        return self.func(*args, **kwargs)
class DCCConnectionError(IRCError):
    """Raised when a DCC connection cannot be established or bound."""
class DCCConnection(Connection):
    """
    A DCC (Direct Client Connection).

    DCCConnection objects are instantiated by calling the dcc
    method on an IRC object.
    """
    socket = None

    def __init__(self, irclibobj, dcctype):
        super(DCCConnection, self).__init__(irclibobj)
        self.connected = 0
        # passive == 1 means we listen() and wait for the peer to connect.
        self.passive = 0
        self.dcctype = dcctype
        self.peeraddress = None
        self.peerport = None

    def connect(self, address, port):
        """Connect/reconnect to a DCC peer.

        Arguments:
        address -- Host/IP address of the peer.
        port -- The port number to connect to.

        Returns the DCCConnection object.
        """
        self.peeraddress = socket.gethostbyname(address)
        self.peerport = port
        # NOTE(review): ``LineBuffer`` is referenced unqualified here; this
        # file's visible imports only provide ``from . import buffer``, so
        # LineBuffer is presumably imported or defined elsewhere in this
        # module -- verify.
        self.buffer = LineBuffer()
        self.handlers = {}
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.passive = 0
        try:
            self.socket.connect((self.peeraddress, self.peerport))
        except socket.error as x:
            raise DCCConnectionError("Couldn't connect to socket: %s" % x)
        self.connected = 1
        self.irclibobj._on_connect(self.socket)
        return self

    def listen(self):
        """Wait for a connection/reconnection from a DCC peer.

        Returns the DCCConnection object.

        The local IP address and port are available as
        self.localaddress and self.localport. After connection from a
        peer, the peer address and port are available as
        self.peeraddress and self.peerport.
        """
        self.buffer = LineBuffer()
        self.handlers = {}
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.passive = 1
        try:
            # Bind to an OS-assigned port (0) on this host's address.
            self.socket.bind((socket.gethostbyname(socket.gethostname()), 0))
            self.localaddress, self.localport = self.socket.getsockname()
            self.socket.listen(10)
        except socket.error as x:
            raise DCCConnectionError("Couldn't bind socket: %s" % x)
        return self

    def disconnect(self, message=""):
        """Hang up the connection and close the object.

        Arguments:
        message -- Quit message.
        """
        if not self.connected:
            return

        self.connected = 0
        try:
            self.socket.shutdown(socket.SHUT_WR)
            self.socket.close()
        except socket.error:
            pass
        # After this del, attribute lookup falls back to the class-level
        # ``socket = None``.
        del self.socket
        self.irclibobj._handle_event(
            self,
            Event("dcc_disconnect", self.peeraddress, "", [message]))
        self.irclibobj._remove_connection(self)

    def process_data(self):
        """[Internal]"""
        if self.passive and not self.connected:
            # First readiness on a listening socket: accept the peer and
            # replace the listening socket with the accepted one.
            conn, (self.peeraddress, self.peerport) = self.socket.accept()
            self.socket.close()
            self.socket = conn
            self.connected = 1
            log.debug("DCC connection from %s:%d", self.peeraddress,
                self.peerport)
            self.irclibobj._handle_event(
                self,
                Event("dcc_connect", self.peeraddress, None, None))
            return

        try:
            new_data = self.socket.recv(2 ** 14)
        except socket.error:
            # The server hung up.
            self.disconnect("Connection reset by peer")
            return
        if not new_data:
            # Read nothing: connection must be down.
            self.disconnect("Connection reset by peer")
            return

        if self.dcctype == "chat":
            self.buffer.feed(new_data)

            chunks = list(self.buffer)

            # Guard against a peer that streams >16k without a newline.
            if len(self.buffer) > 2 ** 14:
                # Bad peer! Naughty peer!
                log.info("Received >16k from a peer without a newline; "
                    "disconnecting.")
                self.disconnect()
                return
        else:
            chunks = [new_data]

        command = "dccmsg"
        prefix = self.peeraddress
        target = None
        for chunk in chunks:
            log.debug("FROM PEER: %s", chunk)
            arguments = [chunk]
            log.debug("command: %s, source: %s, target: %s, arguments: %s",
                command, prefix, target, arguments)
            self.irclibobj._handle_event(
                self,
                Event(command, prefix, target, arguments))

    def privmsg(self, text):
        """
        Send text to DCC peer.

        The text will be padded with a newline if it's a DCC CHAT session.
        """
        if self.dcctype == 'chat':
            text += '\n'
        bytes = text.encode('utf-8')
        return self.send_bytes(bytes)

    def send_bytes(self, bytes):
        """
        Send data to DCC peer.
        """
        # Parameter name shadows the ``bytes`` builtin (kept for
        # compatibility with existing callers).
        try:
            self.socket.send(bytes)
            log.debug("TO PEER: %r\n", bytes)
        except socket.error:
            self.disconnect("Connection reset by peer.")
class SimpleIRCClient(object):
    """A simple single-server IRC client class.

    This is an example of an object-oriented wrapper of the IRC
    framework.  A real IRC client can be made by subclassing this
    class and adding appropriate methods.

    The method on_join will be called when a "join" event is created
    (which is done when the server sends a JOIN message/command),
    on_privmsg will be called for "privmsg" events, and so on.  The
    handler methods get two arguments: the connection object (same as
    self.connection) and the event object.

    Instance attributes that can be used by sub classes:

    ircobj -- The IRC instance.
    connection -- The ServerConnection instance.
    dcc_connections -- A list of DCCConnection instances.
    """

    def __init__(self):
        self.ircobj = IRC()
        self.connection = self.ircobj.server()
        self.dcc_connections = []
        self.ircobj.add_global_handler("all_events", self._dispatcher, -10)
        self.ircobj.add_global_handler(
            "dcc_disconnect", self._dcc_disconnect, -10)

    def _dispatcher(self, connection, event):
        """Dispatch events to the on_<event.type> method, if present."""
        log.debug("_dispatcher: %s", event.type)

        def _ignore(connection, event):
            return None

        handler = getattr(self, "on_" + event.type, _ignore)
        handler(connection, event)

    def _dcc_disconnect(self, c, e):
        # Forget the DCC connection that just went away.
        self.dcc_connections.remove(c)

    def connect(self, *args, **kwargs):
        """Connect using the underlying connection."""
        self.connection.connect(*args, **kwargs)

    def dcc_connect(self, address, port, dcctype="chat"):
        """Connect to a DCC peer.

        Arguments:
        address -- IP address of the peer.
        port -- Port to connect to.

        Returns a DCCConnection instance.
        """
        conn = self.ircobj.dcc(dcctype)
        self.dcc_connections.append(conn)
        conn.connect(address, port)
        return conn

    def dcc_listen(self, dcctype="chat"):
        """Listen for connections from a DCC peer.

        Returns a DCCConnection instance.
        """
        conn = self.ircobj.dcc(dcctype)
        self.dcc_connections.append(conn)
        conn.listen()
        return conn

    def start(self):
        """Start the IRC client (runs the event loop forever)."""
        self.ircobj.process_forever()
class Event(object):
    "An IRC event."

    def __init__(self, type, source, target, arguments=None):
        """
        Initialize an Event.

        Arguments:
        type -- A string describing the event.
        source -- The originator of the event (a nick mask or a server).
        target -- The target of the event (a nick or a channel).
        arguments -- Any event-specific arguments.
        """
        self.type = type
        self.source = source
        self.target = target
        # Build a fresh list per event; a shared default would leak state.
        self.arguments = [] if arguments is None else arguments
# CTCP low-level quoting constants (per the CTCP specification):
# \020 (DLE) is the low-level quote character, \134 is a backslash
# (the CTCP-level quote), and \001 (Ctrl-A) delimits tagged messages.
_LOW_LEVEL_QUOTE = "\020"
_CTCP_LEVEL_QUOTE = "\134"
_CTCP_DELIMITER = "\001"

# Mapping from a quoted character to the character it stands for.
_low_level_mapping = {
    "0": "\000",
    "n": "\n",
    "r": "\r",
    _LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE
}

# Matches the low-level quote character followed by any single character.
_low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)")
def mask_matches(nick, mask):
    """Check if a nick matches a mask.

    Arguments:
    nick -- The nickname (or full nick mask) to test.
    mask -- An IRC mask, where '?' matches any single character and
    '*' matches any run of characters.

    Returns a truthy value (a match object) if the nick matches,
    otherwise a falsy value (None).
    """
    nick = strings.lower(nick)
    mask = strings.lower(mask)
    # Escape every regex metacharacter -- the previous hand-rolled escape
    # list missed some, e.g. '^', which is a legal nick character -- then
    # translate the IRC wildcards into their regex equivalents.
    pattern = re.escape(mask).replace(r'\?', '.').replace(r'\*', '.*')
    r = re.compile(pattern, re.IGNORECASE)
    return r.match(nick)
# Characters legal in nicknames beyond letters and digits (RFC 2812
# "special" characters).
_special = "-[]\\`^{}"
nick_characters = string.ascii_letters + string.digits + _special
def _ctcp_dequote(message):
    """[Internal] Dequote a message according to CTCP specifications.

    The function returns a list where each element can be either a
    string (normal message) or a tuple of one or two strings (tagged
    messages). If a tuple has only one element (ie is a singleton),
    that element is the tag; otherwise the tuple has two elements: the
    tag and the data.

    Arguments:
    message -- The message to be decoded.
    """

    def _low_level_replace(match_obj):
        ch = match_obj.group(1)

        # If low_level_mapping doesn't have the character as key, we
        # should just return the character.
        return _low_level_mapping.get(ch, ch)

    if _LOW_LEVEL_QUOTE in message:
        # Yup, there was a quote. Release the dequoter, man!
        message = _low_level_regexp.sub(_low_level_replace, message)

    if _CTCP_DELIMITER not in message:
        # Fast path: no CTCP tagging at all.
        return [message]
    else:
        # Split it into parts. (Does any IRC client actually *use*
        # CTCP stacking like this?)
        chunks = message.split(_CTCP_DELIMITER)

        messages = []
        i = 0
        # Chunks alternate between plain text (even indices) and CTCP
        # tagged payloads (odd indices), so we advance two at a time.
        while i < len(chunks) - 1:
            # Add message if it's non-empty.
            if len(chunks[i]) > 0:
                messages.append(chunks[i])

            if i < len(chunks) - 2:
                # Aye! CTCP tagged data ahead!
                # Split at most once: first word is the tag, the rest
                # (if any) is the data.
                messages.append(tuple(chunks[i + 1].split(" ", 1)))

            i = i + 2

        if len(chunks) % 2 == 0:
            # Hey, a lonely _CTCP_DELIMITER at the end!  This means
            # that the last chunk, including the delimiter, is a
            # normal message!  (This is according to the CTCP
            # specification.)
            messages.append(_CTCP_DELIMITER + chunks[-1])

        return messages
def is_channel(string):
    """Check if a string is a channel name.

    Returns true if the argument is a channel name, otherwise false.
    """
    if not string:
        return string
    return string[0] in "#&+!"
def ip_numstr_to_quad(num):
    """
    Convert an IP number as an integer given in ASCII
    representation to an IP address string.

    >>> ip_numstr_to_quad('3232235521')
    '192.168.0.1'

    >>> ip_numstr_to_quad(3232235521)
    '192.168.0.1'
    """
    # Pack as a big-endian 32-bit integer, then read back the 4 octets.
    packed = struct.pack('>L', int(num))
    octets = struct.unpack('BBBB', packed)
    return ".".join(str(octet) for octet in octets)
def ip_quad_to_numstr(quad):
    """
    Convert an IP address string (e.g. '192.168.0.1') to an IP
    number as a base-10 integer given in ASCII representation.

    >>> ip_quad_to_numstr('192.168.0.1')
    '3232235521'
    """
    octets = [int(part) for part in quad.split(".")]
    packed = struct.pack('BBBB', *octets)
    [number] = struct.unpack('>L', packed)
    return str(number)
class NickMask(six.text_type):
    """
    A nickmask (the source of an Event), e.g. 'pinky!username@example.com'.

    >>> nm = NickMask('pinky!username@example.com')
    >>> print(nm.nick)
    pinky

    >>> print(nm.host)
    example.com

    >>> print(nm.user)
    username
    """
    @classmethod
    def from_params(cls, nick, user, host):
        """Assemble a mask from its three components."""
        return cls('{nick}!{user}@{host}'.format(**vars()))

    @property
    def nick(self):
        """The nick part (everything before the first "!")."""
        parts = self.split("!")
        return parts[0]

    @property
    def userhost(self):
        """The user@host part (everything after the first "!")."""
        parts = self.split("!")
        return parts[1]

    @property
    def host(self):
        """The host part (everything after the "@")."""
        parts = self.split("@")
        return parts[1]

    @property
    def user(self):
        """The user part of user@host."""
        parts = self.userhost.split("@")
        return parts[0]
def _ping_ponger(connection, event):
"A global handler for the 'ping' event"
connection.pong(event.target)
# for backward compatibility
# (older code imported these names from this module directly; they now
# live in the ``buffer`` module)
LineBuffer = buffer.LineBuffer
DecodingLineBuffer = buffer.DecodingLineBuffer
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for chef_validator.api.v1.actions """
from mock import mock
from oslo_config import cfg
from chef_validator.api.v1.actions import ValidateController
from chef_validator.tests.unit.base import ValidatorTestCase
CONF = cfg.CONF
# Register the docker client option group (e.g. ``url``) so the tests
# below can override its values.
CONF.import_group('clients_docker', 'chef_validator.clients.docker_client')
class ActionControllerTestCase(ValidatorTestCase):
    """Tests for class ValidateController """
    def setUp(self):
        """Create a ValidateController instance """
        super(ActionControllerTestCase, self).setUp()
        # Point the docker client at a dummy URL so no real connection
        # is attempted while constructing the controller.
        CONF.set_override('url', "url", group='clients_docker')
        self.action = ValidateController()
    def test_validate(self):
        """Tests for method validate """
        # Stub the validator engine so validate() returns a canned value
        # instead of actually running a cookbook validation.
        self.action.ve.validate_cookbook = mock.MagicMock(return_value="OK")
        req = "MyInput"
        body = {"cookbook": "fakecb", "image": "fakeimg"}
        expected = "OK"
        observed = self.action.validate(req, body)
        self.assertEqual(expected, observed)
    def tearDown(self):
        """Cleanup the ValidateController instance """
        super(ActionControllerTestCase, self).tearDown()
        # NOTE(review): self.m looks like a mox stub manager provided by
        # ValidatorTestCase -- confirm it exists on the base class.
        self.m.UnsetStubs()
        self.m.ResetAll()
|
# -*- coding: utf-8 -*-
# @Time : 2021/1/22 5:17 下午
# @Author : islander
# @File : vanilla_train.py
# @Software: PyCharm
import tensorflow as tf
from typing import Dict
# wrapper of train, replace ph and sess with model.Din
def train_by_net(net, **kwargs):
    """Convenience wrapper around :func:`train` that pulls the session,
    placeholders and output nodes from a model object (e.g. model.Din).
    """
    return train(
        sess=net.session,
        fea_ph=net.features_ph,
        label_ph=net.labels_ph,
        outputs=net.outputs,
        **kwargs
    )
def train(
        sess: tf.Session,
        fea_ph: Dict[str, tf.Tensor], label_ph: tf.Tensor, outputs: Dict[str, tf.Tensor],
        input_fn,
        train_steps,
        verbose=False
):
    """train model by input_fn, without any log or checkpoint

    Args:
        sess: the session to be run
        input_fn: generator to produce inputs
        fea_ph: feature input placeholder of the computation graph
        label_ph: label input placeholder of the computation graph
        outputs: dict of output nodes in the computation graph
        train_steps: maximum training steps
        verbose: whether to print progress

    Returns:
        True if input_fn has data else False
    """
    steps_count = 0
    for features, labels in input_fn(reset=False):
        # Map each feature placeholder to its batch, plus the labels.
        feed_dict = {fea_ph[name]: features[name] for name in fea_ph}
        feed_dict[label_ph] = labels
        # Run the graph for one step (outputs include the train ops).
        sess.run(outputs, feed_dict=feed_dict)
        steps_count += 1
        if verbose and steps_count % 100 == 0:
            print('{} steps passed'.format(steps_count))
        if steps_count >= train_steps:
            break
    return steps_count > 0
|
"""
### NOTICE ###
You DO NOT need to upload this file
"""
import argparse
import numpy as np
from environment import Environment
seed = 11037
def parse():
    """Build and parse the command-line arguments.

    Returns:
        argparse.Namespace with at least ``test_pg`` and ``test_dqn``.
    """
    parser = argparse.ArgumentParser(description="MLDS 2018 HW4")
    parser.add_argument('--test_pg', action='store_true', help='whether test policy gradient')
    parser.add_argument('--test_dqn', action='store_true', help='whether test DQN')
    try:
        # Project-specific extra arguments are optional.
        from argument import add_arguments
        parser = add_arguments(parser)
    except ImportError:
        # No argument.py available: run with the base options only.
        # (Previously a bare ``except`` also hid real errors raised
        # inside add_arguments itself.)
        pass
    args = parser.parse_args()
    return args
def test(agent, env, total_episodes=30):
    """Play ``total_episodes`` full episodes with the agent and report
    the per-episode and mean rewards."""
    rewards = []
    env.seed(seed)
    for i in range(total_episodes):
        state = env.reset()
        agent.init_game_setting()
        done = False
        episode_reward = 0.0

        # Play one game until the environment signals completion.
        while not done:
            action = agent.make_action(state, test=True)
            state, reward, done, info = env.step(action)
            episode_reward += reward

        rewards.append(episode_reward)
        print('[ episode ', i, '] upclipped reward :', episode_reward)
    print('Run %d episodes'%(total_episodes))
    print('Mean:', np.mean(rewards))
def run(args):
    """Dispatch to the requested evaluation(s) based on the parsed flags."""
    if args.test_pg:
        # Policy-gradient agent is evaluated on Pong.
        from agent_dir.agent_pg import Agent_PG
        env = Environment('Pong-v0', args, test=True)
        agent = Agent_PG(env, args)
        test(agent, env)
    if args.test_dqn:
        # DQN agent is evaluated on Breakout with the Atari wrappers.
        from agent_dir.agent_dqn import Agent_DQN
        env = Environment('BreakoutNoFrameskip-v4', args, atari_wrapper=True, test=True)
        agent = Agent_DQN(env, args)
        test(agent, env, total_episodes=100)
if __name__ == '__main__':
    # Parse the CLI flags and run the requested evaluations.
    args = parse()
    run(args)
|
# -*- coding: utf-8 -*-
# @Date : 2020/5/20
# @Author: Luokun
# @Email : olooook@outlook.com
import matplotlib.pyplot as plt
import numpy as np
class KNN:
    """K nearest neighbor classifier."""

    def __init__(self, k: int):
        """
        :param k: number of neighbors consulted per prediction
        """
        self.k = k
        self._X, self._Y = None, None

    def fit(self, X: np.ndarray, Y: np.ndarray):
        """Memorize the labelled training set (lazy learner, no fitting work)."""
        self._X, self._Y = X, Y

    def __call__(self, X: np.ndarray):
        """Predict a label for every row of X by majority vote among the
        k training points closest in Euclidean distance."""
        predictions = np.zeros([len(X)], dtype=int)
        for index, point in enumerate(X):
            distances = np.linalg.norm(self._X - point, axis=1)
            nearest = np.argsort(distances)[:self.k]
            votes = np.bincount(self._Y[nearest])
            predictions[index] = np.argmax(votes)
        return predictions
def load_data():
    """Generate three Gaussian clusters of 200 2-D points each, labelled 0-2."""
    offsets = [np.array([2, 2]), np.array([0, 0]), np.array([2, -2])]
    x = np.stack([np.random.randn(200, 2) + offset for offset in offsets])
    y = np.stack([np.full([200], label) for label in range(3)])
    return x, y
if __name__ == '__main__':
    x, y = load_data()
    plt.figure(figsize=[12, 6])
    # Left panel: ground-truth clusters.
    plt.subplot(1, 2, 1)
    plt.title('Real')
    plt.scatter(x[0, :, 0], x[0, :, 1], color='r', marker='.')
    plt.scatter(x[1, :, 0], x[1, :, 1], color='g', marker='.')
    plt.scatter(x[2, :, 0], x[2, :, 1], color='b', marker='.')
    # Flatten the (3, 200, 2) clusters into one flat training set.
    x, y = x.reshape(-1, 2), y.flatten()
    knn = KNN(3)
    knn.fit(x, y)
    # Predict on the training points themselves and measure accuracy.
    pred = knn(x)
    acc = np.sum(pred == y) / len(pred)
    print(f'Accuracy = {100 * acc:.2f}%')
    # Right panel: the same points colored by predicted class.
    x0, x1, x2 = x[pred == 0], x[pred == 1], x[pred == 2]
    plt.subplot(1, 2, 2)
    plt.title('Pred')
    plt.scatter(x0[:, 0], x0[:, 1], color='r', marker='.')
    plt.scatter(x1[:, 0], x1[:, 1], color='g', marker='.')
    plt.scatter(x2[:, 0], x2[:, 1], color='b', marker='.')
    plt.show()
|
# Copyright 1999-2009 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object definition for processing GRAM4 usage packets.
"""
from globus.usage.iptimemonitorpacket import IPTimeMonitorPacket
import time
class GRAM4Packet(IPTimeMonitorPacket):
    """
    GRAM 4 Usage Packet handler
    """
    # Maximum length of the LRM (scheduler) name in the packet.
    __MAX_SCHEDULER_SIZE = 20

    def __init__(self, address, packet):
        """
        Parse a GRAM4 usage packet.

        Arguments:
            address -- Sender address (passed to IPTimeMonitorPacket).
            packet -- Raw packet data to unpack.
        """
        IPTimeMonitorPacket.__init__(self, address, packet)
        [creation_time_millis] = self.unpack("q")
        # Milliseconds-since-epoch -> (year, month, day, hour, min, sec).
        self.creation_time = \
            tuple(
                list(
                    time.gmtime(creation_time_millis / 1000))[0:6])

        # Workaround a bug in GRAM service sending LRM name + char[].toString()
        # which yields a java object pointer if the LRM name is shorter than
        # __MAX_SCHEDULER_SIZE.
        # We'll parse until we hit the byte \0, \1, or the __MAX_SCHEDULER_SIZE
        # then strip off everything after [C if it is present.
        # (A redundant ``self.lrm = ''`` pre-assignment was removed; the
        # value was immediately overwritten here.)
        self.lrm = self.unpack_lrm_string()

        [
            self.job_credential_endpoint_used,
            self.file_stage_in_used,
            self.file_stage_out_used,
            self.file_clean_up_used,
            self.clean_up_hold_used
        ] = map(lambda x: (x == 1), self.unpack("5B"))
        [
            self.job_type,
            self.gt2_error_code,
            self.fault_class
        ] = self.unpack("3B")

    insert_statement = '''
        INSERT INTO gram_packets(
            component_code,
            version_code,
            send_time,
            ip_address,
            creation_time,
            scheduler_type,
            job_credential_endpoint_used,
            file_stage_in_used,
            file_stage_out_used,
            file_clean_up_used,
            clean_up_hold_used,
            job_type,
            gt2_error_code,
            fault_class)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''

    def values(self, dbclass):
        """
        Return a values tuple which matches the parameters in the
        class's insert_statement.

        Arguments:
            self -- A GRAM4Packet object
            dbclass -- Database driver module providing Timestamp()

        Returns:
            Tuple containing
            (component_code, version_code, send_time, ip_address,
            creation_time, scheduler_type, job_credential_endpoint_used,
            file_stage_in_used, file_stage_out_used, file_clean_up_used,
            clean_up_hold_used, job_type, gt2_error_code, fault_class)
        """
        return (
            self.component_code,
            self.packet_version,
            dbclass.Timestamp(*self.send_time),
            self.ip_address,
            dbclass.Timestamp(*self.creation_time),
            self.lrm,
            self.job_credential_endpoint_used,
            self.file_stage_in_used,
            self.file_stage_out_used,
            self.file_clean_up_used,
            self.clean_up_hold_used,
            self.job_type,
            self.gt2_error_code,
            self.fault_class)

    def unpack_lrm_string(self):
        """
        Read the LRM (scheduler) name from the packet body.

        Consumes bytes until a 0 or 1 byte is hit or __MAX_SCHEDULER_SIZE
        bytes have been read, then strips a trailing "[C..." artifact
        (Java char[].toString() output) if present.
        """
        lrm_string = ''
        for _ in range(GRAM4Packet.__MAX_SCHEDULER_SIZE):
            [byte_value] = self.unpack("B")
            if byte_value > 1:
                lrm_string += chr(byte_value)
            else:
                # Rewind so the terminator byte is re-read by the caller.
                self.packet_body_offset -= 1
                break
        return lrm_string.split("[C")[0]
|
class ArangodanticError(Exception):
    """Generic Arangodantic error class.

    Base class for all errors raised by this package; catch this to
    handle any Arangodantic failure.
    """
    pass
class CursorNotFoundError(ArangodanticError):
    """Raised when a referenced cursor cannot be found."""
    pass
class DataSourceNotFound(ArangodanticError):
    """Raised when a requested data source cannot be found."""
    pass
class ModelNotFoundError(ArangodanticError):
    """Raised when no model matches the lookup criteria."""
    pass
class MultipleModelsFoundError(ArangodanticError):
    """Raised when a single model was expected but several matched."""
    pass
class UniqueConstraintError(ArangodanticError):
    """Raised when an operation would violate a uniqueness constraint."""
    pass
class GraphNotFoundError(ArangodanticError):
    """Raised when a referenced graph cannot be found."""
    pass
class ConfigError(ArangodanticError):
    """Raised when the library configuration is missing or invalid."""
    pass
class CursorError(ArangodanticError):
    """Raised for general cursor-related failures."""
    pass
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.core.azlogging as azlogging
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.eventgrid.models import (
EventSubscription,
WebHookEventSubscriptionDestination,
EventHubEventSubscriptionDestination,
EventSubscriptionFilter)
from six.moves.urllib.parse import quote # pylint: disable=import-error
logger = azlogging.get_az_logger(__name__)

# Resource provider namespaces.
EVENTGRID_NAMESPACE = "Microsoft.EventGrid"
RESOURCES_NAMESPACE = "Microsoft.Resources"
# ARM resource types used when scoping event subscriptions.
RESOURCE_TYPE_SUBSCRIPTIONS = "subscriptions"
RESOURCE_TYPE_RESOURCE_GROUPS = "resourcegroups"
EVENTGRID_TOPICS = "topics"
# Supported event subscription destination endpoint types.
WEBHOOK_DESTINATION = "webhook"
EVENTHUB_DESTINATION = "eventhub"
def cli_topic_list(client, resource_group_name=None):
    """List Event Grid topics, scoped to a resource group when one is given."""
    if not resource_group_name:
        return client.list_by_subscription()
    return client.list_by_resource_group(resource_group_name)
def cli_eventgrid_event_subscription_topic_create(
        client, resource_group_name, topic_name, event_subscription_name,
        endpoint, endpoint_type="WebHook", included_event_types=None,
        subject_begins_with=None, subject_ends_with=None,
        is_subject_case_sensitive=False, labels=None):
    """Create an event subscription on an Event Grid topic."""
    return _event_subscription_create(
        client, resource_group_name, EVENTGRID_NAMESPACE, EVENTGRID_TOPICS,
        topic_name, event_subscription_name, endpoint, endpoint_type,
        included_event_types, subject_begins_with, subject_ends_with,
        is_subject_case_sensitive, labels)
def cli_eventgrid_event_subscription_topic_get(
        client, resource_group_name, topic_name, event_subscription_name):
    """Get an event subscription defined on an Event Grid topic."""
    return _event_subscription_get(
        client, resource_group_name, EVENTGRID_NAMESPACE, EVENTGRID_TOPICS,
        topic_name, event_subscription_name)
def cli_eventgrid_event_subscription_topic_get_full_url(
        client, resource_group_name, topic_name, event_subscription_name):
    """Get the full endpoint URL of a topic's event subscription."""
    return _event_subscription_get_full_url(
        client, resource_group_name, EVENTGRID_NAMESPACE, EVENTGRID_TOPICS,
        topic_name, event_subscription_name)
def cli_eventgrid_event_subscription_topic_delete(
        client, resource_group_name, topic_name, event_subscription_name):
    """Delete an event subscription defined on an Event Grid topic."""
    _event_subscription_delete(
        client, resource_group_name, EVENTGRID_NAMESPACE, EVENTGRID_TOPICS,
        topic_name, event_subscription_name)
def cli_eventgrid_event_subscription_resource_create(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name, endpoint,
        endpoint_type="WebHook", included_event_types=None,
        subject_begins_with=None, subject_ends_with=None,
        is_subject_case_sensitive=False, labels=None):
    """Create an event subscription on an arbitrary Azure resource."""
    return _event_subscription_create(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name, endpoint, endpoint_type,
        included_event_types, subject_begins_with, subject_ends_with,
        is_subject_case_sensitive, labels)
def cli_eventgrid_event_subscription_resource_get(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name):
    """Get an event subscription on an arbitrary Azure resource."""
    return _event_subscription_get(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name)
def cli_eventgrid_event_subscription_resource_get_full_url(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name):
    """Get the full endpoint URL of a resource's event subscription."""
    return _event_subscription_get_full_url(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name)
def cli_eventgrid_event_subscription_resource_delete(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name):
    """Delete an event subscription on an arbitrary Azure resource."""
    _event_subscription_delete(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name)
def cli_eventgrid_event_subscription_arm_create(
        client, event_subscription_name, endpoint, resource_group_name=None,
        endpoint_type="WebHook", included_event_types=None,
        subject_begins_with=None, subject_ends_with=None,
        is_subject_case_sensitive=False, labels=None):
    """Create an event subscription at subscription or resource-group scope."""
    resource_type, resource_name = _get_arm_resource_info(resource_group_name)
    return _event_subscription_create(
        client, resource_group_name, RESOURCES_NAMESPACE, resource_type,
        resource_name, event_subscription_name, endpoint, endpoint_type,
        included_event_types, subject_begins_with, subject_ends_with,
        is_subject_case_sensitive, labels)
def cli_eventgrid_event_subscription_arm_get(
        client, event_subscription_name, resource_group_name=None):
    """Get an event subscription at subscription or resource-group scope."""
    resource_type, resource_name = _get_arm_resource_info(resource_group_name)
    return _event_subscription_get(
        client, resource_group_name, RESOURCES_NAMESPACE, resource_type,
        resource_name, event_subscription_name)
def cli_eventgrid_event_subscription_arm_get_full_url(
        client, event_subscription_name, resource_group_name=None):
    """Get the full endpoint URL of an ARM-scoped event subscription."""
    resource_type, resource_name = _get_arm_resource_info(resource_group_name)
    return _event_subscription_get_full_url(
        client, resource_group_name, RESOURCES_NAMESPACE, resource_type,
        resource_name, event_subscription_name)
def cli_eventgrid_event_subscription_arm_delete(
        client, event_subscription_name, resource_group_name=None):
    """Delete an event subscription at subscription or resource-group scope."""
    resource_type, resource_name = _get_arm_resource_info(resource_group_name)
    _event_subscription_delete(
        client, resource_group_name, RESOURCES_NAMESPACE, resource_type,
        resource_name, event_subscription_name)
def cli_topic_event_subscription_list(
        client, resource_group_name, topic_name):
    """List all event subscriptions defined on an Event Grid topic."""
    return resource_event_subscription_list_internal(
        client, resource_group_name, EVENTGRID_NAMESPACE, EVENTGRID_TOPICS,
        topic_name)
def cli_resource_event_subscription_list(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name):
    """List all event subscriptions defined on an arbitrary resource."""
    return resource_event_subscription_list_internal(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name)
def cli_event_subscription_list(  # pylint: disable=too-many-return-statements
        client, resource_group_name=None, location=None, topic_type_name=None):
    """List event subscriptions, narrowed by any combination of resource
    group, location and topic type."""
    if topic_type_name:
        # Topic-type-filtered variants.
        if location and resource_group_name:
            return client.list_regional_by_resource_group_for_topic_type(
                resource_group_name, location, topic_type_name)
        if location:
            return client.list_regional_by_subscription_for_topic_type(
                location, topic_type_name)
        if resource_group_name:
            return client.list_global_by_resource_group_for_topic_type(
                resource_group_name, topic_type_name)
        return client.list_global_by_subscription_for_topic_type(topic_type_name)

    # Unfiltered variants.
    if location and resource_group_name:
        return client.list_regional_by_resource_group(resource_group_name, location)
    if location:
        return client.list_regional_by_subscription(location)
    if resource_group_name:
        return client.list_global_by_resource_group(resource_group_name)
    return client.list_global_by_subscription()
def resource_event_subscription_list_internal(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name):
    """List the event subscriptions attached to a single resource."""
    return client.list_by_resource(
        resource_group_name, provider_namespace, resource_type, resource_name)
def _event_subscription_create(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name, endpoint, endpoint_type,
        included_event_types, subject_begins_with, subject_ends_with,
        is_subject_case_sensitive, labels):
    """Create an event subscription at the scope of the given resource and
    wait for the long-running operation to finish.

    Raises:
        ValueError: if endpoint_type is neither "webhook" nor "eventhub".
    """
    scope = _get_scope(resource_group_name, provider_namespace, resource_type, resource_name)

    if endpoint_type.lower() == WEBHOOK_DESTINATION.lower():
        destination = WebHookEventSubscriptionDestination(endpoint)
    elif endpoint_type.lower() == EVENTHUB_DESTINATION.lower():
        destination = EventHubEventSubscriptionDestination(endpoint)
    else:
        # Previously an unknown endpoint type fell through with
        # ``destination`` unbound and crashed later with UnboundLocalError;
        # fail fast with a clear message instead.
        raise ValueError('unknown endpoint type: {}'.format(endpoint_type))

    event_subscription_filter = EventSubscriptionFilter(
        subject_begins_with,
        subject_ends_with,
        included_event_types,
        is_subject_case_sensitive)
    event_subscription_info = EventSubscription(destination, event_subscription_filter, labels)

    # client.create returns a poller for the long-running operation.
    async_event_subscription_create = client.create(
        scope,
        event_subscription_name,
        event_subscription_info)
    created_event_subscription = async_event_subscription_create.result()
    return created_event_subscription
def _event_subscription_get(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name):
    """Fetch a single event subscription by name at the resource scope."""
    scope = _get_scope(resource_group_name, provider_namespace, resource_type, resource_name)
    return client.get(scope, event_subscription_name)
def _event_subscription_get_full_url(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name):
    """Fetch the full destination URL of an event subscription."""
    scope = _get_scope(resource_group_name, provider_namespace, resource_type, resource_name)
    return client.get_full_url(scope, event_subscription_name)
def _event_subscription_delete(
        client, resource_group_name, provider_namespace, resource_type,
        resource_name, event_subscription_name):
    """Delete an event subscription at the resource scope."""
    scope = _get_scope(resource_group_name, provider_namespace, resource_type, resource_name)
    client.delete(scope, event_subscription_name)
def _get_scope(
        resource_group_name, provider_namespace, resource_type, resource_name):
    """Build the ARM scope string for an event subscription.

    For the Microsoft.Resources namespace the scope is the subscription or
    resource group itself; otherwise it names the concrete resource.
    """
    subscription_id = get_subscription_id()

    if provider_namespace == RESOURCES_NAMESPACE:
        if resource_group_name:
            return (
                '/subscriptions/{}/resourceGroups/{}'
                .format(quote(subscription_id),
                        quote(resource_group_name)))
        return (
            '/subscriptions/{}'
            .format(quote(subscription_id)))

    return (
        '/subscriptions/{}/resourceGroups/{}/providers/{}/{}/{}'
        .format(quote(subscription_id),
                quote(resource_group_name),
                quote(provider_namespace),
                quote(resource_type),
                quote(resource_name)))
def _get_arm_resource_info(resource_group_name):
    """Map an optional resource group to the (resource_type, resource_name)
    pair used for ARM-scoped event subscriptions."""
    if resource_group_name:
        return RESOURCE_TYPE_RESOURCE_GROUPS, resource_group_name
    return RESOURCE_TYPE_SUBSCRIPTIONS, get_subscription_id()
|
import glob
import shutil
import os
import pandas as pd
import numpy as np
import nrrd
import re
import matplotlib
import matplotlib.pyplot as plt
import pickle
from time import gmtime, strftime
from datetime import datetime
import timeit
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from utils.resize_3d import resize_3d
from utils.crop_image import crop_image
from utils.respacing import respacing
from utils.nrrd_reg import nrrd_reg_rigid_ref
from train_data.get_img_dataset import img_dataset
def tune_pat_dataset(data_dir, pre_data_dir, pro_data_dir, label_dir, label_file,
                     crop_shape=[192, 192, 140], interp_type='linear', input_channel=3,
                     norm_type='np_clip', data_exclude=None, new_spacing=[1, 1, 3]):
    """
    Preprocess data (respacing, registration, cropping) for chest CT dataset;

    Arguments:
        data_dir {path} -- path to the raw nrrd scans (also holds the
                           registration template CH001.nrrd);
        pre_data_dir {path} -- output path for the preprocessed scans;
        pro_data_dir {path} -- output path for the patient dataframe csv;
        label_dir {path} -- path to the label csv folder;
        label_file {str} -- label csv filename;

    Keyword arguments:
        crop_shape {list} -- numpy array size after cropping;
        interp_type {str} -- interpolation type for respacing, default: 'linear';
        norm_type {str} -- normalization type (passed through, unused here);
        data_exclude {list} -- patient IDs to exclude due to data issues, default: None;
        new_spacing {list} -- respacing size, default [1, 1, 3];

    Return:
        saves nrrd image data; writes tune_pat_df.csv to pro_data_dir;
    """
    reg_temp_img = os.path.join(data_dir, 'CH001.nrrd')
    df_label = pd.read_csv(os.path.join(label_dir, label_file))
    df_label['Contrast'] = df_label['Contrast'].map({'Yes': 1, 'No': 0})
    labels = df_label['Contrast'].to_list()

    ## create df for dir, ID and labels on patient level
    fns = [fn for fn in sorted(glob.glob(data_dir + '/*nrrd'))]
    IDs = []
    for fn in fns:
        # Patient ID is the file stem, e.g. /path/CH001.nrrd -> CH001.
        ID = fn.split('/')[-1].split('.')[0].strip()
        IDs.append(ID)
    print('ID:', len(IDs))
    print('file:', len(fns))
    print('label:', len(labels))
    print('contrast scan in ex val:', labels.count(1))
    print('non-contrast scan in ex val:', labels.count(0))
    df = pd.DataFrame({'ID': IDs, 'file': fns, 'label': labels})
    df.to_csv(os.path.join(pro_data_dir, 'tune_pat_df.csv'), index=False)
    print('total scan:', df.shape[0])

    ## delete excluded scans and repeated scans
    if data_exclude is not None:
        df_exclude = df[df['ID'].isin(data_exclude)]
        print('exclude scans:', df_exclude)
        # BUG FIX: this previously referenced the undefined name
        # ``test_exclude`` and raised NameError whenever data_exclude
        # was supplied.
        df.drop(df[df['ID'].isin(data_exclude)].index, inplace=True)
        print('total test scans:', df.shape[0])
    pd.options.display.max_columns = 100
    pd.set_option('display.max_rows', 500)

    ### registration, respacing, cropping
    for fn, ID in zip(df['file'], df['ID']):
        print(ID)
        ## respacing
        img_nrrd = respacing(
            nrrd_dir=fn,
            interp_type=interp_type,
            new_spacing=new_spacing,
            patient_id=ID,
            return_type='nrrd',
            save_dir=None
        )
        ## registration against the fixed template scan
        img_reg = nrrd_reg_rigid_ref(
            img_nrrd=img_nrrd,
            fixed_img_dir=reg_temp_img,
            patient_id=ID,
            save_dir=None
        )
        ## crop the registered scan down to crop_shape and save it
        img_crop = crop_image(
            nrrd_file=img_reg,
            patient_id=ID,
            crop_shape=crop_shape,
            return_type='nrrd',
            save_dir=pre_data_dir
        )
def tune_img_dataset(pro_data_dir, pre_data_dir, slice_range=range(50, 120), input_channel=3,
                     norm_type='np_clip', split=True, fn_arr_1ch=None):
    """
    Build stacked image-slice datasets (numpy arrays + dataframes) from
    scan-level CT for fine-tuning and testing.

    Args:
        pro_data_dir {path} -- path to processed data (holds tune_pat_df.csv);
        pre_data_dir {path} -- path to preprocessed nrrd scans;
        slice_range {range} -- z-direction slice range used for cropping;

    Keyword args:
        input_channel {int} -- image channel, default: 3;
        norm_type {str} -- image normalization type: 'np_clip' or 'np_linear';
        split {bool} -- True: 70/30 train/test split; False: use all data;
        fn_arr_1ch {str} -- filename for a 1-channel array (unused here);
    """
    df = pd.read_csv(os.path.join(pro_data_dir, 'tune_pat_df.csv'))
    fns = sorted(glob.glob(pre_data_dir + '/*nrrd'))
    labels = df['label']
    IDs = df['ID']

    if split == True:
        ## split dataset for fine-tuning model and test model
        data_train, data_test, label_train, label_test, ID_train, ID_test = train_test_split(
            fns,
            labels,
            IDs,
            stratify=labels,
            shuffle=True,
            test_size=0.3,
            random_state=42
        )
        partitions = zip(
            [data_train, data_test],
            [label_train, label_test],
            [ID_train, ID_test],
            ['train_arr.npy', 'test_arr.npy'],
            ['train_img_df.csv', 'test_img_df.csv'],
        )
        ## create the numpy arrays of image slices per partition
        for nrrds, labels, IDs, fn_arr, fn_df in partitions:
            img_dataset(
                pro_data_dir=pro_data_dir,
                run_type='tune',
                nrrds=nrrds,
                IDs=IDs,
                labels=labels,
                fn_arr_1ch=None,
                fn_arr_3ch=fn_arr,
                fn_df=fn_df,
                slice_range=slice_range,
                input_channel=3,
                norm_type=norm_type,
            )
        print('train and test datasets created!')
    elif split == False:
        ## use entire exval data to test model
        img_dataset(
            pro_data_dir=pro_data_dir,
            run_type='tune',
            nrrds=fns,
            IDs=IDs,
            labels=labels,
            fn_arr_1ch=None,
            fn_arr_3ch='train_arr.npy',
            fn_df='train_img_df.csv',
            slice_range=slice_range,
            input_channel=3,
            norm_type=norm_type,
        )
        print('total patient:', len(IDs))
        print('exval datasets created!')
|
from collections import defaultdict, deque
from functools import lru_cache
from typing import List
class Solution:
    def loudAndRich(self, richer: List[List[int]], quiet: List[int]):
        """For every person, find the least quiet person among all who
        have at least as much money (LeetCode 851).

        Args:
            richer: pairs [u, v] meaning u is richer than v.
            quiet: quietness value per person (all distinct).

        Returns:
            List where entry i is the least-quiet person with >= money of i.
        """
        # graph[v] = people known (directly) to be richer than v.
        graph = defaultdict(set)
        for u, v in richer:
            graph[v].add(u)

        # -1 marks "not yet computed"; doubles as the memo table.
        ans = [-1 for _ in range(len(quiet))]

        def dfs(node):
            # Quietest person among node and everyone transitively richer.
            if ans[node] < 0:
                ans[node] = node
                for richer_person in graph[node]:
                    cand = dfs(richer_person)
                    if quiet[cand] < quiet[ans[node]]:
                        ans[node] = cand
            return ans[node]

        # (An unused duplicate helper with lru_cache on a closure was
        # removed; it was dead code.)
        for i in range(len(quiet)):
            dfs(i)
        return ans

    def loudAndRichTLE(self, richer: List[List[int]], quiet: List[int]):
        """Brute-force variant kept for reference: BFS from every node
        (quadratic, too slow for large inputs)."""
        graph = defaultdict(set)
        for u, v in richer:
            graph[v].add(u)
        ans = [0 for _ in range(len(quiet))]

        def bfs(node, q):
            queue = deque([node])
            res = node
            while queue:
                cur = queue.popleft()
                # Removed a stray debug print that polluted stdout here.
                if quiet[cur] < q:
                    q = quiet[cur]
                    res = cur
                for child in graph[cur]:
                    queue.append(child)
            return res

        for i in range(len(quiet)):
            ans[i] = bfs(i, quiet[i])
        return ans
# Quick smoke test: the example from LeetCode 851
# (expected output: [5, 5, 2, 5, 4, 5, 6, 7]).
richer = [[1, 0], [2, 1], [3, 1], [3, 7], [4, 3], [5, 3], [6, 3]]
quiet = [3, 2, 5, 4, 6, 1, 7, 0]
s = Solution()
print(s.loudAndRich(richer, quiet))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Classes:
DatasetLoader - Generate OCW Dataset objects from a variety of sources.
'''
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.data_source.podaac_datasource as podaac
import warnings
class DatasetLoader:
'''Generate a list of OCW Dataset objects from a variety of sources.'''
def __init__(self, *loader_opts):
    '''Generate a list of OCW Dataset objects from a variety of sources.

    Each keyword argument can be information for a dataset in dictionary
    form. For example:
    ``
    >>> loader_opt1 = {'loader_name': 'rcmed', 'name': 'cru',
                       'dataset_id': 10, 'parameter_id': 34}
    >>> loader_opt2 = {'path': './data/TRMM_v7_3B43_1980-2010.nc,
                       'variable': 'pcp'}
    >>> loader = DatasetLoader(loader_opt1, loader_opt2)
    ``

    Or more conveniently if the loader configuration is defined in a
    yaml file named config_file (see RCMES examples):
    ``
    >>> import yaml
    >>> config = yaml.load(open(config_file))
    >>> obs_loader_config = config['datasets']['reference']
    >>> loader = DatasetLoader(*obs_loader_config)
    ``

    As shown in the first example, the dictionary for each argument should
    contain a loader name and parameters specific to the particular loader.
    Once the configuration is entered, the datasets may be loaded using:
    ``
    >>> loader.load_datasets()
    >>> obs_datasets = loader.datasets
    ``

    Additionally, each dataset must have a ``loader_name`` keyword. This may
    be one of the following:
    * ``'local'`` - One or multiple dataset files in a local directory
    * ``'local_split'`` - A single dataset split accross multiple files in a
                          local directory
    * ``'esgf'`` - Download the dataset from the Earth System Grid
                   Federation
    * ``'rcmed'`` - Download the dataset from the Regional Climate Model
                    Evaluation System Database
    * ``'dap'`` - Download the dataset from an OPeNDAP URL
    * ``'podaac'`` - Download the dataset from Physical Oceanography
                     Distributed Active Archive Center

    Users who wish to load datasets from loaders not described above may
    define their own custom dataset loader function and incorporate it as
    follows:
    >>> loader.add_source_loader('my_loader_name', my_loader_func)

    :param loader_opts: Dictionaries containing the each dataset loader
                        configuration, representing the keyword arguments of
                        the loader function specified by an additional key
                        called 'loader_name'. If not specified by the user,
                        this defaults to local.
    :type loader_opts: :class:`dict`

    :raises KeyError: If an invalid argument is passed to a data source
                      loader function.
    '''
    # dataset loader config (normalizes 'loader_name' defaults too)
    self.set_loader_opts(*loader_opts)

    # Default loaders: maps a loader_name to the function that fetches
    # datasets from that source.
    self._source_loaders = {
        'local': local.load_multiple_files,
        'local_split': local.load_dataset_from_multiple_netcdf_files,
        'rcmed': rcmed.parameter_dataset,
        'podaac': podaac.load_level4_granule
    }

    # Exclude esgf and dap for python 3 until they are compatible
    try:
        import ocw.data_source.esgf as esgf
        import ocw.data_source.dap as dap
        self._source_loaders['dap'] = dap.load
        self._source_loaders['esgf'] = esgf.load_dataset
    except ImportError:
        warnings.warn('dap and esgf loaders missing. If these are needed, '
                      'fallback to python 2.7.x.')
def add_source_loader(self, loader_name, loader_func):
    '''
    Register a custom data source loader under the given name.

    After registration, dataset configs whose ``loader_name`` equals
    ``loader_name`` will be dispatched to ``loader_func``.

    :param loader_name: The name of the data source.
    :type loader_name: :mod:`string`
    :param loader_func: Reference to a custom defined function. This should
        return an OCW Dataset object, and have an origin which satisfies
        origin['source'] == loader_name.
    :type loader_func: :class:`callable`
    '''
    self._source_loaders.update({loader_name: loader_func})
def add_loader_opts(self, *loader_opts):
    '''
    Append dataset loader options to the existing configuration.

    Each options dict that lacks a ``'loader_name'`` key is tagged with
    the default ``'local'`` loader (the dicts are updated in place).

    :param loader_opts: Dictionaries containing the each dataset loader
                        configuration, representing the keyword arguments of
                        the loader function specified by an additional key
                        called 'loader_name'. If not specified by the user,
                        this defaults to local.
    :type loader_opts: :mod:`dict`
    '''
    for options in loader_opts:
        options.setdefault('loader_name', 'local')
    self._config.extend(loader_opts)
def set_loader_opts(self, *loader_opts):
    '''
    Replace the dataset loader configuration with the given options.

    Any previously registered configuration is discarded before the new
    options are added via :meth:`add_loader_opts`.

    :param loader_opts: Dictionaries containing the each dataset loader
                        configuration, representing the keyword arguments of
                        the loader function specified by an additional key
                        called 'loader_name'. If not specified by the user,
                        this defaults to local.
    :type loader_opts: :mod:`dict`
    '''
    # Start from a clean slate, then delegate tagging/appending.
    self._config = []
    self.add_loader_opts(*loader_opts)
def load_datasets(self):
    '''
    Load every dataset described by the current loader configuration.

    Results are collected in ``self.datasets``. The list is reset on
    each call so repeated loads do not accumulate duplicates.
    '''
    self.datasets = []
    for opts in self._config:
        loaded = self._load(**opts)
        # Some loaders yield a list of OCW Dataset objects, others a
        # single one; normalize to a list before extending.
        self.datasets.extend(loaded if isinstance(loaded, list) else [loaded])
def _load(self, **kwargs):
'''
Generic dataset loading method.
'''
# Extract the loader name
loader_name = kwargs.pop('loader_name')
# Find the correct loader function for the given data source
loader_func = self._source_loaders[loader_name]
# The remaining kwargs should be specific to the loader
output = loader_func(**kwargs)
# Preserve loader_name info for later use
kwargs['loader_name'] = loader_name
return output
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 14:37:04 2019
@author: B.Mika-Gospodorz
Input files: stdin with bam file with multi-mapped reads, reference_host_names.txt and reference_pathogen_names.txt files that contain references extracted with extract_reference_names_from_fasta_files.sh
Output: txt file with cross-mapped reads
Description: Used to identify and extract reads that mapped onto both host and pathogen genomes (cross-mapped reads). The script is executed by remove_crossmapped_reads_BAM.sh and remove_crossmapped_read_paires_BAM.sh scripts.
"""
import sys
import argparse
import pysam
def check_reference_organisms(read_reference_name, host_reference_names, pathogen_reference_names):
    """Classify a read's reference as belonging to the host or the pathogen.

    :param read_reference_name: reference (chromosome/plasmid) the read mapped to
    :param host_reference_names: collection of host reference names
    :param pathogen_reference_names: collection of pathogen reference names
    :return: 'host' or 'pathogen'; an empty string (plus a printed warning)
             when the name is in neither collection
    """
    if read_reference_name in host_reference_names:
        return 'host'
    if read_reference_name in pathogen_reference_names:
        return 'pathogen'
    # Unknown reference: warn but keep going (best-effort behaviour).
    print('There is no ' + read_reference_name + ' in the reference name set')
    return ''
def add_read(multimapped_reads, read_name, reference_name):
    """Record ``reference_name`` under ``read_name``, skipping duplicates.

    :param multimapped_reads: dict mapping read name -> list of reference labels
    :param read_name: read identifier
    :param reference_name: reference label ('host'/'pathogen') to record
    """
    references = multimapped_reads.setdefault(read_name, [])
    if reference_name not in references:
        references.append(reference_name)
def find_and_save_cross_mapped_reads(multimapped_reads_with_reference, output_file_name):
    """Write the names of reads mapped to more than one reference to a file.

    :param multimapped_reads_with_reference: dict of read name -> reference labels
    :param output_file_name: destination text file, one read name per line
    """
    with open(output_file_name, 'w') as out_handle:
        for read_name, references in multimapped_reads_with_reference.items():
            # A read recorded against >1 reference organism is cross-mapped.
            if len(references) > 1:
                out_handle.write(str(read_name) + '\n')
def read_reads_from_samfile(sam_file_name, host_reference_names, pathogen_reference_names, output_file_name):
    """Scan a BAM stream and write all cross-mapped read names to a file.

    :param sam_file_name: path or file object (e.g. sys.stdin) of the BAM input
    :param host_reference_names: list of host chromosome/plasmid names
    :param pathogen_reference_names: list of pathogen chromosome/plasmid names
    :param output_file_name: destination text file for cross-mapped read names
    """
    bam = pysam.AlignmentFile(sam_file_name, "rb")
    # read name -> list of organisms ('host'/'pathogen') it aligned to
    reads_to_organisms = dict()
    for alignment in bam:
        organism = check_reference_organisms(
            alignment.reference_name, host_reference_names, pathogen_reference_names)
        add_read(reads_to_organisms, alignment.query_name, organism)
    # Persist only the reads seen against both organisms.
    find_and_save_cross_mapped_reads(reads_to_organisms, output_file_name)
# Command-line interface: this module is run as a script with a BAM stream
# piped to stdin (see remove_crossmapped_reads_BAM.sh and
# remove_crossmapped_read_paires_BAM.sh).
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", default="cross_mapped_reads.txt", metavar='output_file_name', help="output file name")
parser.add_argument("-h_ref", "--host_reference_names", metavar='<host_reference_name>', help="Path to reference_host_names.txt file")
parser.add_argument("-p_ref", "--pathogen_reference_names", metavar='<pathogen_reference_name>', help="Path to reference_pathogen_names.txt file")
args = parser.parse_args()

# Create lists of host and pathogen chromosome/plasmid names.
# Use context managers so the reference files are closed promptly
# (previously both handles were left open for the process lifetime).
with open(args.host_reference_names) as host_file:
    host_reference_names = [line.rstrip() for line in host_file]
with open(args.pathogen_reference_names) as pathogen_file:
    pathogen_reference_names = [line.rstrip() for line in pathogen_file]

# Identify and extract cross-mapped reads from the BAM stream on stdin.
read_reads_from_samfile(sys.stdin, host_reference_names, pathogen_reference_names, args.output)
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq search_entitlement`."""
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.entitlement import (
get_entitlements_options,
)
from aquilon.aqdb.model import (
Archetype,
Cluster,
Entitlement,
Grn,
HardwareEntity,
Host,
Personality,
PersonalityStage,
User,
UserType,
)
from sqlalchemy import (
and_,
or_,
)
class CommandSearchEntitlement(BrokerCommand):
    """Broker command implementing ``aq search_entitlement``.

    Builds a single SQLAlchemy query over ``Entitlement`` and narrows it
    with OR-groups of conditions for: the entitlement type, the 'to'
    target (user/GRN), the 'on' target (host/cluster/personality/
    archetype/GRN), the location, and the host environment.
    """

    def render(self, session, logger, to_any_grn, to_any_user,
               to_any_user_of_type, on_exact_location, **arguments):
        """Return the list of ``Entitlement`` rows matching the search options.

        :param session: database session used for all queries
        :param logger: request logger, passed through to option parsing
        :param to_any_grn: match entitlements granted to any GRN
        :param to_any_user: match entitlements granted to any user
        :param to_any_user_of_type: iterable of user type names; match
            entitlements granted to any user of those types
        :param on_exact_location: if set, match the given locations only
            (do not include their child locations)
        :param arguments: remaining search options, resolved by
            ``get_entitlements_options``
        """
        # Parse the options to get the entitlements options:
        # dbtos = 'to' targets (User/Grn objects), dbons = 'on' targets,
        # dblocations / dbenvs / dbtype = location, environment and type
        # filters respectively.
        dbtos, dbons, dblocations, dbenvs, dbtype = get_entitlements_options(
            session=session, logger=logger, config=self.config, **arguments)

        # Prepare the query. The outer joins make the related tables
        # available to the filters built below without dropping
        # entitlements that lack a given relation.
        query = session.query(Entitlement)
        query = query.outerjoin(
            Personality, Entitlement.personality_id == Personality.id)
        query = query.outerjoin(
            Host, Entitlement.host_id == Host.hardware_entity_id)
        query = query.outerjoin(
            Cluster, Entitlement.cluster_id == Cluster.id)
        query = query.outerjoin(
            User, Entitlement.user_id == User.id)

        # Apply the type conditions if provided
        if dbtype:
            query = query.filter(Entitlement.type_id == dbtype.id)

        # Apply conditions on the 'to' field; all collected conditions
        # are OR-ed together at the end.
        toconds = []
        if to_any_user:
            toconds.append(Entitlement.user_id.isnot(None))
        else:
            # NOTE(review): this loop rebinds ``dbtype`` (previously the
            # entitlement type resolved above). Harmless here because the
            # earlier value is no longer needed past this point, but a
            # distinct name would be clearer.
            for user_type in set(to_any_user_of_type or []):
                dbtype = UserType.get_unique(session, name=user_type,
                                             compel=True)
                toconds.append(and_(Entitlement.user_id.isnot(None),
                                    User.type_id == dbtype.id))
            users = [t.id for t in dbtos if isinstance(t, User)]
            if users:
                toconds.append(Entitlement.user_id.in_(users))
        if to_any_grn:
            toconds.append(Entitlement.eon_id.isnot(None))
        else:
            grns = [t.eon_id for t in dbtos if isinstance(t, Grn)]
            if grns:
                toconds.append(Entitlement.eon_id.in_(grns))
        if toconds:
            query = query.filter(or_(*toconds))

        # Apply conditions on the 'on' field. Each entry describes one
        # target kind: 'field' is the attribute read from the matched
        # objects (defaults to 'id'), 'match' is the Entitlement column
        # compared against (defaults to '<name>_id').
        onconds = []
        on_types = [
            {
                'class': Host,
                'field': 'hardware_entity_id',
            },
            {
                'class': Cluster,
            },
            {
                'class': Personality,
            },
            {
                'class': Archetype,
            },
            {
                'class': Grn,
                'field': 'eon_id',
                'match': 'target_eon_id',
            },
        ]
        for tinfo in on_types:
            cls = tinfo['class']
            name = tinfo.get('name', cls.__name__.lower())
            field = tinfo.get('field', 'id')
            match = tinfo.get('match', '{}_id'.format(name))
            # 'on_any_<name>' switches from matching specific objects to
            # matching any entitlement that targets this kind at all.
            on_any = arguments.get('on_any_{}'.format(name))
            if on_any:
                onconds.append(getattr(Entitlement, match).isnot(None))
            else:
                ids = [getattr(o, field) for o in dbons if isinstance(o, cls)]
                if ids:
                    onconds.append(getattr(Entitlement, match).in_(ids))
        if onconds:
            query = query.filter(or_(*onconds))

        # Apply conditions on the location
        if dblocations:
            # If we do not require the exact location, include the offspring
            # ids in the search values
            if on_exact_location:
                loc_ids = [l.id
                           for l in dblocations]
            else:
                # NOTE(review): combining ``offspring_ids()`` results with
                # ``or_`` and feeding that to ``.in_()`` suggests each call
                # returns a SQL subquery/expression rather than a plain id
                # list — confirm this is the intended usage.
                loc_ids = or_(*[l.offspring_ids() for l in dblocations])
            locconds = []
            # Filter the entitlements that directly provide a location
            locconds.append(Entitlement.location_id.in_(loc_ids))
            # Filter the entitlements against hosts by getting the host's
            # location through the hardware entity
            locconds.append(and_(
                Entitlement.host_id.isnot(None),
                Host.hardware_entity.has(
                    HardwareEntity.location_id.in_(loc_ids))))
            # Filter the entitlements against clusters by getting the
            # cluster's hosts, and those host's location through the
            # hardware entity
            locconds.append(and_(
                Entitlement.cluster_id.isnot(None),
                Cluster.hosts.any(Host.hardware_entity.has(
                    HardwareEntity.location_id.in_(loc_ids)))))
            query = query.filter(or_(*locconds))

        # Apply conditions on the environment
        if dbenvs:
            env_ids = [e.id for e in dbenvs]
            envconds = []
            # Filter the entitlements that directly provide a host environment
            envconds.append(Entitlement.host_environment_id.in_(env_ids))
            # Filter the entitlements against personalities by getting
            # directly the personality's host environment
            envconds.append(and_(
                Entitlement.personality_id.isnot(None),
                Personality.host_environment_id.in_(env_ids)))
            # Filter the entitlements against hosts by getting the host's
            # personality, and that personality's host environment
            envconds.append(and_(
                Entitlement.host_id.isnot(None),
                Host.personality_stage.has(
                    PersonalityStage.personality.has(
                        Personality.host_environment_id.in_(env_ids)))))
            # Filter the entitlements against clusters by getting the
            # cluster's hosts, and those host's personalities, and those
            # personalities' host environments
            envconds.append(and_(
                Entitlement.cluster_id.isnot(None),
                Cluster.hosts.any(Host.personality_stage.has(
                    PersonalityStage.personality.has(
                        Personality.host_environment_id.in_(env_ids))))))
            query = query.filter(or_(*envconds))

        # Return query results
        return query.all()
|
"""Autcompletion support for the fish shell"""
from typing import Union
from pathlib import Path
from arc import types
from arc.config import config
from arc._command import Command, helpers
COMPLETE = "complete -c"
SEEN_SUBCOMMAND = "__fish_seen_subcommand_from"


class FishCompletion:
    """Fluent builder that accumulates ``complete`` statements for fish.

    Option methods (``long``, ``short``, ``description``, ...) stage flags
    in an internal buffer; ``flush()`` converts the staged flags into one
    ``complete -c <command> ...`` line. ``str()`` flushes any remaining
    buffer and joins all emitted lines.
    """

    def __init__(self, command: str):
        self.lines: list[str] = []  # finished output lines
        self._buffer = ""           # flags staged for the next complete line
        self.command = command      # name of the command being completed

    def __str__(self):
        self.flush()
        return "\n".join(self.lines)

    def add(self, content: str, no_format=False):
        # Raw lines bypass the "complete -c <command>" prefix.
        line = content if no_format else f"{COMPLETE} {self.command}{content}"
        self.lines.append(line)

    def comment(self, comment: str):
        self.add(f"\n# {comment}", no_format=True)
        return self

    def flush(self):
        if self._buffer:
            self.add(self._buffer)
            self._buffer = ""
        return self

    def _buffer_add(self, content: str):
        self._buffer = f"{self._buffer} {content}"
        return self

    def set(self, name: str, value: str):
        # Variable definitions are emitted verbatim, not as completions.
        self.lines.append(f"set -l {name} {value}")
        return self

    def no_files(self):
        return self._buffer_add("-f")

    def force_files(self):
        return self._buffer_add("-F")

    def description(self, desc: str):
        return self._buffer_add(f'-d "{desc}"')

    def arguments(self, args: Union[list[str], str]):
        joined = " ".join(args) if isinstance(args, list) else args
        return self._buffer_add(f'-a "{joined}"')

    def long(self, name: str):
        return self._buffer_add(f"-l {name}")

    def short(self, name: str):
        return self._buffer_add(f"-s {name}")

    def keep_order(self):
        return self._buffer_add("-k")

    def required(self):
        return self._buffer_add("-r")

    def exclusive(self):
        return self._buffer_add("-x")

    def condition(self, condition: str):
        return self._buffer_add(f'-n "{condition}"')

    def seen_subcommand(self, *args: str):
        return self.condition(f"{SEEN_SUBCOMMAND} {' '.join(args)}")

    def not_seen_subcommand(self, *args: str):
        return self.condition(f"not {SEEN_SUBCOMMAND} {' '.join(args)}")
def generate(name: str, root: Command) -> str:
    """Build the complete fish completion script for *root* and its subcommands."""
    # Every command except the root itself participates in completion.
    subcommands = [
        pair for pair in helpers.get_all_commands(root) if pair[0] != root
    ]

    completions = (
        FishCompletion(name)
        .comment("Setup")
        .set("commands", " ".join(cmd_name for _, cmd_name in subcommands))
        .no_files()
        .flush()
    )

    # `help` completes with the names of all known subcommands.
    completions.comment("Help").seen_subcommand("help").arguments("$commands").flush()

    completions.comment("Commands")
    command_completions(completions, subcommands)

    for command, cmd_name in subcommands:
        argument_completions(completions, command, cmd_name)

    return str(completions)
def command_completions(
    completions: FishCompletion, commands: list[tuple[Command, str]]
):
    """Emit one completion entry per subcommand, shown only before any
    subcommand has been typed.

    NOTE(review): ``ExecutionState`` is referenced below but never imported
    in this module, so this function raises ``NameError`` at runtime —
    confirm the intended import before relying on it.
    """
    for command, cmd_name in commands:
        # Build a minimal execution state so the command can render its docs.
        state = ExecutionState.empty()
        state.command_chain = [command]
        (
            completions.not_seen_subcommand("$commands")
            .arguments(cmd_name)
            .description(command.doc(state).short_description)
            .flush()
        )
def argument_completions(completions: FishCompletion, command: Command, cmd_name: str):
    """Emit per-argument completions for *cmd_name*. Currently a no-op stub.

    NOTE(review): the implementation below was commented out, presumably
    while the parameter API was changing; confirm before re-enabling.
    """
    ...
    # completions.comment(f"{cmd_name} Arguments")
    # for arg in command.executable.params.values():
    #     if arg.hidden or arg.is_positional:
    #         continue
    #     else:
    #         completions.seen_subcommand(cmd_name).long(arg.arg_alias)
    #         if arg.short:
    #             completions.short(arg.short)
    #         completions.description(f"{types.type_store.get_display_name(arg.annotation)}")
    #         if types.safe_issubclass(arg.annotation, Path):
    #             completions.force_files()
    #         else:
    #             completions.no_files()
    #         completions.flush()
|
from sync_conda_environments.helpers.process import run_subprocess
def test_run_subprocess():
    """Smoke test: run_subprocess returns the command's raw bytes output."""
    result = run_subprocess(["pwd"])
    assert result is not None
    assert isinstance(result, bytes)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.