import os
import shutil
from subprocess import call
import pytest
# this is deprecated, should be @pytest.fixture
# but travis uses an old version of pytest for python 3.4
@pytest.yield_fixture(scope="class")
def test_directory_fixture():
base_dir = os.getcwd()
test_dir_name = 'temp_directory1'
full_path = os.path.join(base_dir, test_dir_name)
if os.path.exists(full_path):
shutil.rmtree(test_dir_name)
call(['golem-admin', 'createdirectory', test_dir_name])
yield {'full_path': full_path,
'base_path': base_dir,
'test_directory_name': test_dir_name}
os.chdir(base_dir)
shutil.rmtree(test_dir_name)
@pytest.yield_fixture(scope="class")
def project_fixture(test_directory_fixture):
project_name = 'temp_project1'
os.chdir(test_directory_fixture['test_directory_name'])
call(['python', 'golem.py', 'createproject', project_name])
yield {'test_directory_fixture': test_directory_fixture,
'project_name': project_name}
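# A hypothetical usage sketch (the class and test names below are illustrative,
# not part of this suite): a class-scoped test can consume the fixture directly.
#   class TestCreateProject:
#       def test_project_name(self, project_fixture):
#           assert project_fixture['project_name'] == 'temp_project1'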
|
# @lc app=leetcode id=710 lang=python3
#
# [710] Random Pick with Blacklist
#
# https://leetcode.com/problems/random-pick-with-blacklist/description/
#
# algorithms
# Hard (33.27%)
# Likes: 497
# Dislikes: 83
# Total Accepted: 22.2K
# Total Submissions: 66.8K
# Testcase Example: '["Solution","pick","pick","pick","pick","pick","pick","pick"]\n' +
# '[[7,[2,3,5]],[],[],[],[],[],[],[]]'
#
# You are given an integer n and an array of unique integers blacklist. Design
# an algorithm to pick a random integer in the range [0, n - 1] that is not in
# blacklist. Any integer that is in the mentioned range and not in blacklist
# should be equally likely to be returned.
#
# Optimize your algorithm such that it minimizes the number of calls to the
# built-in random function of your language.
#
# Implement the Solution class:
#
#
# Solution(int n, int[] blacklist) Initializes the object with the integer n
# and the blacklisted integers blacklist.
# int pick() Returns a random integer in the range [0, n - 1] and not in
# blacklist.
#
#
#
# Example 1:
#
#
# Input
# ["Solution", "pick", "pick", "pick", "pick", "pick", "pick", "pick"]
# [[7, [2, 3, 5]], [], [], [], [], [], [], []]
# Output
# [null, 0, 4, 1, 6, 1, 0, 4]
#
# Explanation
# Solution solution = new Solution(7, [2, 3, 5]);
# solution.pick(); // return 0, any integer from [0,1,4,6] should be ok. Note
# that for every call of pick,
# // 0, 1, 4, and 6 must be equally likely to be returned
# (i.e., with probability 1/4).
# solution.pick(); // return 4
# solution.pick(); // return 1
# solution.pick(); // return 6
# solution.pick(); // return 1
# solution.pick(); // return 0
# solution.pick(); // return 4
#
#
#
# Constraints:
#
#
# 1 <= n <= 10^9
# 0 <= blacklist.length <= min(10^5, n - 1)
# 0 <= blacklist[i] < n
# All the values of blacklist are unique.
# At most 2 * 10^4 calls will be made to pick.
#
#
#
# @lc tags=Unknown
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Random pick with a blacklist.
# Generate a random number; the number obtained is an index into the blank
# (non-blacklisted) positions.
# Sort the blacklist and, for each blank region, count how many blacklisted
# values come before it; that count is how far to jump.
#
# @lc idea=end
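# A worked illustration of the idea above (using the example n = 7,
# blacklist = [2, 3, 5], whitelist = [0, 1, 4, 6]):
#   blc = [b - i for i, b in enumerate(sorted(blacklist))] = [2, 2, 3]
#   pick r uniformly from [0, 3]; idx = bisect_right(blc, r) counts the
#   blacklisted values skipped, so r + idx maps 0->0, 1->1, 2->4, 3->6.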
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def __init__(self, n: int, blacklist: List[int]):
blacklist.sort()
blc = []
preb = -1
prec = 0
for b in blacklist:
if b == preb:
blc.append(prec)
else:
prec = b - len(blc)
blc.append(prec)
preb = b
self.blc = blc
self.s = n - len(self.blc)
def pick(self) -> int:
r = random.randint(0, self.s - 1)
idx = bisect_right(self.blc, r)
r += idx
return r
# @lc code=end
# @lc main=start
if __name__ == '__main__':
o = Solution(7, [2, 3, 5])
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
print(o.pick())
pass
# @lc main=end
|
"""Front end for Quack"""
from lark import Lark, Transformer
import argparse
import sys
from typing import List, Tuple
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def cli():
cli_parser = argparse.ArgumentParser()
cli_parser.add_argument("source", type=argparse.FileType("r"),
nargs="?", default=sys.stdin)
args = cli_parser.parse_args()
return args
LB = "{"
RB = "}"
class ASTNode:
"""Abstract base class"""
def r_eval(self) -> List[str]:
"""Evaluate for value"""
raise NotImplementedError(f"r_eval not implemented for node type {self.__class__.__name__}")
def c_eval(self, true_branch: str, false_branch: str) -> List[str]:
raise NotImplementedError(f"c_eval not implemented for node type {self.__class__.__name__}")
def json(self) -> str:
return f"No json method defined for {self.__class__.__name__}"
class ProgramNode(ASTNode):
def __init__(self, classes: List[ASTNode], main_block: ASTNode):
self.classes = classes
main_class = ClassNode("$Main", [], "Obj", [], main_block)
self.classes.append(main_class)
def __str__(self) -> str:
return "\n".join([str(c) for c in self.classes])
class ClassNode(ASTNode):
def __init__(self, name: str, formals: List[ASTNode],
super_class: str,
methods: List[ASTNode],
block: ASTNode):
self.name = name
self.super_class = super_class
self.methods = methods
self.constructor = MethodNode("$constructor", formals, name, block)
def __str__(self):
formals_str = ", ".join([str(fm) for fm in self.constructor.formals])
methods_str = "\n".join([f"{method}\n" for method in self.methods])
return f"""
class {self.name}({formals_str}){LB}
/* methods go here */
{methods_str}
/* statements */
{self.constructor}
{RB}
"""
class MethodNode(ASTNode):
def __init__(self, name: str, formals: List[ASTNode],
returns: str, body: ASTNode):
self.name = name
self.formals = formals
self.returns = returns
self.body = body
def __str__(self):
formals_str = ", ".join([str(fm) for fm in self.formals])
return f"""
/* method */
def {self.name}({formals_str}): {self.returns} {LB}
{self.body}
{RB}
"""
class FormalNode(ASTNode):
def __init__(self, var_name: str, var_type: str):
self.var_name = var_name
self.var_type = var_type
def __str__(self):
return f"{self.var_name}: {self.var_type}"
class BlockNode(ASTNode):
def __init__(self, blahblah: str):
self.blahblah = blahblah
def __str__(self):
return f"{self.blahblah}"
quack_grammar = r"""
?start: program
?program: classes block
classes: clazz*
clazz: _class_sig "{" methods block "}"
_class_sig: "class" name "(" formals ")" [ "extends" name ]
methods: method*
formals: formal ("," formal)*
formal: name ":" name
?constructor: block
name: IDENT -> ident
block: BLAH*
method: "def" name "(" formals ")" returns "{" block "}"
returns: (":" name)?
BLAH: "blah;"
IDENT: /[_a-zA-Z][_a-zA-Z0-9]*/
%import common.WS
%ignore WS
"""
class ASTBuilder(Transformer):
"""Translate Lark tree into my AST structure"""
def program(self, e):
log.debug("->program")
classes, main_block = e
return ProgramNode(classes, main_block)
def classes(self, e):
return e
def clazz(self, e):
log.debug("->clazz")
name, formals, super, methods, constructor = e
return ClassNode(name, formals, super, methods, constructor)
def methods(self, e):
return e
def method(self, e):
log.debug("->method")
name, formals, returns, body = e
return MethodNode(name, formals, returns, body)
def returns(self, e):
if not e:
return "Nothing"
return e
def formals(self, e):
return e
def formal(self, e):
log.debug("->formal")
var_name, var_type = e
return FormalNode(var_name, var_type)
def ident(self, e):
"""A terminal symbol """
log.debug("->ident")
return e[0]
def block(self, e) -> ASTNode:
log.debug("->block")
blahs = [str(id) for id in e]
return "\n".join(blahs)
def main():
args = cli()
quack_parser = Lark(quack_grammar)
text = "".join(args.source.readlines())
tree = quack_parser.parse(text)
# print(tree.pretty(" "))
ast = ASTBuilder().transform(tree)
print(ast)
if __name__ == "__main__":
main()
|
from flask import make_response, request, render_template, redirect, url_for
from models.session import Session
from models.user import User
from werkzeug.security import check_password_hash
class LoginView:
RESULT_INVALIDCREDENTIALS = "invalidcredentials"
def __init__(self):
self.session = Session(request.cookies.get("session_id"))
self.view_model = {
"logged_in": self.session.logged_in,
"username": self.session.username,
"result": self.result_string(),
}
def get_login(self):
if self.session.logged_in:
return redirect(url_for(".home"))
response = make_response(render_template("login.html", vm=self.view_model))
response.set_cookie("session_id", self.session.session_id, httponly=True, secure=True)
return response
def post_login(self):
if self.session.logged_in:
return redirect(url_for(".home"))
fail_response_url = url_for(".login") + f"?result={self.RESULT_INVALIDCREDENTIALS}"
user = User(request.form["username"])
if not user.user_exists():
return redirect(fail_response_url)
if check_password_hash(user.password, request.form["password"]):
self.session.username = user.username
self.session.update_session()
return redirect(url_for(".home"))
return redirect(fail_response_url)
def result_string(self):
result = request.args.get("result")
if result is None:
return None
if result == self.RESULT_INVALIDCREDENTIALS:
return "Invalid credentials."
return "Unknown error."
|
# -*- coding: utf-8 -*-
import random as rand
from library.welcomeScreen import welcome
from library.farwell import showTotalScoreBye
# https://stackoverflow.com/questions/20309456/call-a-function-from-another-file
name, questionsNum = welcome()
questionsKeeper = 1
rightAns = 0
progress = ''
while questionsKeeper <= questionsNum:
questionsKeeper += 1
numbers = ['ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE', 'TEN',
'ELEVEN', 'TWELVE', 'THIRTEEN', 'FOURTEEN', 'FIFTEEN', 'SIXTEEN', 'SEVENTEEN', 'EIGHTEEN', 'NINETEEN', 'TWENTY']
    randIntGen = rand.randint(0, len(numbers) - 1)
qNumbers = numbers[randIntGen]
qIn = 'a'
while qIn != qNumbers:
qIn = str(input("Type {} ? ".format(qNumbers)))
if qIn == qNumbers:
print(" :)")
print("")
progress = str(progress) + '='
print("{}".format(progress))
print("")
rightAns += 1
break
else:
print(" :(")
print("")
showTotalScoreBye(rightAns, questionsNum, name)
|
from sortingandsearching.count_values_in_range import *
class TestCountValuesInRange:
def test_arrays_of_length_2_or_less(self):
assert count([1], lower=1, upper=2) == 1
assert count([1], lower=2, upper=2) == 0
assert count([2, 1], lower=1, upper=2) == 2
assert count([3, 1], lower=1, upper=2) == 1
assert count([3, 4], lower=1, upper=2) == 0
def test_arrays_of_length_3_or_greater(self):
assert count([1, 3, 3, 9, 10, 4], lower=1, upper=4) == 4
assert count([1, 3, 3, 9, 10, 4], lower=9, upper=12) == 2
assert count([1, 3, 3, 9, 10, 4], lower=4, upper=10) == 3
|
# -*- coding: utf-8 -*-
#
"""
Partie arithmetique du module lycee.
"""
def pgcd(a, b):
"""Renvoie le Plus Grand Diviseur Communs des entiers ``a`` et ``b``.
Arguments:
a (int) : un nombre entier
b (int) : un nombre entier
"""
if a < 0 or b < 0:
return pgcd(abs(a), abs(b))
if b == 0:
if a == 0:
raise ZeroDivisionError(
"Le PGCD de deux nombres nuls n'existe pas")
return a
return pgcd(b, a % b)
def reste(a, b):
"""Renvoie le reste de la division de ``a`` par ``b``.
Arguments:
a (int): Un nombre entier.
b (int): Un nombre entier non nul.
"""
r = a % b
if r < 0:
r = r + abs(b)
return r
def quotient(a, b):
"""Le quotient de la division de ``a`` par ``b``.
Arguments:
a (int): Un nombre entier.
b (int): Un nombre entier non nul.
"""
return a // b
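# A minimal usage sketch (illustrative values):
#   pgcd(48, 36)     -> 12
#   quotient(-7, 3)  -> -3
#   reste(-7, 3)     -> 2    (so -7 == 3 * (-3) + 2)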
|
# -*- coding:utf-8 -*-
from apus.users.models import Users
from apus.config.db import save_in_db, query
def create_user():
print("Criar novo usuário")
username = input('username: ')
email = input('email: ')
user = Users(username=username, email=email)
save_in_db(user)
def list_users():
print("Listagem de usuários: ")
usu = query(Users)
for i, u in enumerate(usu.all()):
print(i, u)
|
import json
import boto3
import dynamo_handler
import yelp_handler
import sqs_handler
import sns_handler
def lambda_handler(event, context):
#api_key= Hidden Due to security reasons
#queue_url = Hidden Due to security reasons
slot_list,message_list = sqs_handler.message_handle(queue_url)
if slot_list == "False":
print("The Queue is Empty")
return "Queue is Empty"
print("The slot list is "+str(slot_list))
suggestions = []
count = 0
for element in slot_list:
print("The element is "+str(element))
suggestions = yelp_handler.get_info_yelp(element, api_key)
request_number = str(element['phone']) +"/" +str(element['date']) +"/" +str(element['time'])
print("The element is "+str(element) + "and request_number: "+str(request_number))
#Insert in to Dynamo DB
dynamo_handler.insert_into_table(suggestions, message_list[count], request_number)
# # #Send SNS with Suggestions
phone_number = str(element['phone'])
restaurant_suggestions = []
for i in suggestions:
restaurant_suggestions.append(i['restaurant_name'])
sns_handler.send_message(phone_number,restaurant_suggestions)
return "Hello"
|
import gc
import io
import os
import piexif
import pytest
from PIL import Image, ImageCms
import pillow_heif
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
def to_pillow_image(heif_file):
return Image.frombytes(
heif_file.mode,
heif_file.size,
heif_file.data,
"raw",
heif_file.mode,
heif_file.stride,
)
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('Pug', 'PUG2.HEIC',),
('Pug', 'PUG3.HEIC',),
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_check_filetype(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
filetype = pillow_heif.check(fn)
assert pillow_heif.heif_filetype_no != filetype
assert pillow_heif.heif_filetype_yes_unsupported != filetype
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('Pug', 'PUG2.HEIC',),
('Pug', 'PUG3.HEIC',),
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_read_paths(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
heif_file = pillow_heif.read(fn)
assert heif_file is not None
width, height = heif_file.size
assert width > 0
assert height > 0
assert heif_file.brand != pillow_heif.constants.heif_brand_unknown_brand
assert len(heif_file.data) > 0
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('Pug', 'PUG2.HEIC',),
('Pug', 'PUG3.HEIC',),
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_read_file_objects(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
with open(fn, "rb") as f:
heif_file = pillow_heif.read(f)
assert heif_file is not None
width, height = heif_file.size
assert width > 0
assert height > 0
assert heif_file.brand != pillow_heif.constants.heif_brand_unknown_brand
assert len(heif_file.data) > 0
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('Pug', 'PUG2.HEIC',),
('Pug', 'PUG3.HEIC',),
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_read_bytes(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
with open(fn, "rb") as f:
d = f.read()
heif_file = pillow_heif.read(d)
assert heif_file is not None
width, height = heif_file.size
assert width > 0
assert height > 0
assert heif_file.brand != pillow_heif.constants.heif_brand_unknown_brand
assert len(heif_file.data) > 0
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('Pug', 'PUG2.HEIC',),
('Pug', 'PUG3.HEIC',),
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_read_bytearrays(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
with open(fn, "rb") as f:
d = f.read()
d = bytearray(d)
heif_file = pillow_heif.read(d)
assert heif_file is not None
width, height = heif_file.size
assert width > 0
assert height > 0
assert heif_file.brand != pillow_heif.constants.heif_brand_unknown_brand
assert len(heif_file.data) > 0
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('Pug', 'PUG2.HEIC',),
('Pug', 'PUG3.HEIC',),
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_read_exif_metadata(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
heif_file = pillow_heif.read(fn)
for m in heif_file.metadata or []:
if m["type"] == "Exif":
exif_dict = piexif.load(m["data"])
assert "0th" in exif_dict
assert len(exif_dict["0th"]) > 0
assert "Exif" in exif_dict
assert len(exif_dict["Exif"]) > 0
@pytest.mark.parametrize(
['folder', 'image_name', 'expected_color_profile'],
[
('Pug', 'PUG1.HEIC', 'prof',),
('Pug', 'PUG2.HEIC', 'prof',),
('Pug', 'PUG3.HEIC', None,),
('hif', '93FG5559.HIF', 'nclx',),
('hif', '93FG5564.HIF', 'nclx',),
]
)
def test_read_icc_color_profile(folder, image_name, expected_color_profile):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
heif_file = pillow_heif.read(fn)
if expected_color_profile:
        assert heif_file.color_profile["type"] == expected_color_profile
else:
assert heif_file.color_profile is None
if heif_file.color_profile and heif_file.color_profile["type"] in ["prof", "rICC", ]:
profile = io.BytesIO(heif_file.color_profile["data"])
cms = ImageCms.getOpenProfile(profile)
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('Pug', 'PUG2.HEIC',),
('Pug', 'PUG3.HEIC',),
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_read_pillow_frombytes(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
heif_file = pillow_heif.read(fn)
image = to_pillow_image(heif_file)
# @pytest.mark.parametrize(
# ['folder', 'image_name'],
# [
# ('', 'arrow.heic',),
# ]
# )
# def test_no_transformations(folder, image_name):
# fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
# transformed = pillow_heif.read(fn)
# native = pillow_heif.read(fn, apply_transformations=False)
# assert transformed.size[0] != transformed.size[1]
# assert transformed.size == native.size[::-1]
# transformed = to_pillow_image(transformed)
# native = to_pillow_image(native)
# assert transformed == native.transpose(Image.ROTATE_270)
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('hif', '93FG5559.HIF',),
('hif', '93FG5564.HIF',),
]
)
def test_read_10_bit__everywhere(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
heif_file = pillow_heif.read(fn)
image = to_pillow_image(heif_file)
@pytest.mark.parametrize(
['folder', 'image_name', 'has_metadata', 'has_profile'],
[
('Pug', 'PUG1.HEIC', True, True,),
('Pug', 'PUG3.HEIC', False, False,),
('hif', '93FG5559.HIF', True, True,),
]
)
def test_open_and_load__everywhere(folder, image_name, has_metadata, has_profile):
last_metadata = None
last_color_profile = None
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
heif_file = pillow_heif.open(fn)
assert heif_file.size[0] > 0
assert heif_file.size[1] > 0
assert heif_file.has_alpha is not None
assert heif_file.mode is not None
assert heif_file.bit_depth is not None
assert heif_file.data is None
assert heif_file.stride is None
if heif_file.metadata:
last_metadata = heif_file.metadata[0]
if heif_file.color_profile:
last_color_profile = heif_file.color_profile
res = heif_file.load()
assert heif_file is res
assert heif_file.data is not None
assert heif_file.stride is not None
assert len(heif_file.data) >= heif_file.stride * heif_file.size[1]
assert type(heif_file.data[:100]) == bytes
# Subsequent calls don't change anything
res = heif_file.load()
assert heif_file is res
assert heif_file.data is not None
assert heif_file.stride is not None
if has_metadata:
assert last_metadata is not None
else:
assert last_metadata is None
if has_profile:
assert last_color_profile is not None
else:
assert last_color_profile is None
@pytest.mark.parametrize(
['folder', 'image_name'],
[
('Pug', 'PUG1.HEIC',),
('hif', '93FG5559.HIF',),
]
)
def test_open_and_load_data_collected__everywhere(folder, image_name):
fn = os.path.join(TESTS_DIR, 'images', folder, image_name)
with open(fn, "rb") as f:
data = f.read()
heif_file = pillow_heif.open(data)
    # heif_file.load() should work even if there are no other refs to the source data.
data = None
gc.collect()
heif_file.load()
|
from chass.locate_commands import locate_commands
import subprocess
def sedcommand (thepassedfile, commands, params):
f = open(thepassedfile,"r+")
g = open("copy2.sh","w+")
sed_count=0
lines = f.readlines()
sed_ls = []
for a,b in commands:
if(b=="sed"):
sed_ls.append(a)
for index in sed_ls:
lines[index] = lines[index].strip("\n")
lines[index]+= ">> sedfile"+str(sed_count)+".txt\n"
sed_count+=1
g.write(''.join(lines))
f.close()
g.close()
temporary = open("garbage_file.txt","a")
temporary.flush()
subprocess.Popen(["bash","copy2"+".sh"]+params,stdout=temporary,stderr=subprocess.STDOUT)
# commands = locate_commands(thepassedfile)
|
import math
from rlbot.agents.base_agent import SimpleControllerState
from action.base_action import BaseAction
from mechanic.drive_navigate_boost import DriveNavigateBoost
class Kickoff(BaseAction):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mechanic = DriveNavigateBoost(self.agent, self.rendering_enabled)
def get_controls(self, game_data) -> SimpleControllerState:
self.controls = self.mechanic.step(game_data.my_car, game_data.boost_pads, game_data.ball.location)
ball_loc = game_data.ball.location
kickoff = math.sqrt(ball_loc[0] ** 2 + ball_loc[1] ** 2) < 1
if not kickoff:
self.finished = True
else:
self.finished = False
return self.controls
|
import logging
import os
from pathlib2 import Path
from os import makedirs, path
class LoggingFileHandler(logging.FileHandler):
def __init__(self, dir_name, fileName, mode):
if not Path(dir_name).exists():
makedirs(dir_name)
file_path = path.join(dir_name, str(os.getpid()) + "_" + fileName)
super(LoggingFileHandler, self).__init__(file_path, mode)
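# A minimal usage sketch (directory, file, and logger names are illustrative):
#   handler = LoggingFileHandler("logs", "app.log", "a")
#   logging.getLogger("my_app").addHandler(handler)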
|
import media
import fresh_tomatoes
# Create instances of class `Movie` - for each movie.
the_terminator = media.Movie("The Terminator",
"1 hour 48 minutes",
"The story of a cyborg sent past in time.",
"Sci-fi, Action",
"26 October 1984",
"8.0",
"https://goo.gl/J8m6ww",
"https://youtu.be/c4Jo8QoOTQ4")
avatar = media.Movie("Avatar",
"2 hours 42 minutes",
"A marine on an alien planet.",
"Action, Adventure, Sci-fi, Fantasy",
"18 December 2009",
"7.8",
"https://goo.gl/eTxWYy",
"https://www.youtube.com/watch?v=5PSNL1qE6VY")
avengers3 = media.Movie("Avengers: Infinity War",
"2 hours 36 minutes",
"Avengers assemble to defeat Thanos.",
"Sci-fi, Action",
"25 April 2018",
"8.2",
"https://goo.gl/YSEU5c",
"https://youtu.be/6ZfuNTqbHE8")
maze_runner2 = media.Movie("Maze Runner 2",
"2 hours 13 minutes",
"Few selected ones save themselves from "
"an organisation called WCKD.",
"Sci-Fi, Dystopian, Adventure",
"18 September 2015",
"6.3",
"https://goo.gl/8XtvMQ",
"https://youtu.be/SDofO3P2HpE")
thor_ragnarok = media.Movie("Thor Ragnarok",
"2 hours 10 minutes",
"Thor fights his evil sister, Hela, "
"and protects Asgard from her.",
"Comedy, Fantasy, Action",
"3 November 2017",
"8.0",
"https://goo.gl/5SkAsT",
"https://youtu.be/v7MGUNV8MxU")
justice_league = media.Movie("Justice League",
"2 hours",
"The Justice League assembles to defeat "
"Darkseid.",
"Action, Sci-Fi, Fantasy",
"15 November 2017",
"6.8",
"https://goo.gl/x1BpEi",
"https://youtu.be/r9-DM9uBtVI")
# Create a list of Movie instances.
movies = [the_terminator, avatar, avengers3, maze_runner2,
thor_ragnarok, justice_league]
# Generate a Web page by passing the list of Movie instances.
fresh_tomatoes.open_movies_page(movies)
|
import time
from concurrent.futures import (
CancelledError,
ProcessPoolExecutor,
ThreadPoolExecutor,
)
import pytest
from bbhnet.parallelize import AsyncExecutor, as_completed
def func(x):
return x**2
def func_with_args(x, i):
return x**i
def sleepy_func(t):
time.sleep(t)
return t
@pytest.fixture(params=[list, dict])
def container(request):
values = range(10)
if request.param is list:
return list(values)
else:
letters = "abcdefghij"
return {i: [j] for i, j in zip(letters, values)}
@pytest.fixture(params=[ThreadPoolExecutor, ProcessPoolExecutor])
def pool_type(request):
return request.param
def test_as_completed(container, pool_type):
futures = []
with pool_type(2) as ex:
if isinstance(container, dict):
futures = {
i: [ex.submit(func, k)]
for i, j in container.items()
for k in j
}
else:
futures = [ex.submit(func, i) for i in container]
for result in as_completed(futures):
if isinstance(container, dict):
letter, value = result
letters = sorted(container.keys())
assert value == letters.index(letter) ** 2
else:
assert 0 <= result**0.5 <= 9
def test_async_executor(pool_type):
ex = AsyncExecutor(2, thread=pool_type is ThreadPoolExecutor)
with pytest.raises(ValueError):
ex.submit(func, 0)
with ex:
future = ex.submit(func, 2)
assert future.result() == 4
# test imap
it = ex.imap(func, range(10))
results = sorted([i for i in it])
assert all([i == j**2 for i, j in zip(results, range(10))])
it = ex.imap(func_with_args, range(10), i=3)
results = sorted([i for i in it])
assert all([i == j**3 for i, j in zip(results, range(10))])
# submit more jobs than workers so that
# the last will get started even if the
# context exits as long as there's no error
futures = [ex.submit(sleepy_func, 0.1) for i in range(3)]
tstamp = time.time()
# make sure that the context didn't move on until
# everything finished executing
assert (time.time() - tstamp) > 0.1
# make sure all the jobs got executed
assert all([f.result() == 0.1 for f in futures])
# make sure that we have no more executor
with pytest.raises(ValueError):
ex.submit(func, 0)
try:
ex = AsyncExecutor(2, thread=pool_type is ThreadPoolExecutor)
with ex:
# pad with a couple extra to make sure that
# one doesn't sneak in
futures = [ex.submit(sleepy_func, 0.1) for i in range(5)]
raise ValueError
except ValueError:
assert all([f.result() == 0.1 for f in futures[:2]])
with pytest.raises(CancelledError):
futures[-1].result()
|
from ._ybins import YBins
from ._xbins import XBins
from ._stream import Stream
from ._marker import Marker
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.histogram2dcontour import hoverlabel
from ._contours import Contours
from plotly.graph_objs.histogram2dcontour import contours
from ._colorbar import ColorBar
from plotly.graph_objs.histogram2dcontour import colorbar
|
"""Functions"""
import os
import sys
from sys import exit, stderr, argv, path, modules
from os.path import isfile, isdir, realpath, dirname, exists
import numpy as np
import pandas as pd
# plotting
import matplotlib
import seaborn.apionly as sns
from matplotlib import rc
import matplotlib.lines as mlines
import pylab as plt
import mrestimator as mre
from scipy.optimize import bisect
import random
CODE_DIR = '{}/..'.format(dirname(realpath(__file__)))
path.insert(1, '{}/src'.format(CODE_DIR))
if 'hde_glm' not in modules:
import hde_glm as glm
import hde_utils as utl
import hde_plotutils as plots
def get_p_spike_cond_eval(past, m, h, l):
l = len(past)
past_activation = np.sum(m/l*past)
# past_activation = m*np.dot(np.exp(-np.arange(1,l+1)),past)/np.sum(np.exp(-np.arange(1,l+1)))
p_spike_cond = h + (1-h)*past_activation
return p_spike_cond
# in the dynamics, the decision h or 1-h is already taken when we compute the internal activation
def get_p_spike_cond_dyn(past, m, l):
l = len(past)
past_activation = np.sum(m/l*past)
# past_activation = m*np.dot(np.exp(-np.arange(1,l+1)),past)/np.sum(np.exp(-np.arange(1,l+1)))
p_spike_cond = past_activation
return p_spike_cond
def get_p_spike_cond_eval_dict(m, h, l):
p_spike_cond = {}
for i in range(2**l):
past = get_binary_past_from_integer(i,l)
p_spike_cond[i] = get_p_spike_cond_eval(past, m, h, l)
return p_spike_cond
def get_p_spike_cond_dyn_dict(m, h, l):
p_spike_cond = {}
for i in range(2**l):
past = get_binary_past_from_integer(i,l)
p_spike_cond[i] = get_p_spike_cond_dyn(past, m, l)
return p_spike_cond
def get_binary_past_from_integer(x, l):
past = np.zeros(l).astype(int)
for i in range(l):
state = x%(2**(i+1))
if state > 0:
past[i] = 1
x -= 2**i
return past
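# Example: get_binary_past_from_integer(5, 3) -> array([1, 0, 1]);
# bit i of the integer encodes whether a spike occurred i+1 time steps back,
# matching the `signature` / roll encoding used in simulate_spiketrain below.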
def simulate_spiketrain(N_steps, m, h, l):
# initiate arrays:
p_spike_cond = get_p_spike_cond_dyn_dict(m, h, l)
signature = np.array([2**i for i in range(l)])
past_states = np.zeros(N_steps)
spikes = np.random.choice([0,1], N_steps, p = [1-h, h])
past = np.dot(np.array([np.roll(spikes,k) for k in np.arange(1,l+1)]).T,signature)
past[:l] = 0
for i in range(N_steps):
if past[i] > 0:
if spikes[i] == 0:
if random.random() < p_spike_cond[past[i]]:
spikes[i] = 1
for k in range(l):
if i+k+1 < N_steps:
past[i+k+1] += 2**k
return spikes, past
def get_h_first_order(p_spike, m):
return p_spike*(1-m)/(1-m*p_spike)
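# Note: this inverts the relation p_spike = h / (1 - m + h * m) used in
# R_first_order below, so the two stay consistent (solve
# h = p_spike * (1 - m) / (1 - m * p_spike) for p_spike to check).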
def get_spike_entropy(p_spike):
p_nospike = 1 - p_spike
if p_nospike == 1.0:
entropy = 0
else:
entropy = - p_spike * np.log2(p_spike) - p_nospike * np.log2(p_nospike)
return entropy
def R_first_order(m,h):
p_spike = h/(1-m+h*m)
p_nospike = 1 - p_spike
spike_entropy = get_spike_entropy(p_spike)
p_spike_past_spike = h + (1-h)*m
p_spike_no_past_spike = h
cond_entropy_past_spike = - p_spike_past_spike * np.log2(p_spike_past_spike) - (1-p_spike_past_spike) * np.log2(1-p_spike_past_spike)
cond_entropy_no_past_spike = - p_spike_no_past_spike * np.log2(p_spike_no_past_spike) - (1-p_spike_no_past_spike) * np.log2(1-p_spike_no_past_spike)
cond_entropy = p_spike * cond_entropy_past_spike + p_nospike * cond_entropy_no_past_spike
I_pred = spike_entropy - cond_entropy
R = I_pred/spike_entropy
return R, I_pred
def get_R(spikes, past, p_spike_cond, l):
N_steps = len(spikes)
p_spike = np.sum(spikes)/N_steps
p_nospike = 1 - p_spike
spike_entropy = get_spike_entropy(p_spike)
    # histogram
counts = np.histogram(past, bins = 2**l, range = (0,2**l))[0]
cond_entropy = np.sum([counts[i]*get_spike_entropy(p_spike_cond[i]) for i in range(2**l)])/N_steps
I_pred = spike_entropy - cond_entropy
R = I_pred/spike_entropy
return R, I_pred
def get_R_plugin(spikes, past, l):
N_steps = float(len(spikes))
p_spike = np.sum(spikes)/N_steps
p_nospike = 1 - p_spike
spike_entropy = get_spike_entropy(p_spike)
    # only the first l bins matter, so reduce the past modulo 2**l
past = past % 2**l
counts_past = np.histogram(past, bins = 2**l, range = (0,2**l))[0]
counts_joint = np.histogram(past + spikes * 2**(l), bins = 2**(l+1), range = (0,2**(l+1)))[0]
past_entropy = np.sum(-counts_past/N_steps*np.log2(counts_past/N_steps))
joint_entropy = np.sum(-counts_joint/N_steps*np.log2(counts_joint/N_steps))
I_pred = spike_entropy + past_entropy - joint_entropy
R = I_pred/spike_entropy
return R, I_pred
def get_auto_correlation_time(spikes, min_steps, max_steps, bin_size_ms):
rk = mre.coefficients(spikes, dt=bin_size_ms, steps = (min_steps, max_steps))
T = (rk.steps-rk.steps[0]+1)*bin_size_ms
fit = mre.fit(rk, steps = (min_steps,max_steps),fitfunc = mre.f_exponential)
tau_C = fit.tau
# rk_offset = fit.popt[2]
C_raw = rk.coefficients
range = int(6*tau_C)
tau_C_int = plots.get_T_avg(T[:range+1], C_raw[:range+1], 0)
print(tau_C, tau_C_int)
return tau_C_int, rk, T, fit
def get_lagged_MI(spikes, t):
N = len(spikes)
p_spike = np.sum(spikes)/N
p_nospike = 1 - p_spike
spike_entropy = get_spike_entropy(p_spike)
    # the first t bins do not have a past
N -= t
past = np.roll(spikes,t)
counts_joint = np.histogram(past[t:] + spikes[t:] * 2, bins = 4, range = (0,4))[0]
joint_entropy = np.sum(-counts_joint/N*np.log2(counts_joint/N))
lagged_MI = 2*spike_entropy - joint_entropy
return lagged_MI/spike_entropy
def get_tau_lagged_MI(spikes, max_steps):
T_lagged_MI = np.arange(1,max_steps)
lagged_MI_arr = []
for t in T_lagged_MI:
lagged_MI = get_lagged_MI(spikes, t)
lagged_MI_arr += [lagged_MI]
    # add 0.5 because get_T_avg averages points at the centers of bins, but here the smallest time step is the bin size, so we want to average at the edges
tau_lagged_MI = plots.get_T_avg(T_lagged_MI, np.array(lagged_MI_arr), 0) + 0.5
return tau_lagged_MI, lagged_MI_arr, T_lagged_MI
def get_tau_R(spikes, past, l, R_tot):
T = np.arange(1,l+1)
R_arr = []
for t in T:
R_plugin = get_R_plugin(spikes, past, t)[0]
R_arr += [R_plugin]
dR_arr = plots.get_dR(T,R_arr,R_tot)
    # add 0.5 because get_T_avg averages points at the centers of bins, but here the smallest time step is the bin size, so we want to average at the edges
tau_R = plots.get_T_avg(T, dR_arr, 0) + 0.5
return tau_R, R_arr, dR_arr, T
def save_m_and_h_lists(l_max, m_baseline, p_spike_target, adaptation_rate_m, adaptation_rate_h, N_simulation_steps, N_adaptation_steps, annealing_factor):
nu_m = adaptation_rate_m # adaptation rate for m
nu_h = adaptation_rate_h # adaptation rate for h
h_baseline = p_spike_target*(1-m_baseline)/(1-m_baseline*p_spike_target)
l_list = np.arange(1,l_max+1)
R_target = R_first_order(m_baseline,h_baseline)[0]
p_spike_list = [p_spike_target]
R_list = [R_target]
m_list = [m_baseline]
h_list = [h_baseline]
m = m_baseline
h = h_baseline
for l in l_list[1:]:
N_steps = int(N_simulation_steps/2)
nu_m = adaptation_rate_m # adaptation rate for m
nu_h = adaptation_rate_h # adaptation rate for h
for i in range(N_adaptation_steps):
if i%5 == 0:
p_spike_cond = get_p_spike_cond_eval_dict(m, h, l)
spikes, past = simulate_spiketrain(N_steps, m, h, l)
R = get_R(spikes, past, p_spike_cond, l)[0]
# Adapt m
m += nu_m * (R_target-R)
# h_target = p_spike_target*(1-m)/(1-m*p_spike_target)
p_spike_cond = get_p_spike_cond_eval_dict(m, h, l)
spikes, past = simulate_spiketrain(N_steps, m, h, l)
# Compute firing rate
p_spike = np.sum(spikes)/N_steps
h += nu_h * (p_spike_target - p_spike)
print(l, p_spike/0.005, R)
if i%10 ==0:
nu_m = nu_m * annealing_factor
if i%5 ==0:
nu_h = nu_h * annealing_factor
# print(rate, R)
N_steps = N_simulation_steps
p_spike_cond = get_p_spike_cond_eval_dict(m, h, l)
spikes, past = simulate_spiketrain(N_steps, m, h, l)
# Compute firing rate
p_spike = np.sum(spikes)/N_steps
R = get_R(spikes, past, p_spike_cond, l)[0]
p_spike_list+=[p_spike]
R_list += [R]
m_list += [m]
h_list += [h]
plt.plot(l_list,R_list)
plt.show()
plt.close()
plt.plot(l_list,p_spike_list)
plt.show()
plt.close()
np.save('{}/analysis/binary_AR/l_list.npy'.format(CODE_DIR),l_list)
np.save('{}/analysis/binary_AR/m_list.npy'.format(CODE_DIR),m_list)
np.save('{}/analysis/binary_AR/h_list.npy'.format(CODE_DIR),h_list)
np.save('{}/analysis/binary_AR/R_list.npy'.format(CODE_DIR),R_list)
np.save('{}/analysis/binary_AR/p_spike_list.npy'.format(CODE_DIR),p_spike_list)
"""Parameters"""
t_bin = 0.005
N_simulation_steps = 10000000
target_rate = 5 # in Hz
min_rate = 0.5 # in Hz
max_rate = 10 # in Hz
p_spike_target = target_rate * t_bin
p_spike_min = min_rate * t_bin
p_spike_max = max_rate * t_bin
min_steps_autocorrelation = 1
max_steps_autocorrelation = 150
bin_size_ms = 1.
# m has to be smaller than one: m<1
m_baseline = 0.8
l_max = 10
# Checking if analytical R and firing rate agree with the simulated ones.
if argv[1] == 'check':
l = 1
h = get_h_first_order(p_spike_target, m_baseline)
p_spike_cond = get_p_spike_cond_eval_dict(m_baseline, h, l)
spikes, past = simulate_spiketrain(N_simulation_steps, m_baseline, h, l)
p_spike = np.sum(spikes)/N_simulation_steps
rate = p_spike/t_bin
R = get_R(spikes, past, p_spike_cond, l)[0]
R_analytic =R_first_order(m_baseline,h)[0]
print(rate, R, R_analytic)
# Adapt m and h to keep R and h fixed for different l
if argv[1] == 'adapt_m_and_h':
N_adaptation_steps = 50
adaptation_rate_m = 0.3
learning_rate_h = 0.1
annealing_factor = 0.85
save_m_and_h_lists(l_max, m_baseline, p_spike_target, adaptation_rate_m, learning_rate_h, N_simulation_steps, N_adaptation_steps, annealing_factor)
# Compute R_tot, tau_R, tau_C, tau_L versus l for fixed R_tot, and fixed rate = 5Hz
if argv[1] == 'vs_l':
    # load the lists saved by save_m_and_h_lists (the 'adapt_m_and_h' step)
    l_list = np.load('{}/analysis/binary_AR/l_list.npy'.format(CODE_DIR))
    m_list = np.load('{}/analysis/binary_AR/m_list.npy'.format(CODE_DIR))
    h_list = np.load('{}/analysis/binary_AR/h_list.npy'.format(CODE_DIR))
    R_tot_list = []
    tau_R_list = []
    tau_lagged_MI_list = []
    tau_C_list = []
for i,l in enumerate(l_list):
m = m_list[l-1]
h = h_list[l-1]
p_spike_cond = get_p_spike_cond_eval_dict(m, h, l)
spikes, past = simulate_spiketrain(N_simulation_steps, m, h, l)
R_tot = get_R(spikes, past, p_spike_cond, l)[0]
tau_R = get_tau_R(spikes, past, l, R_tot)[0]
# get autocorrelation measures
tau_C = get_auto_correlation_time(spikes, min_steps_autocorrelation, max_steps_autocorrelation, bin_size_ms)[0]
# get lagged MI measures
tau_lagged_MI = get_tau_lagged_MI(spikes, int(tau_C)*5)[0]
print(l, tau_C, tau_lagged_MI, tau_R)
R_tot_list += [R_tot]
tau_R_list += [tau_R]
tau_lagged_MI_list += [tau_lagged_MI]
tau_C_list += [tau_C]
tau_R_list = np.array(tau_R_list)
tau_C_list = np.array(tau_C_list)
tau_lagged_MI_list = np.array(tau_lagged_MI_list)
np.save('{}/analysis/binary_AR/tau_R_vs_l.npy'.format(CODE_DIR), tau_R_list)
np.save('{}/analysis/binary_AR/tau_C_vs_l.npy'.format(CODE_DIR), tau_C_list)
np.save('{}/analysis/binary_AR/tau_lagged_MI_vs_l.npy'.format(CODE_DIR), tau_lagged_MI_list)
np.save('{}/analysis/binary_AR/R_tot_vs_l.npy'.format(CODE_DIR), R_tot_list)
# Compute R_tot, tau_R, tau_C, tau_L versus m for fixed l=1, and fixed rate = 5Hz
if argv[1] == 'vs_m':
R_tot_list = []
tau_R_list = []
tau_lagged_MI_list = []
tau_C_list = []
l = 1
m_list = np.linspace(0.5,0.95,15)
for i,m in enumerate(m_list):
h = p_spike_target*(1-m)/(1-m*p_spike_target)
p_spike_cond = get_p_spike_cond_eval_dict(m, h, l)
spikes, past = simulate_spiketrain(N_simulation_steps, m, h, l)
R_tot = R_first_order(m,h)[0]
tau_R = get_tau_R(spikes, past, l, R_tot)[0]
# get autocorrelation measures
tau_C = get_auto_correlation_time(spikes, min_steps_autocorrelation, max_steps_autocorrelation, bin_size_ms)[0]
# get lagged MI measures
tau_lagged_MI = get_tau_lagged_MI(spikes, int(tau_C)*5)[0]
print(l, tau_C, tau_lagged_MI, tau_R)
R_tot_list += [R_tot]
tau_R_list += [tau_R]
tau_lagged_MI_list += [tau_lagged_MI]
tau_C_list += [tau_C]
tau_R_list = np.array(tau_R_list)
tau_C_list = np.array(tau_C_list)
tau_lagged_MI_list = np.array(tau_lagged_MI_list)
np.save('{}/analysis/binary_AR/tau_R_vs_m.npy'.format(CODE_DIR), tau_R_list)
np.save('{}/analysis/binary_AR/tau_C_vs_m.npy'.format(CODE_DIR), tau_C_list)
np.save('{}/analysis/binary_AR/tau_lagged_MI_vs_m.npy'.format(CODE_DIR), tau_lagged_MI_list)
np.save('{}/analysis/binary_AR/R_tot_vs_m.npy'.format(CODE_DIR), R_tot_list)
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the abstract estimator `JaxEstimator` for Jax models.
"""
import logging
import numpy as np
from art.estimators.estimator import BaseEstimator, LossGradientsMixin, NeuralNetworkMixin
logger = logging.getLogger(__name__)
class JaxEstimator(NeuralNetworkMixin, LossGradientsMixin, BaseEstimator):
"""
Estimator class for Jax models.
"""
estimator_params = BaseEstimator.estimator_params + NeuralNetworkMixin.estimator_params
def __init__(self, **kwargs) -> None:
"""
Estimator class for Jax models.
:param channels_first: Set channels first or last.
:param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
maximum values allowed for features. If floats are provided, these will be used as the range of all
features. If arrays are provided, each value will be considered the bound for a feature, thus
the shape of clip values needs to match the total number of features.
:param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
:param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
:param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be
used for data preprocessing. The first value will be subtracted from the input. The input will then
be divided by the second one.
:param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`.
"""
super().__init__(**kwargs)
JaxEstimator._check_params(self)
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs):
"""
Perform prediction of the neural network for samples `x`.
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param batch_size: Batch size.
:return: Predictions.
:rtype: Format as expected by the `model`
"""
return NeuralNetworkMixin.predict(self, x, batch_size=batch_size, **kwargs)
def fit(self, x: np.ndarray, y, batch_size: int = 128, nb_epochs: int = 20, **kwargs) -> None:
"""
Fit the model of the estimator on the training data `x` and `y`.
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param y: Target values.
:type y: Format as expected by the `model`
:param batch_size: Batch size.
:param nb_epochs: Number of training epochs.
"""
NeuralNetworkMixin.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs)
def set_params(self, **kwargs) -> None:
"""
Take a dictionary of parameters and apply checks before setting them as attributes.
:param kwargs: A dictionary of attributes.
"""
super().set_params(**kwargs)
self._check_params()
def _check_params(self) -> None:
super()._check_params()
|
import sys,os,subprocess,logging
from engine import MatchingEngine
className = "BundlerMatching"
class BundlerMatching(MatchingEngine):
featuresListFileName = "list_features.txt"
executable = ''
def __init__(self, distrDir):
if sys.platform == "win32":
self.executable = os.path.join(distrDir, "bundler/bin/KeyMatchFull.exe")
else:
self.executable = os.path.join(distrDir, "bundler/bin/KeyMatchFull")
logging.info("BundlerMatching executable path: %s" % self.executable)
def match(self):
logging.info("\nPerforming feature matching...")
subprocess.call([self.executable, self.featuresListFileName, self.outputFileName])
|
"""
Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.
Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.
"""
import sys
# the program is deliberately persistent about getting an answer.
def ask_repeat(msg=""):
res: bool = False
if len(msg):
answer = input(f"Хотите повторить {msg} [N]? : ") or "N"
else:
answer = input(f"Хотите повторить [N]? : ") or "N"
if answer.capitalize() == "Y":
res = True
else:
res = False
return res
def input_argument():
argument = float(input("Введите делимое: "))
return argument
def input_devider():
mydevider = float(input("Введите делитель: "))
if mydevider == 0.0:
raise RuntimeError("Значение 0 для делителя недопустимо")
return mydevider
def do_devide(argument, mydevider):
result = argument / mydevider
return result
def show_result(result):
print(f"результат {result}")
def exit_with_error(msg, errcode):
print(f"{msg}")
sys.exit(errcode)
def do_calc():
try:
argument = float(input("Введите делимое: "))
except ValueError:
msg = "введено недопустимое значение для делимого"
exit_with_error(msg, 1)
try:
devider = input_devider()
except ValueError:
msg = "введено недопустимое значение для делителя"
exit_with_error(msg, 2)
except RuntimeError as ex:
msg = ex.args[0]
exit_with_error(msg, 3)
res = do_devide(argument, devider)
show_result(res)
def main():
do_calc()
while ask_repeat(""):
do_calc()
if __name__ == "__main__":
main()
|
import torch.nn as nn
import torchquantum as tq
import torchquantum.functional as tqf
import numpy as np
from typing import Iterable
from torchquantum.plugins.qiskit_macros import QISKIT_INCOMPATIBLE_FUNC_NAMES
from torchpack.utils.logging import logger
class QuantumModuleFromOps(tq.QuantumModule):
def __init__(self, ops):
super().__init__()
self.ops = tq.QuantumModuleList(ops)
@tq.static_support
def forward(self, q_device: tq.QuantumDevice):
self.q_device = q_device
for op in self.ops:
op(q_device)
class RandomLayer(tq.QuantumModule):
def __init__(self,
wires,
n_depth=None,
op_types=[tq.RX, tq.RY, tq.RZ, tq.Hadamard, tq.I, tq.CNOT],
seed=None,
):
super().__init__()
self.n_depth = n_depth
self.n_wires = len(wires)
self.op_types= op_types
self.gate_que=[]
self.seed = seed
self.op_list = tq.QuantumModuleList()
if seed is not None:
np.random.seed(seed)
self.op_list = tq.QuantumModuleList()
self.build_block_layer()
def build_block_layer(self):
i=0
while i <(self.n_depth*(self.n_wires)):
idx = np.random.randint(0,len(self.op_types)-1)
op = self.op_types[idx]
self.gate_que.append(idx)
op_wires = i % (self.n_wires)
if idx==(len(self.op_types)-1): #if it is CNOT, need 2 wires
if op_wires!=0:
op=tq.I
self.gate_que.pop()
self.gate_que.append(4) #delete tq.CNOT, add tq.I
else:
op_wires =[0,1]
i=i+1
if op().name in tq.Operator.parameterized_ops:
operation = op(has_params=True, trainable=True, wires=op_wires)
else:
operation = op(wires=op_wires)
self.op_list.append(operation)
i=i+1
op_ctr = tq.CZ
ctr_idx = np.random.randint(self.n_wires-2,self.n_wires-1)
be_ctr_idx_list =list(range(self.n_wires-1))
be_ctr_idx_list.remove(ctr_idx)
be_ctr_idx = np.random.choice(be_ctr_idx_list)
operation_ctr_after = op_ctr(wires=[ctr_idx, be_ctr_idx])
self.op_list.append(operation_ctr_after)
@tq.static_support
def forward(self, q_device: tq.QuantumDevice):
self.q_device = q_device
for op in self.op_list:
op(q_device)
|
"""miIO protocol implementation
This module contains the implementation of the routines to encrypt and decrypt
miIO payloads with a device-specific token.
The payloads to be encrypted (to be passed to a device) are expected to be
JSON objects, the same applies for decryption where they are converted
automatically to JSON objects.
If the decryption fails, the raw bytes as returned by the device are returned.
A usage example can be seen in the source of :func:`miio.Device.send`.
"""
import calendar
import datetime
import hashlib
import json
import logging
from typing import Any, Dict, Tuple
from construct import (
Adapter,
Bytes,
Checksum,
Const,
Default,
GreedyBytes,
Hex,
IfThenElse,
Int16ub,
Int32ub,
Pointer,
RawCopy,
Rebuild,
Struct,
)
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
_LOGGER = logging.getLogger(__name__)
class Utils:
""" This class is adapted from the original xpn.py code by gst666 """
@staticmethod
def verify_token(token: bytes):
"""Checks if the given token is of correct type and length."""
if not isinstance(token, bytes):
raise TypeError("Token must be bytes")
if len(token) != 16:
raise ValueError("Wrong token length")
@staticmethod
def md5(data: bytes) -> bytes:
"""Calculates a md5 hashsum for the given bytes object."""
checksum = hashlib.md5()
checksum.update(data)
return checksum.digest()
@staticmethod
def key_iv(token: bytes) -> Tuple[bytes, bytes]:
"""Generate an IV used for encryption based on given token."""
key = Utils.md5(token)
iv = Utils.md5(key + token)
return key, iv
@staticmethod
def encrypt(plaintext: bytes, token: bytes) -> bytes:
"""Encrypt plaintext with a given token.
:param bytes plaintext: Plaintext (json) to encrypt
:param bytes token: Token to use
:return: Encrypted bytes"""
if not isinstance(plaintext, bytes):
raise TypeError("plaintext requires bytes")
Utils.verify_token(token)
key, iv = Utils.key_iv(token)
padder = padding.PKCS7(128).padder()
padded_plaintext = padder.update(plaintext) + padder.finalize()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
encryptor = cipher.encryptor()
return encryptor.update(padded_plaintext) + encryptor.finalize()
@staticmethod
def decrypt(ciphertext: bytes, token: bytes) -> bytes:
"""Decrypt ciphertext with a given token.
:param bytes ciphertext: Ciphertext to decrypt
:param bytes token: Token to use
:return: Decrypted bytes object"""
if not isinstance(ciphertext, bytes):
raise TypeError("ciphertext requires bytes")
Utils.verify_token(token)
key, iv = Utils.key_iv(token)
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
decryptor = cipher.decryptor()
padded_plaintext = decryptor.update(ciphertext) + decryptor.finalize()
unpadder = padding.PKCS7(128).unpadder()
unpadded_plaintext = unpadder.update(padded_plaintext)
unpadded_plaintext += unpadder.finalize()
return unpadded_plaintext
@staticmethod
def checksum_field_bytes(ctx: Dict[str, Any]) -> bytearray:
"""Gather bytes for checksum calculation"""
x = bytearray(ctx["header"].data)
x += ctx["_"]["token"]
if "data" in ctx:
x += ctx["data"].data
# print("DATA: %s" % ctx["data"])
return x
@staticmethod
def get_length(x) -> int:
"""Return total packet length."""
datalen = x._.data.length # type: int
return datalen + 32
@staticmethod
def is_hello(x) -> bool:
"""Return if packet is a hello packet."""
# not very nice, but we know that hellos are 32b of length
if "length" in x:
val = x["length"]
else:
val = x.header.value["length"]
return bool(val == 32)
class TimeAdapter(Adapter):
"""Adapter for timestamp conversion."""
def _encode(self, obj, context, path):
return calendar.timegm(obj.timetuple())
def _decode(self, obj, context, path):
return datetime.datetime.utcfromtimestamp(obj)
class EncryptionAdapter(Adapter):
"""Adapter to handle communication encryption."""
def _encode(self, obj, context, path):
"""Encrypt the given payload with the token stored in the context.
:param obj: JSON object to encrypt"""
# pp(context)
return Utils.encrypt(
json.dumps(obj).encode("utf-8") + b"\x00", context["_"]["token"]
)
def _decode(self, obj, context, path):
"""Decrypts the given payload with the token stored in the context.
:return str: JSON object"""
try:
# pp(context)
decrypted = Utils.decrypt(obj, context["_"]["token"])
decrypted = decrypted.rstrip(b"\x00")
except Exception:
_LOGGER.debug("Unable to decrypt, returning raw bytes: %s", obj)
return obj
# list of adaption functions for malformed json payload (quirks)
decrypted_quirks = [
# try without modifications first
lambda decrypted_bytes: decrypted_bytes,
# powerstrip returns malformed JSON if the device is not
# connected to the cloud, so we try to fix it here carefully.
lambda decrypted_bytes: decrypted_bytes.replace(
b',,"otu_stat"', b',"otu_stat"'
),
# xiaomi cloud returns malformed json when answering _sync.batch_gen_room_up_url
# command so try to sanitize it
lambda decrypted_bytes: decrypted_bytes[: decrypted_bytes.rfind(b"\x00")]
if b"\x00" in decrypted_bytes
else decrypted_bytes,
]
for i, quirk in enumerate(decrypted_quirks):
decoded = quirk(decrypted).decode("utf-8")
try:
return json.loads(decoded)
except Exception as ex:
# log the error when decrypted bytes couldn't be loaded
# after trying all quirk adaptions
if i == len(decrypted_quirks) - 1:
_LOGGER.error("unable to parse json '%s': %s", decoded, ex)
return None
Message = Struct(
# for building we need data before anything else.
"data" / Pointer(32, RawCopy(EncryptionAdapter(GreedyBytes))),
"header"
/ RawCopy(
Struct(
Const(0x2131, Int16ub),
"length" / Rebuild(Int16ub, Utils.get_length),
"unknown" / Default(Int32ub, 0x00000000),
"device_id" / Hex(Bytes(4)),
"ts" / TimeAdapter(Default(Int32ub, datetime.datetime.utcnow())),
)
),
"checksum"
/ IfThenElse(
Utils.is_hello,
Bytes(16),
Checksum(Bytes(16), Utils.md5, Utils.checksum_field_bytes),
),
)
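# A minimal encrypt/decrypt round-trip sketch (the 16-byte token value is hypothetical):
#   token = bytes.fromhex("00112233445566778899aabbccddeeff")
#   ciphertext = Utils.encrypt(b'{"id": 1, "method": "miIO.info"}', token)
#   assert Utils.decrypt(ciphertext, token) == b'{"id": 1, "method": "miIO.info"}'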
|
#!/usr/bin/env python
# encoding: utf-8
import setuptools #import setup
from numpy.distutils.core import setup, Extension
import os
import platform
os.environ['NPY_DISTUTILS_APPEND_FLAGS'] = '1'
# Source order is important for dependencies
f90src = ['WavDynMods.f90',
'PatclVelct.f90',
'BodyIntgr.f90',
'BodyIntgr_irr.f90',
'AssbMatx.f90',
'AssbMatx_irr.f90',
'SingularIntgr.f90',
'InfGreen_Appr.f90',
'FinGrnExtSubs.f90',
'FinGreen3D.f90',
'CalGreenFunc.f90',
'HydroStatic.f90',
'ImplementSubs.f90',
'InputFiles.f90',
'NormalProcess.f90',
'ReadPanelMesh.f90',
'PotentWavForce.f90',
'PressureElevation.f90',
'PrintOutput.f90',
'SolveMotion.f90',
'WavDynSubs.f90',
'HAMS_Prog.f90',
'HAMS_Prog.pyf',
]
root_dir = os.path.join('pyhams','src')
pyhamsExt = Extension('pyhams.libhams', sources=[os.path.join(root_dir,m) for m in f90src],
extra_f90_compile_args=['-O3','-m64','-fPIC','-fno-align-commons','-fdec-math'],
libraries=['lapack'],
extra_link_args=['-fopenmp'])
extlist = [] if platform.system() == 'Windows' else [pyhamsExt]
setup(
name='pyHAMS',
version='1.0.0',
description='Python module wrapping around HAMS',
author='NREL WISDEM Team',
author_email='systems.engineering@nrel.gov',
license='Apache License, Version 2.0',
package_data={'pyhams': []},
packages=['pyhams'],
ext_modules=extlist,
)
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from kabzimal.rest.views.category import CategoryViewSet
from kabzimal.rest.views.category_type import CategoryTypeViewSet
from kabzimal.rest.views.invoices import InvoicesViewSet
from kabzimal.rest.views.orders import OrdersViewSet
from kabzimal.rest.views.oreder_items import OrderItemsViewSet
from kabzimal.rest.views.payments import PaymentsViewSet
from kabzimal.rest.views.products import ProductsViewSet
from django.conf.urls.static import static
from django.conf import settings
router = DefaultRouter()
router.register('category', CategoryViewSet, basename="category")
router.register('category-type', CategoryTypeViewSet, basename="category-type")
router.register('products', ProductsViewSet, basename="products")
router.register('orders', OrdersViewSet, basename="orders")
router.register('order-items', OrderItemsViewSet, basename="order-items")
router.register('payments', PaymentsViewSet, basename="payments")
router.register('invoices', InvoicesViewSet, basename="invoices")
urlpatterns = [
url(r'^', include(router.urls)),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import time
from anubis.lms.autograde import bulk_autograde
from anubis.models import db
from anubis.utils.data import with_context
from anubis.utils.testing.db import clear_database
from anubis.utils.testing.seed import create_assignment, create_course, create_students, init_submissions
def do_seed() -> str:
clear_database()
# OS test course
intro_to_os_students = create_students(100)
intro_to_os_course = create_course(
intro_to_os_students,
name="Intro to OS",
course_code="CS-UY 3224",
section="A",
professor_display_name="Gustavo",
autograde_tests_repo="https://github.com/os3224/anubis-assignment-tests",
github_org="os3224",
)
os_assignment0, _, os_submissions0, _ = create_assignment(
intro_to_os_course,
intro_to_os_students,
i=0,
github_repo_required=True,
submission_count=50,
)
init_submissions(os_submissions0)
db.session.commit()
return os_assignment0.id
@with_context
def main():
print("Seeding submission data")
seed_start = time.time()
assignment_id = do_seed()
seed_end = time.time()
print("Seed done in {}s".format(seed_end - seed_start))
n = 10
timings = []
print(f"Running bulk autograde on assignment {n} times [ 5K submissions, across 50 students ]")
for i in range(n):
print(f"autograde pass {i + 1}/{n} ", end="", flush=True)
db.session.expunge_all()
start = time.time()
bulk_autograde(assignment_id, limit=100)
end = time.time()
timings.append(end - start)
print("{:.2f}s".format(end - start))
print("Average time :: {:.2f}s".format(sum(timings) / len(timings)))
if __name__ == "__main__":
main()
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class CategoriesConfig(AppConfig):
name = 'joorab.apps.categories'
verbose_name = _("Categories")
def ready(self):
from . import signals
|
#coding:utf-8
import pyglet
from pyglet.gl import *
import OpenGL.GL.shaders
import ctypes
import pyrr
import time
from math import sin
class Triangle:
def __init__(self):
self.triangle = [-0.5, -0.5, 0.0, 1.0, 0.0, 0.0,
0.5, -0.5, 0.0, 0.0, 1.0, 0.0,
0.0, 0.5, 0.0, 0.0, 0.0, 1.0]
self.vertex_shader_source = b"""
#version 440
in layout(location = 0) vec3 position;
in layout(location = 1) vec3 color;
uniform mat4 scale;
uniform mat4 rotate;
uniform mat4 translate;
out vec3 newColor;
void main()
{
gl_Position = translate * rotate * scale * vec4(position, 1.0f);
newColor = color;
}
"""
self.fragment_shader_source = b"""
#version 440
in vec3 newColor;
out vec4 outColor;
void main()
{
outColor = vec4(newColor, 1.0f);
}
"""
vertex_buff = ctypes.create_string_buffer(self.vertex_shader_source)
c_vertex = ctypes.cast(ctypes.pointer(ctypes.pointer(vertex_buff)), ctypes.POINTER(ctypes.POINTER(GLchar)))
vertex_shader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(vertex_shader, 1, c_vertex, None)
glCompileShader(vertex_shader)
fragment_buff = ctypes.create_string_buffer(self.fragment_shader_source)
c_fragment = ctypes.cast(ctypes.pointer(ctypes.pointer(fragment_buff)), ctypes.POINTER(ctypes.POINTER(GLchar)))
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(fragment_shader, 1, c_fragment, None)
glCompileShader(fragment_shader)
shader = glCreateProgram()
glAttachShader(shader, vertex_shader)
glAttachShader(shader, fragment_shader)
glLinkProgram(shader)
glUseProgram(shader)
vbo = GLuint(0)
glGenBuffers(1, vbo)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferData(GL_ARRAY_BUFFER, 72, (GLfloat * len(self.triangle))(*self.triangle), GL_STATIC_DRAW)
#positions
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))
glEnableVertexAttribArray(0)
#colors
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))
glEnableVertexAttribArray(1)
self.scale_loc = glGetUniformLocation(shader, b'scale')
self.rotate_loc = glGetUniformLocation(shader, b'rotate')
self.translate_loc = glGetUniformLocation(shader, b'translate')
self.scale = pyrr.Matrix44.identity()
self.rot_y = pyrr.Matrix44.identity()
self.translate = pyrr.Matrix44.identity()
def transform(self):
        ct = time.perf_counter()  # time.clock() is deprecated and removed in Python 3.8
self.scale = pyrr.Matrix44.from_scale(pyrr.Vector3([abs(sin(ct)), abs(sin(ct)), 1.0])).flatten()
self.rot_y = pyrr.Matrix44.from_y_rotation(ct*2).flatten()
self.translate = pyrr.Matrix44.from_translation(pyrr.Vector3([sin(ct), sin(ct*0.5), 0.0])).flatten()
c_scale = (GLfloat * len(self.scale))(*self.scale)
c_rotate_y = (GLfloat * len(self.rot_y))(*self.rot_y)
c_translate = (GLfloat * len(self.translate))(*self.translate)
glUniformMatrix4fv(self.scale_loc, 1, GL_FALSE, c_scale)
glUniformMatrix4fv(self.rotate_loc, 1, GL_FALSE, c_rotate_y)
glUniformMatrix4fv(self.translate_loc, 1, GL_FALSE, c_translate)
class MyWindow(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_minimum_size(400, 300)
glClearColor(0.2, 0.3, 0.2, 1.0)
self.triangle = Triangle()
def on_draw(self):
self.clear()
glDrawArrays(GL_TRIANGLES, 0, 3)
def on_resize(self, width, height):
glViewport(0, 0, width, height)
def update(self, dt):
self.triangle.transform()
if __name__ == "__main__":
window = MyWindow(800, 600, "My Pyglet Window", resizable=True)
pyglet.clock.schedule_interval(window.update, 1/30.0)
pyglet.app.run()
|
import boto3
import botocore.exceptions
from core.utils import get_path
class Storage:
def __init__(self, bucket_name: str, index: str = 'index.html') -> None:
self.__client = boto3.resource('s3')
self.__bucket_name = bucket_name
self.__index = index
def get_index(self):
data = self.__client.Object(self.__bucket_name, self.__index)
try:
return data.get()['Body'].read().decode('utf-8')
except botocore.exceptions.ClientError:
return ''
def upload_package(self, package, dist_path: str = None):
for filename in package.files:
path = f'{get_path(dist_path)}/{filename}'
with open(path, mode='rb') as f:
self.__client.Object(self.__bucket_name, f'{package.name}/{filename}').put(
Body=f,
ContentType='application/x-gzip',
ACL='private'
)
def upload_index(self, html: str):
self.__client.Object(self.__bucket_name, self.__index).put(
Body=html,
ContentType='text/html',
CacheControl='public, must-revalidate, proxy-revalidate, max-age=0',
ACL='private'
)
def package_exists(self, path: str):
client = boto3.client('s3')
prefix = path
if prefix.startswith('/'):
prefix = prefix[1:] if len(prefix) > 1 else prefix
files = client.list_objects(Bucket=self.__bucket_name, Prefix=prefix)
return len(files.get('Contents', [])) > 0
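
# A minimal usage sketch (hypothetical bucket name; assumes boto3 can find AWS
# credentials in the environment):
#
#   storage = Storage('my-package-index-bucket')
#   index_html = storage.get_index()                    # '' if the index object is missing
#   storage.upload_index(index_html or '<html></html>')
#   has_files = storage.package_exists('/my-package')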
|
def extractMatchashortcakeWordpressCom(item):
'''
Parser for 'matchashortcake.wordpress.com'
'''
return None
|
# coding: utf-8
# In[ ]:
import pandas
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
import glob
import datetime
import itertools
from time import sleep
# In[ ]:
np.random.seed(1)
# In[ ]:
import os
import os.path
import gc
# In[ ]:
import argparse
parser = argparse.ArgumentParser(description = "Please insert the train flag")
# In[ ]:
parser.add_argument('-t', '--train', action = "store",
help='If true, we train and save. Else, otherwise.', required = True)
# In[ ]:
my_args = vars(parser.parse_args())
trainFlag = my_args['train']
trainFlag = trainFlag.lower() in ("t", "true", "1")  # compare lowercase strings only
# In[ ]:
print datetime.datetime.now()
validFilePaths = []
for f in os.listdir("data/anomaly_data"):
filePath = os.path.join("data/anomaly_data", f)
if os.path.isdir(filePath):
continue
if os.stat(filePath).st_size <= 3:
continue
validFilePaths.append(filePath)
numF = int(1 * len(validFilePaths))
print 'Using this many files {0}'.format(numF)
validFilePaths = np.random.choice(validFilePaths, numF, replace=False)
df_list = (pandas.read_csv(f) for f in validFilePaths)
df = pandas.concat(df_list, ignore_index=True)
df = df[df['radiant_win'].notnull()]
# In[ ]:
print df.shape
columns = df.columns
df_catInteger_features_example = filter(lambda x: 'hero_id' in x, columns)
# In[ ]:
from itertools import chain
# these will require string processing on the column names to work
numericalFeatures = ['positive_votes', 'negative_votes', 'first_blood_time', 'radiant_win',
'duration', 'kills', 'deaths', 'assists', 'kpm', 'kda', 'hero_dmg',
'gpm', 'hero_heal', 'xpm', 'totalgold', 'totalxp', 'lasthits', 'denies',
'tower_kills', 'courier_kills', 'observer_uses', 'sentry_uses',
'ancient_kills', 'camps_stacked', 'abandons'] #apm problem
categoricalIntegerFeatures = ['hero_id']#['barracks_status', 'tower_status', 'hero_id']
#'item0', 'item1', 'item2', 'item3', 'item4', 'item5']
categoricalFullFeatures = ['patch']
numFeatures = [filter(lambda x: z in x, columns) for z in numericalFeatures]
categoricalIntegerFeatures = [filter(lambda x: z in x, columns) for z in categoricalIntegerFeatures]
catFull = [filter(lambda x: z in x, columns) for z in categoricalFullFeatures]
numFeatures = list(chain(*numFeatures))
categoricalIntegerFeatures = list(chain(*categoricalIntegerFeatures))
catFull = list(chain(*catFull))
# In[ ]:
match_ids = df['match_id']
df_numerical = df[numFeatures]
df_numerical.loc[:, 'radiant_win'] = df_numerical.loc[:, 'radiant_win'].apply(lambda x : int(x))
df_numerical.iloc[:, 1:len(df_numerical.columns)] = df_numerical.iloc[:, 1:len(df_numerical.columns)].apply(lambda x: (x - np.nanmean(x)) / (np.nanmax(x) - np.nanmin(x)))
df_numerical = df_numerical.fillna(0)
df_numerical['radiant_win'] = df_numerical['radiant_win'].apply(lambda x: 1 if x >= 0 else 0)
df = df_numerical
# In[ ]:
x = np.random.rand(df.shape[0])
mask = np.where(x < 0.75)[0]
mask2 = np.where(x >= 0.75)[0]
df_train = df.iloc[mask, :]
df_test = df.iloc[mask2, :]
match_ids_train = match_ids.iloc[mask]
match_ids_test = match_ids.iloc[mask2]
# In[ ]:
NumFeatures = df.shape[1]
layer_size = [int(NumFeatures * 0.75), NumFeatures]
# In[ ]:
print NumFeatures
# In[ ]:
print df_train.shape
# In[ ]:
x = tf.placeholder(tf.float32, [None, NumFeatures])
y = x
#encoders
weights_1 = tf.Variable(tf.random_normal([NumFeatures, layer_size[0]], stddev = 1.0/NumFeatures/100), name='weights_1')
bias_1 = tf.Variable(tf.random_normal([layer_size[0]], stddev = 1.0/NumFeatures/100), name='bias_1')
#decoders
weights_2 = tf.Variable(tf.random_normal([layer_size[0], layer_size[1]], stddev = 1.0/NumFeatures/100), name='weights_2')
bias_2 = tf.Variable(tf.random_normal([layer_size[1]], stddev = 1.0/NumFeatures/100), name='bias_2')
layer1 = tf.tanh(tf.matmul(x, weights_1) + bias_1)
output = tf.tanh(tf.matmul(layer1, weights_2) + bias_2)
cost = tf.reduce_mean(tf.reduce_sum(tf.pow(y-output, 2), 1))
rank = tf.rank(cost)
learning_rate = 0.000001
beta1 = 0.5
beta2 = 0.5
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1, beta2=beta2)
gradients, variables = zip(*optimizer.compute_gradients(cost))
gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
train_op = optimizer.apply_gradients(zip(gradients, variables))
variable_dict = {'weights_1': weights_1, 'weights_2': weights_2,
'bias_1': bias_1, 'bias_2': bias_2}
saver = tf.train.Saver(variable_dict)
init = tf.global_variables_initializer()
ckpoint_dir = os.path.join(os.getcwd(), 'model-backups/model.ckpt')
# In[ ]:
flatten = lambda l: [item for sublist in l for item in sublist]
import requests
import json
def canIAnalyzeThisMatch(currentMatchID):
host = "https://api.opendota.com/api/matches/" + str(currentMatchID)
data = {'match_id': currentMatchID}
data = requests.get(host, data)
return data.status_code == 200
def test(sess, test_data):
batch = test_data
data = batch.as_matrix()
data = data.astype(np.float32)
layer1 = tf.tanh(tf.matmul(data, weights_1) + bias_1)
output = tf.tanh(tf.matmul(layer1, weights_2) + bias_2)
residuals = tf.reduce_sum(tf.abs(output - tf.cast(data, tf.float32)), axis = 1)
output_results, residuals = sess.run([output, residuals])
indices = np.argsort(residuals)[::-1]
return data, output_results, indices, residuals
# In[ ]:
def train():
numEpochs = 1000
numBatches = 1000
batchSize = int(round(0.01 * df_train.shape[0]))
for epochIter in xrange(numEpochs):
print 'Epoch: {0}'.format(epochIter)
gc.collect()
batch = df_train.sample(n=batchSize).as_matrix()
temp_out = sess.run(cost, feed_dict = {x: batch})
print temp_out
if (epochIter+1) % 50 == 0:
saver.save(sess, ckpoint_dir)
for batchItr in xrange(numBatches):
batch = df_train.sample(n=batchSize).as_matrix()
sess.run(train_op, feed_dict = {x : batch})
with tf.Session() as sess:
if sess.run(rank) != 0:
raise Exception("Wrong dimenions of cost")
if (trainFlag):
sess.run(init)
train()
else:
print 'Doing test'
saver.restore(sess, ckpoint_dir)
np.savetxt("data/weights1.csv", weights_1.eval(), delimiter=",")
np.savetxt("data/bias1.csv", bias_1.eval(), delimiter=",")
np.savetxt("data/weights2.csv", weights_2.eval(), delimiter=",")
np.savetxt("data/bias2.csv", bias_2.eval(), delimiter=",")
anomalies, output, indices_test, residuals = test(sess, df_test)
anomaliesSave = anomalies[indices_test, :]
output = output[indices_test, :]
print anomalies[0, 0:10]
print output[0, 0:10]
np.savetxt("data/anomalies.csv", anomaliesSave, delimiter=",")
np.savetxt("data/output.csv", output, delimiter=",")
np.savetxt('data/indices.csv', indices_test, delimiter = ',')
anomalizedAnalizable = match_ids_test.values
goodMatches = []
print len(anomalizedAnalizable)
for i in range(len(anomalizedAnalizable)):
an = anomalizedAnalizable[i]
residual = residuals[i]
goodMatches.append([int(an), residual])
np.savetxt('data/goodAnomaliesResidual.csv', np.array(goodMatches), delimiter = ',')
# In[ ]:
print 'Done'
print datetime.datetime.now()
# In[ ]:
|
import copy
import random
from cumulusci.core.config import BaseGlobalConfig
from cumulusci.core.config import BaseProjectConfig
from cumulusci.core.keychain import BaseProjectKeychain
from cumulusci.core.config import OrgConfig
def random_sha():
hash = random.getrandbits(128)
return "%032x" % hash
def create_project_config(
repo_name="TestRepo", repo_owner="TestOwner", repo_commit=None
):
global_config = BaseGlobalConfig()
project_config = DummyProjectConfig(
global_config=global_config,
repo_name=repo_name,
repo_owner=repo_owner,
repo_commit=repo_commit,
config=copy.deepcopy(global_config.config),
)
keychain = BaseProjectKeychain(project_config, None)
project_config.set_keychain(keychain)
return project_config
class DummyProjectConfig(BaseProjectConfig):
def __init__(
self, global_config, repo_name, repo_owner, repo_commit=None, config=None
):
repo_info = {
"owner": repo_owner,
"name": repo_name,
"url": f"https://github.com/{repo_owner}/{repo_name}",
"commit": repo_commit or random_sha(),
}
super(DummyProjectConfig, self).__init__(
global_config, config, repo_info=repo_info
)
class DummyOrgConfig(OrgConfig):
def __init__(self, config=None, name=None):
if not name:
name = "test"
super(DummyOrgConfig, self).__init__(config, name)
def refresh_oauth_token(self, keychain):
pass
class DummyLogger(object):
def __init__(self):
self.out = []
def log(self, msg, *args):
self.out.append(msg % args)
# Compatibility with various logging methods like info, warning, etc
def __getattr__(self, name):
return self.log
def get_output(self):
return "\n".join(self.out)
|
from dataclasses import dataclass
from .t_participant_association import TParticipantAssociation
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class ParticipantAssociation(TParticipantAssociation):
class Meta:
name = "participantAssociation"
namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
|
"""
Copyright (c) 2012-2013 Limor Fried, Kevin Townsend and Mikey Sklar for Adafruit Industries. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met: * Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided
with the distribution. * Neither the name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import time, signal, sys
from SF_ADC import ADS1x15
def signal_handler(signal, frame):
print 'You pressed Ctrl+C!'
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
#print 'Press Ctrl+C to exit'
# Select the gain
# gain = 6144 # +/- 6.144V
gain = 4096 # +/- 4.096V
# gain = 2048 # +/- 2.048V
# gain = 1024 # +/- 1.024V
# gain = 512 # +/- 0.512V
# gain = 256 # +/- 0.256V
# Select the sample rate
# sps = 128 # 128 samples per second
sps = 250 # 250 samples per second
# sps = 490 # 490 samples per second
# sps = 920 # 920 samples per second
# sps = 1600 # 1600 samples per second
# sps = 2400 # 2400 samples per second
# sps = 3300 # 3300 samples per second
# Initialise the ADC
# Full options = ADCS1x15(address=0x48, I2CPort=1)
adc = ADS1x15()
# Read channel 0 in single-ended mode using the settings above
volts = adc.readADCSingleEnded(0, gain, sps) / 1000
print "%.6f" % (volts)
volts = adc.readADCSingleEnded(1, gain, sps) / 1000
print "%.6f" % (volts)
volts = adc.readADCSingleEnded(2, gain, sps) / 1000
print "%.6f" % (volts)
volts = adc.readADCSingleEnded(3, gain, sps) / 1000
print "%.6f" % (volts)
# To read channel 3 in single-ended mode, +/- 1.024V, 860 sps use:
# volts = adc.readADCSingleEnded(3, 1024, 860)
|
import fileinput
counter = 0
triangles = []
templines = []
for line in fileinput.input():
templines.append([int(a) for a in line.split()])
if len(templines) == 3:
for asdf in range(3):
triangles.append((templines[0][asdf], templines[1][asdf], templines[2][asdf]))
templines = []
for sides in triangles:
sides = sorted(sides)
if sides[0] + sides[1] > sides[2]:
counter += 1
print(counter)
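# Example (hypothetical input): numbers are read three lines at a time and
# regrouped column-wise, so the block below yields the candidate triangles
# (101, 102, 103), (301, 302, 303) and (501, 502, 503):
#
#   101 301 501
#   102 302 502
#   103 303 503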
|
import time
from nitter_scraper import NitterScraper
last_tweet_id = None
with NitterScraper(port=8008) as nitter:
while True:
for tweet in nitter.get_tweets("dgnsrekt", pages=1, break_on_tweet_id=last_tweet_id):
if tweet.is_pinned is True:
continue
if tweet.is_retweet is True:
continue
if tweet.tweet_id != last_tweet_id:
print(tweet.json(indent=4))
last_tweet_id = tweet.tweet_id
break
time.sleep(0.1)
|
from torch import cat, cos, float64, sin, stack, tensor
from torch.nn import Module, Parameter
from core.dynamics import RoboticDynamics
class CartPole(RoboticDynamics, Module):
def __init__(self, m_c, m_p, l, g=9.81):
RoboticDynamics.__init__(self, 2, 1)
Module.__init__(self)
self.params = Parameter(tensor([m_c, m_p, l, g], dtype=float64))
def D(self, q):
m_c, m_p, l, _ = self.params
_, theta = q
return stack(
(stack([m_c + m_p, m_p * l * cos(theta)]),
stack([m_p * l * cos(theta), m_p * (l ** 2)])))
def C(self, q, q_dot):
_, m_p, l, _ = self.params
z = tensor(0, dtype=float64)
_, theta = q
_, theta_dot = q_dot
return stack((stack([z, -m_p * l * theta_dot * sin(theta)]),
stack([z, z])))
def U(self, q):
_, m_p, l, g = self.params
_, theta = q
return m_p * g * l * cos(theta)
def G(self, q):
_, m_p, l, g = self.params
_, theta = q
z = tensor(0, dtype=float64)
return stack([z, -m_p * g * l * sin(theta)])
def B(self, q):
return tensor([[1], [0]], dtype=float64)
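
# A minimal usage sketch (hypothetical parameter values; assumes RoboticDynamics
# consumes the D/C/U/G/B terms defined above):
#
#   cart_pole = CartPole(m_c=1.0, m_p=0.25, l=0.5)
#   q = tensor([0.0, 0.1], dtype=float64)        # [cart position, pole angle]
#   q_dot = tensor([0.0, 0.0], dtype=float64)    # generalized velocities
#   D, C, G = cart_pole.D(q), cart_pole.C(q, q_dot), cart_pole.G(q)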
|
import sys
import ast
import inspect
import textwrap
from typing_inspect_isle import class_typevar_mapping
from dataclasses import (
is_dataclass,
MISSING as DC_MISSING,
asdict,
fields as dc_fields,
Field as DCField,
)
from typing import (
Any,
Type,
Dict,
cast,
Union,
Mapping,
Optional,
TypeVar,
Tuple,
Callable,
List,
)
from typing_inspect_isle import (
is_optional_type,
is_union_type,
get_args,
is_generic_type,
get_origin,
is_typevar,
)
from marshmallow import fields, Schema
from marshmallow.fields import Field
from collections.abc import Mapping as MappingABC
from ._settings_classes import (
HandlerType,
_SchemaGenSettings,
_FieldGenSettings,
Garams,
DEFAULT,
)
from ._field_conversion import FIELD_CONVERSION
from ._schema_classes import DataSchemaConcrete
from ._field_classes import NestedOptional
from ._schema_classes import DataSchema
from ._fast_conversion import FastEncoder
DEFAULT_SCHEMA: Type[DataSchemaConcrete] = DataSchema
SchemaType = TypeVar("SchemaType", bound=DataSchemaConcrete)
def dataclass_schema(
data_class: Any,
schema_base: Type[SchemaType] = DEFAULT_SCHEMA, # type: ignore
type_handlers: "Optional[Dict[Type[Any], Type[HandlerType]]]" = None,
add_handler: bool = True,
) -> Type[SchemaType]:
"""
Converts a dataclass to a Marshmallow schema
:param data_class: dataclass to convert
:param schema_base: ``marshmallow.Schema`` class to subclass
:param type_handlers: ``{type, Schema}`` mapping of existing schemas to use for
given type.
:param add_handler: Whether to add this schema to ``type_handlers``.
:return: Subclass of ``schema_base`` with dataclass fields converted to Marshmallow
fields.
"""
settings = _configure_settings(data_class, schema_base, type_handlers)
class_dict = _get_schema_dict(schema_base, settings)
this_schema = type(f"{data_class.__name__}Schema", (schema_base,), class_dict)
this_schema = cast(Type[Schema], this_schema)
# add schema as new type handler.
if add_handler and type_handlers is not None:
type_handlers[data_class] = this_schema
# generate the fast encoder for this schema.
class SchemaFastEncoder(FastEncoder, type_handlers=type_handlers): # type: ignore
pass
this_schema._FAST_ENCODER = SchemaFastEncoder # type: ignore
return this_schema # type: ignore
def schema_for(
data_class: Any,
type_handlers: "Optional[Dict[Type[Any], Type[HandlerType]]]" = None,
add_handler: bool = True,
) -> Callable[[Type[SchemaType]], Type[SchemaType]]:
"""
Class decorator for Schema class that adds marshmallow fields for ``data_class``
:param data_class: class to alter schema for
:param type_handlers: ``{type, Schema}`` mapping of existing schemas to use for
given type.
:return: Same schema class object passed in, with added fields for ``data_class``
"""
def class_gen(schema_class: Type[SchemaType]) -> Type[SchemaType]:
gen_class = dataclass_schema(
data_class, schema_class, type_handlers, add_handler=add_handler
)
existing_dict = dict(schema_class.__dict__)
existing_dict.update(gen_class.__dict__)
existing_dict.pop("__doc__")
for name, item in existing_dict.items():
setattr(schema_class, name, item)
        # If the schema doesn't have its own docstring, use the one from the
# dataclass.
if schema_class.__doc__ is None:
schema_class.__doc__ = data_class.__doc__
# add schema as new type handler.
if add_handler and type_handlers is not None:
type_handlers[data_class] = schema_class
return schema_class
return class_gen
def _configure_settings(
data_class: Type[Any],
schema_base: Type[SchemaType],
type_handlers: "Optional[Dict[Type[Any], Type[HandlerType]]]",
) -> _SchemaGenSettings:
"""sets up schema settings based on params"""
if not is_dataclass(data_class) or not isinstance(data_class, type):
raise ValueError(f"{data_class} is not dataclass type")
if type_handlers is None:
type_handlers = dict()
docstrings = get_dataclass_field_docstrings(data_class)
settings = _SchemaGenSettings(data_class, schema_base, type_handlers, docstrings)
    # add the default converters for any types that don't already have a custom handler.
default_converters = {
k: v for k, v in FIELD_CONVERSION.items() if k not in settings.type_handlers
}
settings.type_handlers.update(default_converters)
return settings
def _get_schema_dict(
schema: Type[Schema], settings: _SchemaGenSettings
) -> Dict[str, Any]:
"""Monkey-patches marshmallow fields to schema"""
class_dict: Dict[str, Any] = dict()
if sys.version_info[:2] >= (3, 7):
class_typevar_mapping(settings.data_class, settings.type_var_index)
this_field: DCField
for this_field in dc_fields(settings.data_class):
class_dict[this_field.name] = _convert_type(this_field, settings)
if issubclass(schema, DataSchemaConcrete):
class_dict["__model__"] = settings.data_class
f: DCField
class_dict["_dump_only"] = [
f.name
for f in dc_fields(settings.data_class)
if f.metadata is not None and f.metadata.get("garams", Garams()).dump_only
]
return class_dict
def _convert_type(
data_field: Union[DCField, Type], schema_settings: _SchemaGenSettings
) -> fields.Field:
"""Converts dataclass field to Marshmallow field"""
settings = _FieldGenSettings(data_field, schema_settings)
if is_typevar(settings.type) and sys.version_info[:2] >= (3, 7):
_unpack_type_var(settings)
settings.type, settings.optional = _unpack_optional(settings.type)
if is_typevar(settings.type) and sys.version_info[:2] >= (3, 7):
_unpack_type_var(settings)
settings.type, optional_second = _unpack_optional(settings.type)
settings.optional = settings.optional or optional_second
_validate_type(settings.type)
settings.data_handler = _get_handler_type(settings)
if issubclass(settings.data_handler, Schema):
settings.args = (settings.data_handler,)
settings.data_handler = fields.Nested
elif (
is_dataclass(settings.type)
and settings.type not in settings.schema_settings.type_handlers
):
# We make a nested schema if a dataclass does not already have a type handler
nested_schema = dataclass_schema(
settings.type,
# We need to pass a clean base here, as the incoming one might have
# validators and the like attached to it from a decorated schema.
schema_base=DataSchemaConcrete,
type_handlers=schema_settings.type_handlers,
)
settings.args = (nested_schema,)
elif is_generic_type(settings.type):
_get_interior_fields(settings)
_generate_field_options(settings)
marshmallow_field = settings.data_handler(*settings.args, **settings.kwargs)
return marshmallow_field
def _unpack_type_var(settings: _FieldGenSettings) -> None:
try:
settings.type = settings.schema_settings.type_var_index[settings.type]
except KeyError:
raise TypeError(f"Label '{settings.type}' does not have concrete type")
def _validate_type(field_type: Any) -> None:
"""checks for bad field types"""
if is_union_type(field_type):
raise TypeError("model fields cannot contain unions")
if field_type is Any:
raise TypeError("model fields cannot have type Any")
if not is_generic_type(field_type):
if issubclass(field_type, (list, tuple)):
raise TypeError("model field collections must be generic type with args")
def _get_handler_type(settings: _FieldGenSettings) -> Type[HandlerType]:
"""Gets Marshmallow field/schema based on type"""
for handler_type, handler in settings.schema_settings.type_handlers.items():
test_type = get_origin(settings.type) or settings.type
if issubclass(test_type, handler_type):
return handler
if is_dataclass(settings.type):
return NestedOptional
raise TypeError(f"No type handler exists for {settings.type}")
def _add_field_default(settings: _FieldGenSettings) -> None:
"""
Determines defaults for Marshmallow Field and adds them as kwargs for Marshmallow
fields
"""
    # if a field has a default or a default factory, we can add that to the field kwargs
settings.data_field = cast(DCField, settings.data_field)
if settings.data_field.default is not DC_MISSING:
settings.kwargs["required"] = False
settings.kwargs["missing"] = settings.data_field.default
settings.kwargs["default"] = settings.data_field.default
elif settings.data_field.default_factory is not DC_MISSING: # type: ignore
settings.kwargs["required"] = False
settings.kwargs["missing"] = settings.data_field.default_factory # type: ignore
settings.kwargs["default"] = settings.data_field.default_factory # type: ignore
else:
settings.kwargs["required"] = True
def _generate_field_options(settings: _FieldGenSettings) -> None:
"""generates the options for Marshmallow's Field class"""
if settings.data_field is not None:
_add_field_default(settings)
else:
settings.kwargs["required"] = True
    # if a field is optional, None is allowed as a value
if settings.optional:
settings.kwargs["allow_none"] = True
else:
settings.kwargs["allow_none"] = False
if settings.data_field is None or settings.data_field.metadata is None:
return
# apply docstrings
try:
docstring = settings.schema_settings.field_docstrings[settings.data_field.name]
except KeyError:
pass
else:
settings.kwargs["description"] = docstring
# handle passed marshmallow params
try:
passed_kwargs: Garams = settings.data_field.metadata["garams"]
except KeyError:
return
settings.kwargs.update(
(key, value)
for key, value in asdict(passed_kwargs).items()
if value is not DEFAULT
)
# if something is required, marshmallow disallows defaults, so we'll remove those.
if settings.kwargs["required"]:
settings.kwargs.pop("missing", None)
settings.kwargs.pop("default", None)
def _get_interior_fields(settings: _FieldGenSettings) -> None:
"""
    Converts inner fields of a generic type to options/arguments for its Marshmallow
container.
"""
inner_fields: List[Field] = [
_convert_type(t, settings.schema_settings) for t in get_args(settings.type)
]
if get_origin(settings.type) in [Mapping, dict, MappingABC]:
settings.kwargs["keys"] = inner_fields[0]
settings.kwargs["values"] = inner_fields[1]
elif len(inner_fields) == 1 and isinstance(inner_fields[0], fields.Nested):
# We need to handle lists of schemas differently, so that kwargs are passed
# to them at runtime.
inner = inner_fields[0]
settings.data_handler = NestedOptional
settings.args = (inner.nested,)
settings.kwargs["many"] = True
if inner.allow_none is True: # type: ignore
settings.optional = True
else:
settings.args = tuple(inner_fields)
def _filter_none_type(data_type: Type) -> bool:
"""Filters out NoneType when getting union args"""
if is_typevar(data_type):
return True
if issubclass(data_type, type(None)):
return False
return True
def _unpack_optional(data_type: Type) -> Tuple[Type, bool]:
"""gets type of optional type"""
# we need to unpack optional types, which are really just unions with None
optional = False
while is_optional_type(data_type):
optional = True
data_types = tuple(t for t in get_args(data_type) if _filter_none_type(t))
if len(data_types) > 1:
raise TypeError("model fields cannot contain unions")
data_type = data_types[0]
return data_type, optional
def get_dataclass_field_docstrings(data_class: Type[Any]) -> Dict[str, str]:
docstring_dict: Dict[str, str] = dict()
_recurse_dataclass_for_docstrings(data_class, docstring_dict)
return docstring_dict
def _format_field_docstring(docstring: str) -> str:
"""
    Remove leading and trailing newlines from multi-line descriptions, as stray
    newlines can affect the formatting of redoc and other documentation tools.
"""
docstring = textwrap.dedent(docstring)
description = docstring.strip("\n").rstrip("\n")
# Add a period for consistency.
if not description.endswith("."):
description += "."
# Capitalize first letter for consistency.
first_letter = description[0]
capitalized = first_letter.capitalize()
description = capitalized + description[1:]
return description
def _recurse_dataclass_for_docstrings(
data_class: Type[Any], current_dict: Dict[str, str]
) -> None:
if is_dataclass(data_class):
try:
source = inspect.getsource(data_class)
except OSError:
pass
else:
source = textwrap.dedent(source)
parsed = ast.parse(source)
is_attr = False
for item in parsed.body[0].body: # type: ignore
if hasattr(item, "target"):
is_attr = True
attr_name = item.target
continue
if is_attr and hasattr(item, "value"):
if attr_name.id not in current_dict:
description: str = _format_field_docstring(item.value.s)
current_dict[attr_name.id] = description
elif not hasattr(item, "target"):
is_attr = False
for this_class in data_class.__bases__:
_recurse_dataclass_for_docstrings(this_class, current_dict)
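
# A minimal usage sketch (hypothetical dataclass; only exercises the public
# helpers defined above):
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class Point:
#       x: int
#       y: int
#
#   PointSchema = dataclass_schema(Point)
#   PointSchema().dump(Point(x=1, y=2))   # -> {"x": 1, "y": 2}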
|
"""Unit and functional tests."""
|
#!/usr/bin/env python3
"""tests for max_rep.py"""
from subprocess import getstatusoutput, getoutput
import os
import random
import re
import string
import max_rep
prg = "./max_rep.py"
# --------------------------------------------------
def test_usage():
"""usage"""
rv1, out1 = getstatusoutput(prg)
assert rv1 > 0
assert re.match("usage", out1, re.IGNORECASE)
# --------------------------------------------------
def test_bad_usage():
rv2, out2 = getstatusoutput("{} -w 275 -r foo".format(prg))
assert rv2 > 0
assert re.match('the value given "foo" is not a digit', out2, re.IGNORECASE)
rv3, out3 = getstatusoutput("{} -w 0 -r 5".format(prg))
print('hello', rv3)
assert rv3 > 0
assert out3 == "This is not a valid weight for an average human..."
# --------------------------------------------------
def test_good_usage():
rv3, out3 = getstatusoutput("{} -w 275 -r 5".format(prg))
print('hello',rv3)
print(out3)
assert rv3 == 0
assert out3.rstrip() =='Based upon your 5 reps at a weight of 275 lbs, your 1 rep max is 320.83 lbs\nHappy Lifting'
|
import autoencoder as ae
import numpy as np
import itertools
import pandas as pd
import os
import sys
from sklearn.model_selection import train_test_split
def main(*argv):
# Starting dimensions
dims = 190
# K-fold folds
folds = 5
# Grid search params
lr = [0.0001, 0.001, 0.01, 0.1]
batch = [32, 64, 128, 256]
epochs = [10, 50, 75, 100, 150]
# Param vector
iter_dims = []
# List of key params to test
key_params = list(itertools.product(lr,batch,epochs))
### Input files ###
# Open normalized data and dependent, non-normalized data
dfx = pd.read_csv(f"{os.environ['HOME']}/github_repos/Pollution-Autoencoders/data/data_norm/co_data_norm.csv")
dfy = pd.read_csv(f"{os.environ['HOME']}/github_repos/Pollution-Autoencoders/data/data_clean/co_data_clean.csv")
# City names to append
cities = dfy['city'].values
# Grid params to create model with
param_grid = pd.read_csv(f'/home/nick/github_repos/Pollution-Autoencoders/data/grid_params/hyperparams.csv')
# Name of the component gas
component = 'no2'
file_name=''
# Set x as the normalized values, y (non-normalized) as the daily average of final day
X = dfx.values
Y = dfy.loc[:, ['co_2021_06_06']].values
# Split into train/test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20, random_state=40)
# Arguments
for args in sys.argv:
if args == "-k_1_5" or args == "--first-five":
iter_dims = np.arange(1,6,1)
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/grid_params/{component}_grid_params_1_5.csv'
elif args == "-k_6_9" or args == "--six-to-nine":
iter_dims = np.arange(6,10,1)
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/grid_params/{component}_grid_params_6_9.csv'
elif args == "-k_10_60" or args == "--ten-to-fifty":
iter_dims = np.arange(10,51,10)
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/grid_params/{component}_grid_params_10_50.csv'
elif args == "-k_70_120" or args == "--seventy-to-onetwenty":
iter_dims = np.arange(70,121,10)
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/grid_params/{component}_grid_params_50_100.csv'
elif args == "-k_all" or args == "--run-all":
iter_dims = np.append(np.arange(1, 10, 1), np.arange(10, 120, 10))
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/grid_params/{component}_grid_params_all.csv'
# Perform grid search
ae.grid_search(
file_name=file_name,
x=X,
y=Y,
folds=folds,
component=component,
iter_dims=iter_dims,
key_params=key_params
)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
def concat_classes(classes):
"""
Merges a list of classes and return concatenated string
"""
return ' '.join(_class for _class in classes if _class)
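
# Example: falsy entries (None, '') are dropped before joining.
#
#   concat_classes(['btn', None, '', 'btn-primary'])  # -> 'btn btn-primary'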
|
#-------------------------------------------------------------
# Name: Create Portal Content
# Purpose: Creates a list of portal content from a CSV file. Can set to only create a single
# item type from the CSV or everything in the CSV.
# - Creates groups, web maps, web mapping apps, dashboards and feature services listed in the CSV.
# - Shares item with organisation and/or groups if specified.
# - Adds users to group if specified.
# - If group already exists, will update the item details and add users to the group
# - Publishes feature service from a zipped FGDB
# - If feature service already exists, updates data in feature service from zipped FGDB
# Author: Shaun Weston (shaun_weston@eagle.co.nz)
# Date Created: 24/01/2019
# Last Updated: 19/11/2019
# ArcGIS Version: ArcGIS API for Python 1.5.2+
# Python Version: 3.6.5+ (Anaconda Distribution)
#--------------------------------
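# The CSV is expected to contain the columns read below: Title, Type, Summary,
# Description, Tags, Thumbnail, Organisation Sharing, Group Sharing, Members and
# Data. A hypothetical row for a group might look like:
#   Title,Type,Summary,Description,Tags,Thumbnail,Organisation Sharing,Group Sharing,Members,Data
#   Parks Group,Group,Parks team group,Group for parks staff,parks,C:\thumbnails\parks.png,Private,,jsmith,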
# Import main modules
import os
import sys
import logging
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import email.mime.application
import csv
# Import ArcGIS modules
useArcPy = "false"
useArcGISAPIPython = "true"
if (useArcPy == "true"):
# Import arcpy module
import arcpy
# Enable data to be overwritten
arcpy.env.overwriteOutput = True
if (useArcGISAPIPython == "true"):
# Import arcgis module
import arcgis
import json
# Set global variables
# Logging
enableLogging = "true" # Use within code to print and log messages - printMessage("xxx","info"), printMessage("xxx","warning"), printMessage("xxx","error")
logFile = os.path.join(os.path.dirname(__file__), "CreatePortalContent.log") # e.g. os.path.join(os.path.dirname(__file__), "Example.log")
# Email - use within code to send email: sendEmail(subject,message,attachment)
sendErrorEmail = "false"
emailServerName = "" # e.g. smtp.gmail.com
emailServerPort = None # e.g. 25
emailTo = "" # Address of email sent to
emailUser = "" # Address of email sent from
emailPassword = ""
# Proxy
enableProxy = "false"
requestProtocol = "http" # http or https
proxyURL = ""
# Output
output = None
# Start of main function
def mainFunction(portalURL,portalUser,portalPassword,csvFileLocation,setItemType): # Add parameters sent to the script here e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
try:
# --------------------------------------- Start of code --------------------------------------- #
# Connect to GIS portal
printMessage("Connecting to GIS Portal - " + portalURL + "...","info")
gisPortal = arcgis.GIS(url=portalURL, username=portalUser, password=portalPassword, verify_cert=False)
# If item type
if (setItemType):
printMessage("Only creating content of type " + setItemType + "...","info")
# Read the csv file
with open(csvFileLocation) as csvFile:
reader = csv.DictReader(csvFile)
# For each row in the CSV
for row in reader:
processRow = True
# Get the item type from the CSV
itemType = row["Type"]
# If item type
if (setItemType):
# If the row does not equal the item type set, do not process
if (setItemType.lower().replace(" ", "") != itemType.lower().replace(" ", "")):
processRow = False
# If processing this row
if (processRow == True):
# If a title is provided
if (row["Title"].replace(" ", "")):
# Set organisation sharing
if row["Organisation Sharing"]:
organisationSharing = row["Organisation Sharing"]
else:
organisationSharing = "Private"
# Set group sharing
if row["Group Sharing"]:
# Create a list of group IDs to share with
groupSharing = []
groupSharingTitles = row["Group Sharing"].split(",")
for groupTitle in groupSharingTitles:
# Get the item ID of the group
itemID = getIDforPortalItem(gisPortal,groupTitle,"group")
groupSharing.append(itemID)
groupSharing = ','.join(groupSharing)
else:
groupSharing = ""
if (itemType.lower().replace(" ", "") == "group"):
# Create group
createGroup(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,row["Members"])
elif (itemType.lower().replace(" ", "") == "webmap"):
# Create web map
createWebMap(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
elif (itemType.lower().replace(" ", "") == "webscene"):
# Create web scene
createWebScene(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
elif (itemType.lower().replace(" ", "") == "webmappingapplication"):
# Create web mapping application
createWebApplication(portalURL,gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
elif (itemType.lower().replace(" ", "") == "dashboard"):
# Create dashboard
createDashboard(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
elif (itemType.lower().replace(" ", "") == "form"):
# Create form
createForm(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
elif (itemType.lower().replace(" ", "") == "tileservice"):
# Create tile service
createTileService(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
elif (itemType.lower().replace(" ", "") == "featureservice"):
# Create feature service
createFeatureService(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
elif (itemType.lower().replace(" ", "") == "featureserviceview"):
# Create feature service view
createFeatureServiceView(gisPortal,row["Title"],row["Summary"],row["Description"],row["Tags"],row["Thumbnail"],organisationSharing,groupSharing,row["Data"])
else:
printMessage(row["Title"] + " item in CSV does not have a valid type set and will not be created...","warning")
else:
printMessage("Item in CSV does not have a title set and will not be created...","warning")
# --------------------------------------- End of code --------------------------------------- #
# If called from ArcGIS GP tool
if __name__ == '__main__':
# Return the output if there is any
if output:
# If using ArcPy
if (useArcPy == "true"):
arcpy.SetParameter(1, output)
# ArcGIS desktop not installed
else:
return output
# Otherwise return the result
else:
# Return the output if there is any
if output:
return output
# Logging
if (enableLogging == "true"):
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
# If error
except Exception as e:
# Build and show the error message
# errorMessage = arcpy.GetMessages(2)
errorMessage = ""
# Build and show the error message
# If many arguments
if (e.args):
for i in range(len(e.args)):
if (i == 0):
errorMessage = str(e.args[i]).encode('utf-8').decode('utf-8')
else:
errorMessage = errorMessage + " " + str(e.args[i]).encode('utf-8').decode('utf-8')
# Else just one argument
else:
errorMessage = e
printMessage(errorMessage,"error")
# Logging
if (enableLogging == "true"):
# Log error
logger.error(errorMessage)
# Log end of process
logger.info("Process ended.")
# Remove file handler and close log file
logMessage.flush()
logMessage.close()
logger.handlers = []
if (sendErrorEmail == "true"):
# Send email
sendEmail("Python Script Error",errorMessage,None)
# End of main function
# Start of create group function
def createGroup(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,members):
printMessage("Creating group - " + title + "...","info")
# FUNCTION - Search portal to see if group is already there
groupExists = searchPortalForItem(gisPortal,title,"Group")
# If group has not been created
if (groupExists == False):
# Create the group
group = gisPortal.groups.create(title=title,
description=description,
snippet=summary,
tags=tags,
access=organisationSharing.lower(),
thumbnail=thumbnail,
is_invitation_only=True,
sort_field = 'title',
sort_order ='asc',
is_view_only=False,
users_update_items=False)
printMessage(title + " group created - " + group.id + "...","info")
# Group already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"group")
# Get the group
group = gisPortal.groups.get(itemID)
# Update the group
group.update(title=title,
description=description,
snippet=summary,
tags=tags,
access=organisationSharing.lower(),
thumbnail=thumbnail,
is_invitation_only=True,
sort_field = 'title',
sort_order ='asc',
is_view_only=False,
users_update_items=False)
printMessage(title + " group item properties updated - " + itemID + "...","info")
# If users are provided
if members:
printMessage("Adding the following users to the group - " + members + "...","info")
members = members.split(",")
# Add users to the group
group.add_users(members)
# End of create group function
# Start of create web mapping application function
def createWebApplication(portalURL,gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating web mapping application - " + title + "...","info")
# FUNCTION - Search portal to see if web application is already there
webApplicationExists = searchPortalForItem(gisPortal,title,"Web Mapping Application")
# Create the web mapping application properties
itemProperties = {'title':title,
'type':"Web Mapping Application",
'typeKeywords':"JavaScript,Map,Mapping Site,Online Map,Ready To Use,WAB2D,Web AppBuilder,Web Map",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the web application has not been created
if (webApplicationExists == False):
# Add the web application
item = gisPortal.content.add(item_properties=itemProperties)
# Get the JSON data from the file if provided
jsonData = "{}"
if (dataFile):
# If the file exists
if (os.path.exists(dataFile)):
with open(dataFile) as jsonFile:
# Update the item ID
data = json.load(jsonFile)
data["appItemId"] = item.id
jsonData = json.dumps(data)
else:
printMessage(title + " web mapping application data does not exist - " + dataFile + "...","warning")
# Update the URL and data properties
itemProperties = {'url':portalURL + "/apps/webappviewer/index.html?id=" + item.id,
'text':jsonData}
item.update(itemProperties)
printMessage(title + " web mapping application created - " + item.id + "...","info")
# Web application already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"web mapping application")
# Get the web application
item = gisPortal.content.get(itemID)
# Update the web application
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " web mapping application item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create web mapping application function
# Start of create dashboard function
def createDashboard(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating dashboard - " + title + "...","info")
    # FUNCTION - Search portal to see if dashboard is already there
dashboardExists = searchPortalForItem(gisPortal,title,"Dashboard")
# Create the dashboard properties
itemProperties = {'title':title,
'type':"Dashboard",
'typeKeywords': "Dashboard,Operations Dashboard",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the dashboard has not been created
if (dashboardExists == False):
# Add the dashboard
item = gisPortal.content.add(item_properties=itemProperties)
# Get the JSON data from the file if provided
jsonData = "{}"
if (dataFile):
# If the file exists
if (os.path.exists(dataFile)):
with open(dataFile) as jsonFile:
data = json.load(jsonFile)
jsonData = json.dumps(data)
else:
printMessage(title + " dashboard data does not exist - " + dataFile + "...","warning")
        # Update the data property
itemProperties = {'text':jsonData}
item.update(itemProperties)
printMessage(title + " dashboard created - " + item.id + "...","info")
# Dashboard already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"dashboard")
# Get the dashboard
item = gisPortal.content.get(itemID)
# Update the dashboard
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " dashboard item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create dashboard function
# Start of create web map function
def createWebMap(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating web map - " + title + "...","info")
# FUNCTION - Search portal to see if web map is already there
webmapExists = searchPortalForItem(gisPortal,title,"Web Map")
# Create the web map properties
itemProperties = {'title':title,
'type':"Web Map",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the web map has not been created
if (webmapExists == False):
# Add the web map
item = gisPortal.content.add(item_properties=itemProperties)
# Get the JSON data from the file if provided
jsonData = "{}"
if (dataFile):
# If the file exists
if (os.path.exists(dataFile)):
with open(dataFile) as jsonFile:
jsonData = json.load(jsonFile)
# Add data to item properties
itemProperties['text'] = jsonData
else:
printMessage(title + " web map data does not exist - " + dataFile + "...","warning")
        # Update the data property
itemProperties = {'text':jsonData}
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " web map created - " + item.id + "...","info")
# Web map already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"web map")
# Get the web map
item = gisPortal.content.get(itemID)
# Update the web map
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " web map item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create web map function
# Start of create web scene function
def createWebScene(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating web scene - " + title + "...","info")
# FUNCTION - Search portal to see if web scene is already there
webSceneExists = searchPortalForItem(gisPortal,title,"Web Scene")
    # Create the web scene properties
itemProperties = {'title':title,
'type':"Web Scene",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the web scene has not been created
if (webSceneExists == False):
printMessage("Function currently not supported...","info")
    # Web scene already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"web scene")
# Get the web scene
item = gisPortal.content.get(itemID)
# Update the web scene
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " web scene item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create web scene function
# Start of create form function
def createForm(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating form - " + title + "...","info")
# FUNCTION - Search portal to see if form is already there
formExists = searchPortalForItem(gisPortal,title,"Form")
# Create the form properties
itemProperties = {'title':title,
'type':"Form",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the form has not been created
if (formExists == False):
printMessage("Function currently not supported...","info")
# Form already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"form")
# Get the form
item = gisPortal.content.get(itemID)
# Update the form
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " form item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create form function
# Start of create tile service function
def createTileService(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating tile service - " + title + "...","info")
    # FUNCTION - Search portal to see if tile service is already there
tileServiceExists = searchPortalForItem(gisPortal,title,"Map Service")
    # Create the tile service properties
itemProperties = {'title':title,
'type':"Map Service",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the tile service has not been created
if (tileServiceExists == False):
printMessage("Function currently not supported...","info")
# Tile service already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"Map Service")
# Get the tile service
item = gisPortal.content.get(itemID)
# Update the tile service
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " tile service item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create tile service function
# Start of create feature service function
def createFeatureService(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating feature service - " + title + "...","info")
# FUNCTION - Search portal to see if feature service is already there
featureServiceExists = searchPortalForItem(gisPortal,title,"Feature Service")
# Create the feature service properties
itemProperties = {'title':title,
'type':"Feature Service",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the feature service has not been created
if (featureServiceExists == False):
# Get the FGDB data from the file if provided
if (dataFile):
# If the file exists
if (os.path.exists(dataFile)):
# FUNCTION - Search portal to see if file geodatabase is already there
fgdbExists = searchPortalForItem(gisPortal,os.path.basename(dataFile),"File Geodatabase")
printMessage("Loading data from zip file (FGDB) - " + dataFile,"info")
# If the file geodatabase has not been created
if (fgdbExists == False):
# Upload file
fgdbItem = gisPortal.content.add({"title":title,"type":"File Geodatabase"},dataFile)
printMessage("FGDB uploaded - " + fgdbItem.id + "...", "info")
# File geodatabase already exists
else:
# Get the item ID
fgdbItemID = getIDforPortalItem(gisPortal,os.path.basename(dataFile),"File Geodatabase")
# Get the file geodatabase
fgdbItem = gisPortal.content.get(fgdbItemID)
# Upload file
updateResult = fgdbItem.update({"title":title,"type":"File Geodatabase"},dataFile)
if "error" in str(updateResult):
printMessage(updateResult,"error")
printMessage("FGDB updated - " + fgdbItem.id + "...", "info")
# Publish the feature service
item = fgdbItem.publish()
item.update(itemProperties)
printMessage(title + " feature service created - " + item.id + "...","info")
else:
printMessage(title + " FGDB data (.zip) does not exist - " + dataFile + "...","warning")
# Data not provided
else:
printMessage(title + " FGDB data (.zip) has not been provided...","warning")
# Feature service already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"feature service")
# Get the feature service
item = gisPortal.content.get(itemID)
# Get the FGDB data from the file if provided
if (dataFile):
# If the file exists
if (os.path.exists(dataFile)):
# If there are layers in the item
if (item.layers):
# Get the feature layer to be updated - Assuming just one feature layer in service
featureLayers = item.layers
featureLayerUpdate = ""
for featureLayer in featureLayers:
featureLayerUpdate = featureLayer
# FUNCTION - Search portal to see if file geodatabase is already there
fgdbExists = searchPortalForItem(gisPortal,os.path.basename(dataFile),"File Geodatabase")
printMessage("Loading data from zip file (FGDB) - " + dataFile,"info")
# If the file geodatabase has not been created
if (fgdbExists == False):
fgdbItem = gisPortal.content.add({"title":title,"type":"File Geodatabase"},dataFile)
printMessage("FGDB uploaded - " + fgdbItem.id + "...", "info")
# File geodatabase already exists
else:
# Get the item ID
fgdbItemID = getIDforPortalItem(gisPortal,os.path.basename(dataFile),"File Geodatabase")
# Get the file geodatabase
fgdbItem = gisPortal.content.get(fgdbItemID)
# Upload file
updateResult = fgdbItem.update({"title":title,"type":"File Geodatabase"},dataFile)
if "error" in str(updateResult):
printMessage(updateResult,"error")
printMessage("FGDB updated - " + fgdbItem.id + "...", "info")
# Three publishing options
# - Append will truncate and append all records
# - Overwrite will republish and overwrite the existing service as append only works in ArcGIS Online
# - Republish will delete and recreate the service as overwrite was not working in portal when I was testing
updateMode = "Append" # "Republish", "Overwrite" or "Append"
if (updateMode.lower() == "republish"):
if (fgdbExists == True):
printMessage("Deleting feature service...", "info")
item.delete()
printMessage("Republishing feature service...", "info")
# Publish the feature service
item = fgdbItem.publish(overwrite=False)
printMessage(title + " feature service recreated - " + item.id + "...","info")
else:
printMessage(title + " - The FGDB for this feature service does not exist in portal...","error")
# Overwriting data
elif (updateMode.lower() == "overwrite"):
if (fgdbExists == True):
printMessage("Overwriting feature service...", "info")
# Publish the feature service
item = fgdbItem.publish(overwrite=True)
else:
printMessage(title + " - The FGDB for this feature service does not exist in portal...","error")
# Reloading data
else:
# Delete all features
printMessage("Deleting all data from feature layer...", "info")
deleteResult = featureLayerUpdate.delete_features(where="objectid > 0")
if "error" in str(deleteResult):
printMessage(deleteResult,"error")
# Add all the features
printMessage("Loading data into feature layer...", "info")
addResult = featureLayerUpdate.append(item_id=fgdbItem.id,
upload_format="filegdb",
upsert=False,
skip_updates=False,
use_globalids=False,
update_geometry=True,
rollback=False,
skip_inserts=False)
if "error" in str(addResult):
printMessage(addResult,"error")
# Get the new features
featureSet = featureLayerUpdate.query()
features = featureSet.features
printMessage("Record count - " + str(len(features)),"info")
else:
printMessage(title + " - There is an issue with the feature service...","error")
else:
printMessage(title + " FGDB data (.zip) does not exist - " + dataFile + "...","warning")
# Update the feature service
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " feature service item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create feature service function
# Start of create feature service view function
def createFeatureServiceView(gisPortal,title,summary,description,tags,thumbnail,organisationSharing,groupSharing,dataFile):
printMessage("Creating feature service view - " + title + "...","info")
# FUNCTION - Search portal to see if feature service view is already there
featureServiceViewExists = searchPortalForItem(gisPortal,title,"Feature Service")
# Create the feature service view properties
itemProperties = {'title':title,
'type':"Feature Service",
'description':description,
'snippet':summary,
'tags':tags,
'thumbnail':thumbnail,
'access':organisationSharing.lower()}
item = ""
# If the feature service view has not been created
if (featureServiceViewExists == False):
if (dataFile):
# If the file exists
if (os.path.exists(dataFile)):
# Open file
with open(dataFile) as jsonFile:
jsonData = json.load(jsonFile)
# If name in config file
if "serviceName" in jsonData:
# If source URL in config
if "sourceItemID" in jsonData:
# Get the source feature service
sourceItem = gisPortal.content.get(jsonData["sourceItemID"])
sourceFeatureLayer = arcgis.features.FeatureLayerCollection.fromitem(sourceItem)
# Create the view
item = sourceFeatureLayer.manager.create_view(name=jsonData["serviceName"])
viewFeatureLayer = arcgis.features.FeatureLayerCollection.fromitem(item)
if "viewDefinition" in jsonData:
# For each layer in the feature service view
for layer in viewFeatureLayer.layers:
# Set the definition query
viewDef = {"viewDefinitionQuery" : jsonData["viewDefinition"]}
layer.manager.update_definition(viewDef)
else:
printMessage("viewDefinition does not exist in configuration file - " + dataFile + "...","warning")
                            item.update(itemProperties)
                            printMessage(title + " feature service view created - " + item.id + "...","info")
else:
printMessage("sourceItemID does not exist in configuration file - " + dataFile + "...","error")
else:
printMessage("name does not exist in configuration file - " + dataFile + "...","error")
else:
printMessage(title + " feature service view data does not exist - " + dataFile + "...","warning")
# Feature service view already exists
else:
# Get the item ID
itemID = getIDforPortalItem(gisPortal,title,"feature service")
# Get the feature service view
item = gisPortal.content.get(itemID)
# Update the feature service view
item.update(itemProperties, thumbnail=thumbnail)
printMessage(title + " feature service view item properties updated - " + itemID + "...","info")
# If sharing to group(s)
if (groupSharing) and (item):
printMessage("Sharing with the following groups - " + groupSharing + "...","info")
item.share(groups=groupSharing)
# End of create feature service view function
# Start of get ID for portal item function
def getIDforPortalItem(gisPortal,title,itemType):
itemID = ""
# If a group
if (itemType.lower() == "group"):
# Search portal to find item
searchItems = gisPortal.groups.search(query="title:" + title, sort_field='title', sort_order='asc', max_groups=1000)
# If a file geodatabase
elif (itemType.lower() == "file geodatabase"):
# Search portal to find item
searchItems = gisPortal.content.search(query=title, item_type=itemType, sort_field='title', sort_order='asc', max_items=1000)
else:
# Search portal to find item
searchItems = gisPortal.content.search(query="title:" + title, item_type=itemType, sort_field='title', sort_order='asc', max_items=1000)
for searchItem in searchItems:
# If file geodatabase item
if (itemType.lower() == "file geodatabase"):
itemID = searchItem.id
else:
# If search result matches
if (searchItem.title.lower().replace(" ", "") == title.lower().replace(" ", "")):
# If a group
if (itemType.lower() == "group"):
itemID = searchItem.id
else:
# If item type matches
if (searchItem.type.lower().replace(" ", "") == itemType.lower().replace(" ", "")):
itemID = searchItem.id
# Return item ID
return itemID
# End of get ID for portal item function
# Start of search portal for item function
def searchPortalForItem(gisPortal,title,itemType):
itemExists = False
# If a group
if (itemType.lower() == "group"):
# Search portal to see if group is already there
searchItems = gisPortal.groups.search(query="title:" + title, sort_field='title', sort_order='asc', max_groups=1000)
# If a file geodatabase
elif (itemType.lower() == "file geodatabase"):
# Search portal to find item
searchItems = gisPortal.content.search(query=title, item_type=itemType, sort_field='title', sort_order='asc', max_items=1000)
else:
# Search portal to see if item is already there
searchItems = gisPortal.content.search(query="title:" + title, item_type=itemType, sort_field='title', sort_order='asc', max_items=1000)
for searchItem in searchItems:
# If file geodatabase item
if (itemType.lower() == "file geodatabase"):
printMessage(title + " already exists - " + searchItem.id + "...","info")
itemExists = True
else:
# If search result matches
if (searchItem.title.lower().replace(" ", "") == title.lower().replace(" ", "")):
printMessage(title + " already exists - " + searchItem.id + "...","info")
itemExists = True
# Return item exists boolean
return itemExists
# End of search portal for item function
# Start of print and logging message function
def printMessage(message,type):
# If using ArcPy
if (useArcPy == "true"):
if (type.lower() == "warning"):
arcpy.AddWarning(message)
# Logging
if (enableLogging == "true"):
logger.warning(message)
elif (type.lower() == "error"):
arcpy.AddError(message)
# Logging
if (enableLogging == "true"):
logger.error(message)
else:
arcpy.AddMessage(message)
# Logging
if (enableLogging == "true"):
logger.info(message)
else:
print(message)
# Logging
if (enableLogging == "true"):
logger.info(message)
# End of print and logging message function
# Start of set logging function
def setLogging(logFile):
# Create a logger
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.DEBUG)
# Setup log message handler
logMessage = logging.FileHandler(logFile)
# Setup the log formatting
logFormat = logging.Formatter("%(asctime)s: %(levelname)s - %(message)s", "%d/%m/%Y - %H:%M:%S")
# Add formatter to log message handler
logMessage.setFormatter(logFormat)
# Add log message handler to logger
logger.addHandler(logMessage)
return logger, logMessage
# End of set logging function
# Start of send email function
def sendEmail(message,attachment):
# Send an email
printMessage("Sending email...","info")
# Server and port information
smtpServer = smtplib.SMTP(emailServerName,emailServerPort)
smtpServer.ehlo()
smtpServer.starttls()
    smtpServer.ehlo()
# Setup content for email (In html format)
emailMessage = MIMEMultipart('alternative')
emailMessage['Subject'] = emailSubject
emailMessage['From'] = emailUser
emailMessage['To'] = emailTo
emailText = MIMEText(message, 'html')
emailMessage.attach(emailText)
# If there is a file attachment
if (attachment):
fp = open(attachment,'rb')
fileAttachment = email.mime.application.MIMEApplication(fp.read(),_subtype="pdf")
fp.close()
fileAttachment.add_header('Content-Disposition','attachment',filename=os.path.basename(attachment))
emailMessage.attach(fileAttachment)
# Login with sender email address and password
if (emailUser and emailPassword):
smtpServer.login(emailUser, emailPassword)
    # Send the email and close the connection
    smtpServer.sendmail(emailUser, emailTo, emailMessage.as_string())
    smtpServer.quit()
# End of send email function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
# If using ArcPy
if (useArcPy == "true"):
argv = tuple(arcpy.GetParameterAsText(i)
for i in range(arcpy.GetArgumentCount()))
    else:
        # Drop the first argument, which is the script name
        argv = sys.argv[1:]
# Logging
if (enableLogging == "true"):
# Setup logging
logger, logMessage = setLogging(logFile)
# Log start of process
logger.info("Process started.")
# Setup the use of a proxy for requests
if (enableProxy == "true"):
# Setup the proxy
proxy = urllib2.ProxyHandler({requestProtocol : proxyURL})
openURL = urllib2.build_opener(proxy)
# Install the proxy
urllib2.install_opener(openURL)
mainFunction(*argv)
|
import pygame
from src.menu import Menu, Button
from src.screen import Screen
from src.game import Game
def runGame(screen, tutorialMenu, fakeNewsMenu):
game = Game(screen, tutorialMenu, fakeNewsMenu)
game.run(screen)
pygame.init()
screen = Screen((1920, 1080), "Stop the count!", fullScreen=False)
tutorialMenu = Menu(pygame.image.load("assets/img/buildWall.jpg"), [], True)
button_quitMenu = Button((10, 10), (60, 60), "Back", tutorialMenu.stop)
button_quitMenu.build(screen)
tutorialMenu.buttons.append(button_quitMenu)
fakeNewsMenu = Menu(pygame.image.load("assets/img/fakeNews.jpg"), [], True)
button_quitMenu = Button((10, 10), (60, 60), "Back", fakeNewsMenu.stop)
button_quitMenu.build(screen)
fakeNewsMenu.buttons.append(button_quitMenu)
button_play = Button((300, 400), (300, 60), "Play", runGame, screen=screen, tutorialMenu=tutorialMenu, fakeNewsMenu=fakeNewsMenu)
button_howToPlay = Button((300, 500), (300, 60), "How To Build Walls", tutorialMenu.run, screen=screen)
button_fakeNews = Button((300, 600), (300, 60), "Fake News", fakeNewsMenu.run, screen=screen)
button_fakeNews.build(screen)
button_quit = Button((300, 700), (300, 60), "Quit", exit)
button_play.build(screen)
button_howToPlay.build(screen)
button_quit.build(screen)
mainMenu = Menu(pygame.image.load("assets/img/background.jpg"), [button_play, button_howToPlay, button_fakeNews, button_quit])
def main():
mainMenu.run(screen)
if __name__ == "__main__":
main()
|
import tkinter
import tkinter.filedialog
import tkinter.ttk
from . import screens
class Interface(tkinter.Tk):
def __init__(self, control):
super(Interface, self).__init__()
self._control = control
self._create_widgets()
self._create_commands()
self._create_styles()
self.update_nav_state(1)
self.active_screen('url')
# flags
self._feedback_download = None
def start(self):
'''Starts the application loop.
'''
self.title('downloader v.0.0.0.1')
self.geometry('400x500')
self.mainloop()
def active_screen(self, screen):
'''
Unpack the current screen and pack another.
param screen : str
the screen name.
'''
screens = {
'url':{'frame':self._screen_url, 'value':1},
'dir':{'frame':self._screen_dir, 'value':2},
'download':{'frame':self._screen_download, 'value':3}}
current = self._container_screen.pack_slaves()
if current:
current[0].pack_forget()
frame, value = screens[screen].values()
self._nav_var.set(value)
frame.pack(fill='both', expand=True, padx=5, pady=5)
def update_nav_state(self, count):
'''
Active some nav buttons and disable others.
param count : int
            number of nav buttons that will be activated.
'''
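        # For example (with the button order set up in _create_nav_buttons),
        # update_nav_state(2) enables the "url" and "dir" nav buttons and
        # disables the "download" one.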
buttons = [self._nav_btn_url, self._nav_btn_dir,
self._nav_btn_download]
for button in buttons[:count]:
button['state'] = 'normal'
for button in buttons[count:]:
button['state'] = 'disabled'
def search_directory(self):
'''
Open the directory explorer.
return : str
'''
directory = tkinter.filedialog.askdirectory()
return '' if not directory else directory
def start_feedback_download(self, count=0):
feeds = ['downloading.', 'downloading..', 'downloading...']
self.screen_download.button['state'] = 'disabled'
self.screen_download.button['text'] = feeds[count%len(feeds)]
self._feedback_download = self.after(1000, self.start_feedback_download, count+1)
def stop_feedback_download(self):
if self._feedback_download:
self.screen_download.button['state'] = 'normal'
self.screen_download.button['text'] = 'Download'
self.after_cancel(self._feedback_download)
self._feedback_download = None
@property
def url(self):
'''Return the current url in entry.
'''
return self._screen_url.entry.get()
@property
def directory(self):
'''Return the current directory in entry.
'''
return self._screen_dir.entry.get()
@directory.setter
def directory(self, value):
self._screen_dir.entry['state'] = 'normal'
self._screen_dir.entry.delete(0, 'end')
self._screen_dir.entry.insert('end', value)
self._screen_dir.entry['state'] = 'readonly'
@property
def url_feedback(self):
return self._screen_url.url_feedback
@url_feedback.setter
def url_feedback(self, value):
self._screen_url.url_feedback = value
@property
def dir_feedback(self):
return self._screen_dir.dir_feedback
@dir_feedback.setter
def dir_feedback(self, value):
self._screen_dir.dir_feedback = value
@property
def screen_url(self):
return self._screen_url
@property
def screen_dir(self):
return self._screen_dir
@property
def screen_download(self):
return self._screen_download
def _create_widgets(self):
'''Create all widgets.
'''
self._create_screens()
self._create_nav_buttons()
def _create_commands(self):
        '''Create all commands.
'''
self._nav_btn_url['command'] = \
self._control.bind_open_screen_url
self._nav_btn_dir['command'] = \
self._control.bind_open_screen_dir
self._nav_btn_download['command'] = \
self._control.bind_open_screen_download
self._screen_url.button_next['command'] = \
self._control.bind_check_url
self._screen_dir.button_next['command'] = \
self._control.bind_check_directory
self._screen_dir.button_search['command'] = \
self._control.bind_search_directory
self._screen_download.button['command'] = \
self._control.bind_make_download
def _create_screens(self):
self._container_screen = tkinter.ttk.Frame(self)
self._container_screen.pack_propagate(False)
self._container_screen.pack(fill='both', expand=True)
self._screen_url = screens.ScreenUrl(
self._container_screen)
self._screen_dir = screens.ScreenDir(
self._container_screen)
self._screen_download = screens.ScreenDownload(
self._container_screen)
def _create_nav_buttons(self):
container = tkinter.ttk.Frame(self)
container['height'] = 25
container.pack_propagate(False)
container.pack(fill='x', padx=5, pady=5)
self._nav_var = tkinter.IntVar()
self._nav_btn_url = tkinter.ttk.Radiobutton(container)
self._nav_btn_url['variable'] = self._nav_var
self._nav_btn_url['value'] = 1
self._nav_btn_url.pack(side='left', expand=True)
self._nav_btn_dir = tkinter.ttk.Radiobutton(container)
self._nav_btn_dir['variable'] = self._nav_var
self._nav_btn_dir['value'] = 2
self._nav_btn_dir.pack(side='left', expand=True)
self._nav_btn_download = tkinter.ttk.Radiobutton(container)
self._nav_btn_download['variable'] = self._nav_var
self._nav_btn_download['value'] = 3
self._nav_btn_download.pack(side='left', expand=True)
def _create_styles(self):
'''Define widgets styles.
'''
color_1 = '#90A380'
color_2 = '#603959'
color_3 = '#A153A1'
color_4 = '#BE7AC7'
color_5 = '#DCB8E8'
color_6 = '#E7D5FC'
style = tkinter.ttk.Style()
# main window
self.config(background=color_1)
# frames
style.configure('TFrame', background=color_1)
# buttons
style.configure('TButton', background=color_6)
style.configure('TButton', foreground=color_2)
style.configure('TButton', width=15)
style.configure('TButton', font=('Georgia', 14, 'bold'))
style.map('TButton', background=[('active', color_2)])
style.map('TButton', foreground=[('active', color_6)])
# labels
style.configure('TLabel', background=color_1)
style.configure('TLabel', foreground=color_2)
style.configure('TLabel', anchor='c')
style.configure('TLabel', font=('Georgia', 14, 'bold'))
# entrys
style.configure('TEntry', fieldbackground=color_6)
style.configure('TEntry', background=color_1)
style.configure('TEntry', relief='raised')
style.configure('TEntry', borderwidth=3)
        self._screen_url.entry['font'] = ('Georgia', 14, 'bold')
        self._screen_dir.entry['font'] = ('Georgia', 14, 'bold')
# radios
style.configure('TRadiobutton', background=color_1)
style.map('TRadiobutton', background=[('disabled', color_1)])
style.map('TRadiobutton', background=[('active', color_2)])
|
"""
`minion-ci` is a minimalist, decentralized, flexible Continuous Integration Server for hackers.
This module contains helper functions for the minion-ci server and client
:copyright: (c) by Timo Furrer
:license: MIT, see LICENSE for details
"""
import os
from urllib.parse import urlparse
def get_repository_name(repository_url):
"""
Returns the repository name from the
given repository_url.
"""
parts = urlparse(repository_url)
return os.path.splitext(os.path.basename(parts.path))[0]
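# A minimal usage sketch (the repository URL below is made up for illustration):
#
#   >>> get_repository_name("https://github.com/timofurrer/minion-ci.git")
#   'minion-ci'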
|
import time
from collections import deque
from threading import Lock, Condition, Thread
from senders.gbn_sender import GoBackNSender
from senders.udt_sender import UDTSender, LossyUDTSender, CorruptingUDTSender
from helpers.logger_utils import get_stdout_logger
from receivers.udt_receiver import UDTReceiver, InterruptableUDTReceiver
from senders.sr_sender import SelectiveRepeatSender
logger = get_stdout_logger('gbn_receiver', 'DEBUG')
class GoBackNReceiver:
def __init__(self, max_seq_num=30, loss_prob=0):
self.udt_receiver = InterruptableUDTReceiver(UDTReceiver())
self.udt_listening_receiver = InterruptableUDTReceiver(UDTReceiver())
self.max_seq_num = max_seq_num + 1
self.expected_seq_num = 1
self.previous_seq_num = 0
self.data_queue = deque()
self.data_queue_cv = Condition()
self.done_receiving = False
self.waiting_to_close = False
        self.loss_prob = loss_prob
        self.closing_cv = Condition()
        # Flag checked by accept(); listen() sets it to True
        self.is_listening = False
def start_data_waiter(self):
t = Thread(target=self.wait_for_data)
t.daemon = True
t.start()
def wait_for_data(self):
while not self.done_receiving:
packet, sender_address = self.udt_receiver.receive()
logger.info(f'received {packet.data},'
f' with seq num {packet.seq_number}, expecting {self.expected_seq_num}'
f' from {sender_address}')
if packet.seq_number == self.expected_seq_num:
self.send_ack(packet.seq_number, sender_address)
with self.data_queue_cv:
self.data_queue.append(packet)
logger.debug(f'notify {len(self.data_queue)}')
self.data_queue_cv.notify()
else:
self.send_ack(self.previous_seq_num, sender_address, is_duplicate=True)
def send_ack(self,seq_number, sender_address, is_duplicate=False):
logger.debug(f'sending an ack for {seq_number}')
logger.debug(f'expected seq num just before sending ack = {self.expected_seq_num}')
udt_sender = CorruptingUDTSender(LossyUDTSender(UDTSender.from_udt_receiver(self.udt_receiver, *sender_address),
self.loss_prob), self.loss_prob)
udt_sender.send_ack(seq_number)
logger.info(f'sent an Ack with seq number {seq_number} to {sender_address}')
if not is_duplicate:
self.previous_seq_num = self.expected_seq_num
self.expected_seq_num = self.get_next_num(self.expected_seq_num, 1)
logger.debug(f'expected seq num after adjusting for data = {self.expected_seq_num}')
def get_packet(self):
with self.data_queue_cv:
self.data_queue_cv.wait_for(lambda : len(self.data_queue) > 0)
pkt = self.data_queue.popleft()
logger.info(f'delivering packet with data {pkt.data} to upper layer')
return pkt
@classmethod
def from_sender(cls, gbn_sender, max_seq_num=-1, loss_prob=0):
gbn_receiver = cls(max_seq_num=max_seq_num,loss_prob=loss_prob)
gbn_receiver.udt_receiver = InterruptableUDTReceiver(UDTReceiver.from_udt_sender(gbn_sender.udt_sender))
return gbn_receiver
def get_next_num(self, num, delta):
"""
returns the next number in the sequence modulo the max sequence number
given we start counting at 1 not 0
:param num: the number to increment
:param delta: by how much we want to increment this number
:return:
"""
next_num = (num + delta) % self.max_seq_num
return next_num + ((num + delta)//self.max_seq_num) if next_num == 0 else next_num
def listen(self, port):
"""
This sets up the receiver to start listening for incoming connections
on the port passed in as a parameter.
:param port:
"""
self.udt_listening_receiver.bind(port)
self.is_listening = True
def accept(self, callback, **sender_args):
def extended_callback(init_packet, sr_sender):
time.sleep(1)
callback(init_packet, sr_sender)
if not self.is_listening:
raise TypeError('non listening receiver cannot accept connections')
init_packet, sender_address = self.udt_listening_receiver.receive()
logger.info( f'(listener) : received {init_packet.data} from {sender_address}')
self.send_ack(init_packet.seq_number, sender_address)
client_thread = Thread(target=extended_callback,
args=(init_packet, GoBackNSender(*sender_address, **sender_args)))
client_thread.daemon = True
client_thread.start()
return client_thread
def close(self):
with self.closing_cv:
while len(self.data_queue) > 0:
self.closing_cv.wait()
                time.sleep(10)  # wait a while in case this is just a pause caused by delays
logger.debug('woke up')
if len(self.data_queue) > 0:
self.close()
self.done_receiving = True
logger.debug('closing client')
|
from dataclasses import dataclass
from typing import Optional, List
@dataclass(frozen=True)
class Country:
country_code: Optional[str]
country_id: Optional[int]
is_licensed: Optional[bool]
name: Optional[str]
@dataclass(frozen=True)
class Countries:
countries: List[Country]
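# Example construction (all field values below are illustrative only):
#   nz = Country(country_code="NZ", country_id=554, is_licensed=True, name="New Zealand")
#   countries = Countries(countries=[nz])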
|
import random
import logging
import pandas as pd
import numpy as np
import altair as alt
from sklearn.preprocessing import LabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import sg_covid_impact
from sg_covid_impact.extract_salient_terms import make_glass_ch_merged
from sg_covid_impact.make_sic_division import (
make_section_division_lookup,
)
from sg_covid_impact.utils.altair_save_utils import (
google_chrome_driver_setup,
save_altair,
)
from sg_covid_impact.utils.altair_s3 import export_chart
from sg_covid_impact.sic import load_sic_taxonomy, extract_sic_code_description
from nltk.corpus import stopwords
STOPWORDS = stopwords.words("english")
project_dir = sg_covid_impact.project_dir
def train_model(gl_sector_sample):
    """Trains a logistic regression model on the Glass data"""
    logging.info(f"Training with {len(gl_sector_sample)} observations")
    # TF-IDF vectorise the descriptions
logging.info("Pre-processing")
# One hot encoding for labels
# Create array of divisions
labels = np.array(np.array(gl_sector_sample["division"]))
# Create the features (target and corpus)
lb = LabelBinarizer()
lb.fit(labels)
y = lb.transform(labels)
    # TF-IDF vectorised corpus
corpus = list(gl_sector_sample["description"])
count_vect = TfidfVectorizer(
stop_words=STOPWORDS,
ngram_range=(1, 2),
min_df=20,
max_df=0.1,
max_features=10000,
).fit(corpus)
X = count_vect.transform(corpus)
logging.info(X.shape)
# Train, test splits
X_train, X_test, Y_train, Y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Initialise the model
m = OneVsRestClassifier(
LogisticRegression(C=1, class_weight="balanced", solver="liblinear")
)
# Train the model
logging.info("Training")
m.fit(X_train, Y_train)
return lb, count_vect, X_test, Y_test, m
def validate_results_single(prob_vector, true_label, thres=0.1):
"""Compares predicted labels with actual labels for a single observation
Args:
prob_vector (series): sector probability vector
true_label (str): true value of the sector
thres (float): minimum threshold to consider that a sector has been
classified
"""
# Is the actual label the top predicted label?
true_is_top = prob_vector.idxmax() == true_label
# Is the actual label above 0.5?
true_is_high = prob_vector[true_label] > 0.5
    # Is the actual label in the top 5 and top 10 predicted labels?
prob_vector_sorted = prob_vector.sort_values(ascending=False).index
true_in_top_5, true_in_top_10 = [
true_label in prob_vector_sorted[:n] for n in [5, 10]
]
# Is the actual label predicted at all (prob >thres)
true_is_predicted = prob_vector[true_label] > thres
    # Is the top predicted label in the same SIC section as the true label
shared_section = (
div_sect_lookup[prob_vector.idxmax()] == div_sect_lookup[true_label]
)
outputs = pd.Series(
[
true_is_top,
true_is_high,
true_in_top_5,
true_in_top_10,
true_is_predicted,
shared_section,
],
index=[
"true_top",
"true_high",
"true_top_5",
"true_top_10",
"true_predicted",
"same_section",
],
)
return outputs
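# A small illustration of validate_results_single (hypothetical values; assumes
# both division codes are valid keys of div_sect_lookup): for
# prob_vector = pd.Series({"01": 0.6, "02": 0.3, "03": 0.1}) and true_label = "01",
# every flag (true_top, true_high, true_top_5, true_top_10, true_predicted,
# same_section) comes out True with the default thres=0.1.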
def validate_results(pred_df, labels, thres=0.2):
"""Compares predicted labels with actual labels
Args:
pred_df (df): all prediction probabilities
labels (series): labels
thres (float): minimum threshold to consider a sector present in the predictions
"""
out = pd.DataFrame(
[
validate_results_single(pred_df.iloc[n], labels[n], thres=thres)
for n in np.arange(0, len(pred_df))
]
)
out["true_label"] = labels
return out
def process_validation_outputs(out):
"""Process model validation so we can visualise it"""
model_performance_by_division = (
out.groupby("true_label")
.mean()
.reset_index(drop=False)
.melt(id_vars=["true_label"], var_name="metric", value_name="share")
.assign(section=lambda x: x["true_label"].map(div_sect_lookup))
.assign(description=lambda x: x["true_label"].map(div_code_description))
.assign(section_name=lambda x: x["section"].map(section_name_lookup))
)
sort_divs = (
model_performance_by_division.query("metric == 'true_predicted'")
.sort_values(["section", "share"], ascending=[True, False])["true_label"]
.to_list()
)
sort_vars = ["true_top", "true_high", "true_top_5", "true_top_10"]
model_performance_by_division_selected = model_performance_by_division.loc[
model_performance_by_division["metric"].isin(sort_vars)
]
return model_performance_by_division_selected, sort_divs
def plot_model_performance(perf, sort_divisions):
perf_chart = (
alt.Chart(perf)
.mark_bar()
.encode(
y=alt.Y(
"true_label",
sort=sort_divisions,
axis=alt.Axis(ticks=False, labels=False),
),
x="share",
color=alt.Color("section_name:O", scale=alt.Scale(scheme="category20")),
tooltip=["true_label", "description", "share"],
column=alt.Column("metric", sort=sort_divisions),
)
.resolve_scale(x="independent")
).properties(width=100, height=500)
return perf_chart
if __name__ == "__main__":
train_test_size = 300000
div_code_description = extract_sic_code_description(load_sic_taxonomy(), "Division")
div_sect_lookup, section_name_lookup = make_section_division_lookup()
logging.info("Creating glass - ch dataset")
gl_sector = make_glass_ch_merged()
# Process and sample
gl_sector = gl_sector.dropna(axis=0, subset=["description", "division"])
gl_sector = gl_sector.loc[
[len(x) > 300 for x in gl_sector["description"]]
].reset_index(drop=True)
gl_sector_sample = gl_sector.loc[
random.sample(list(np.arange(len(gl_sector))), train_test_size)
]
# Train model
lb, count_vect, X_test, Y_test, model = train_model(gl_sector_sample)
# Validation
test_labels = model.predict_proba(X_test)
# How does the real label distribution relate to the test label distribution
actuals = lb.inverse_transform(Y_test)
preds_df = pd.DataFrame(test_labels, columns=lb.classes_)
# Validate results
out = validate_results(preds_df, actuals, thres=0.05)
perf, sort_divisions = process_validation_outputs(out)
# Visualise validation outputs
driver = google_chrome_driver_setup()
perf_chart = plot_model_performance(perf, sort_divisions)
save_altair(perf_chart, "appendix_model_validation", driver=driver)
export_chart(perf_chart, "appendix_model_validation")
# Apply model to population of companies and save results
X_all = count_vect.transform(gl_sector["description"])
# This creates a df with predicted probabilities for all data in the corpus
preds_all = pd.DataFrame(model.predict_proba(X_all), columns=lb.classes_)
preds_all["id_organisation"] = gl_sector["org_id"]
preds_all.to_csv(
f"{project_dir}/data/processed/glass_companies_predicted_labels_v2.csv",
index=False,
)
|
from tkinter import *
from tkinter import ttk
from tkinter.font import Font
ventana=Tk()
notebook=ttk.Notebook(ventana)
notebook.pack(fill="both", expand="yes", )
pes0=ttk.Frame(notebook)
pes1=ttk.Frame(notebook)
pes2=ttk.Frame(notebook)
pes3=ttk.Frame(notebook)
pes4=ttk.Frame(notebook)
notebook.add(pes0, text="RESTAURANTE EAN")
notebook.add(pes1, text="MENU DEL DIA")
notebook.add(pes2, text="RESERVE SU MESA")
notebook.add(pes3, text="INVENTARIO")
notebook.add(pes4, text="REGISTRO DE EMPLEADOS")
lbl=Label(pes0, text="BIENVENIDOS AL RESTAURANTE EAN", font=("boink let", 70)).place(x=385, y=200)
lbl1=Label(pes0, text="ESCOJA UNA DE LAS PAGINAS DE LA PARTE SUPERIOR", font=("boink let", 50), fg="dark green").place(x=350, y=560)
lbl2=Label(pes1, text="Elija su menu:", font=("fonty", 25)).place(x=900,y=30)
imagen=PhotoImage(file="hamburger.1.gif")
fondo=Label(pes1, image=imagen, height=180, width=235).place(x=100, y=110)
lbl=Label(pes1, text="HAMBURGUESA", font=("fonty", 18)).place(x=150,y=290)
lbl1=Label(pes1, text="$7.000", font=("fonty", 16)).place(x=185,y=310)
imagen1=PhotoImage(file="perro.gif")
fondo=Label(pes1, image=imagen1, height=180, width=235).place(x=480, y=110)
lbl=Label(pes1, text="PERRO CALIENTE", font=("fonty", 18)).place(x=525,y=290)
lbl2=Label(pes1, text="$5.000", font=("fonty", 16)).place(x=570,y=310)
imagen2=PhotoImage(file="papas.gif")
fondo=Label(pes1, image=imagen2, height=175, width=235).place(x=855, y=110)
lbl=Label(pes1, text="PAPAS FRITAS", font=("fonty", 18)).place(x=910,y=290)
lbl3=Label(pes1, text="$3.000", font=("fonty", 16)).place(x=943,y=310)
imagen3=PhotoImage(file="pizza.gif")
fondo=Label(pes1, image=imagen3,height=180, width=235).place(x=1200, y=110)
lbl=Label(pes1, text="PIZZA", font=("fonty", 18)).place(x=1285,y=290)
lbl4=Label(pes1, text="$6.000", font=("fonty", 16)).place(x=1284,y=310)
imagen4=PhotoImage(file="empanadas2.gif")
fondo=Label(pes1, image=imagen4, height=180, width=235).place(x=1560, y=110)
lbl=Label(pes1, text="EMPANADAS", font=("fonty", 18)).place(x=1628,y=290)
lbl5=Label(pes1, text="$6.000", font=("fonty", 16)).place(x=1650,y=310)
imagen5=PhotoImage(file="arepa.gif")
fondo=Label(pes1, image=imagen5, height=180, width=235).place(x=480, y=365)
lbl=Label(pes1, text="AREPA RELLENA", font=("fonty", 18)).place(x=525,y=549)
lbl6=Label(pes1, text="$5.000", font=("fonty", 16)).place(x=570,y=570)
imagen6=PhotoImage(file="Sandwich.gif")
fondo=Label(pes1, image=imagen6, height=180, width=235).place(x=100, y=365)
lbl=Label(pes1, text="SANDWICH", font=("fonty", 18)).place(x=160,y=550)
lbl7=Label(pes1, text="$12.000", font=("fonty", 16)).place(x=180,y=570)
imagen7=PhotoImage(file="Asado.gif")
fondo=Label(pes1, image=imagen7, height=180, width=235).place(x=855, y=365)
lbl=Label(pes1, text="POLLO ASADO", font=("fonty", 18)).place(x=910,y=549)
lbl8=Label(pes1, text="$20.000", font=("fonty", 16)).place(x=943,y=570)
imagen8=PhotoImage(file="Broaster.gif")
fondo=Label(pes1, image=imagen8, height=180, width=235).place(x=1200,y=365)
lbl=Label(pes1, text="POLLO A LA BROASTER", font=("fonty", 18)).place(x=1220,y=548)
lbl9=Label(pes1, text="$27.000", font=("fonty", 16)).place(x=1284,y=570)
imagen9=PhotoImage(file="Sushi.gif")
fondo=Label(pes1, image=imagen9, height=180, width=235).place(x=1560,y=365)
lbl=Label(pes1, text="SUSHI", font=("fonty", 18)).place(x=1653,y=547)
lbl10=Label(pes1, text="$17.000", font=("fonty", 16)).place(x=1650,y=570)
imagen10=PhotoImage(file="Arroz.gif")
fondo=Label(pes1, image=imagen10, height=180, width=235).place(x=100, y=625)
lbl=Label(pes1, text="ARROZ CON POLLO", font=("fonty", 18)).place(x=135,y=810)
lbl11=Label(pes1, text="$12.000", font=("fonty", 16)).place(x=180,y=830)
imagen12=PhotoImage(file="Salchi.gif")
fondo=Label(pes1, image=imagen12, height=180, width=235).place(x=480, y=625)
lbl=Label(pes1, text="SALCHIPAPA", font=("fonty", 18)).place(x=537,y=806)
lbl11=Label(pes1, text="$12.000", font=("fonty", 16)).place(x=560,y=828)
imagen13=PhotoImage(file="Costillitas.gif")
fondo=Label(pes1, image=imagen13, height=180, width=235).place(x=855, y=625)
lbl=Label(pes1, text="COSTILLITAS", font=("fonty", 18)).place(x=910,y=806)
lbl11=Label(pes1, text="$12.000", font=("fonty", 16)).place(x=938,y=828)
imagen14=PhotoImage(file="Nachos.gif")
fondo=Label(pes1, image=imagen14, height=180, width=235).place(x=1200,y=625)
lbl=Label(pes1, text="NACHOS CON QUESO", font=("fonty", 18)).place(x=1225,y=806)
lbl9=Label(pes1, text="$27.000", font=("fonty", 16)).place(x=1284,y=828)
imagen15=PhotoImage(file="Tacos.gif")
fondo=Label(pes1, image=imagen15, height=180, width=235).place(x=1560,y=625)
lbl=Label(pes1, text="TACOS", font=("fonty", 18)).place(x=1650,y=806)
lbl10=Label(pes1, text="$17.000", font=("fonty", 16)).place(x=1650,y=828)
ventana.geometry("3000x3000")
ventana.mainloop()
|
from .schema import *
from .api import *
from . import examples
from ...datasets import (
list_datasets,
load_dataset
)
from ... import expr
from ...expr import datum
from .display import VegaLite, renderers
from .data import (
pipe, curry, limit_rows,
sample, to_json, to_csv, to_values,
default_data_transformer,
data_transformers
)
|
from ckeditor.widgets import CKEditorWidget
from django import forms
from django.contrib import admin
from .models import Picture, Album, Video
# Register your models here.
class InlinePicture(admin.TabularInline):
model = Picture
class AlbumAdmin(admin.ModelAdmin):
inlines = [InlinePicture]
class VideoContentForm(forms.ModelForm):
content = forms.CharField(widget=CKEditorWidget())
class Meta:
model = Video
fields = '__all__'
class VideoAdmin(admin.ModelAdmin):
form = VideoContentForm
admin.site.register(Video, VideoAdmin)
admin.site.register(Album, AlbumAdmin)
|
# pylint: disable=import-error
# pylint: disable=no-member
from copy import deepcopy
import numpy as np
import time
import torch
import abc
from termcolor import cprint
from gym.spaces import Box
import torch.nn as nn
import pickle as pkl
import scipy
import flare.kindling as fk
from flare.kindling import ReplayBuffer
from typing import Optional, Union, Callable, Tuple, List
import pytorch_lightning as pl
from flare.kindling.datasets import QPolicyGradientRLDataset
from argparse import Namespace
import sys
import gym
import pybullet_envs
class BaseQPolicyGradient(pl.LightningModule):
def __init__(
self,
env_fn: Callable,
actorcritic: Callable,
epochs: int,
seed: Optional[int] = 0,
steps_per_epoch: Optional[int] = 4000,
horizon: Optional[int] = 1000,
replay_size: Optional[int] = int(1e6),
gamma: Optional[float] = 0.99,
polyak: Optional[float] = 0.95,
pol_lr: Optional[float] = 1e-3,
q_lr: Optional[float] = 1e-3,
hidden_sizes: Optional[Union[tuple, list]] = (256, 128),
bs: Optional[int] = 100,
warmup_steps: Optional[int] = 10000,
update_after: Optional[int] = 1000,
update_every: Optional[int] = 50,
act_noise: Optional[float] = 0.1,
num_test_episodes: Optional[int] = 10,
        buffer: Optional[Callable] = ReplayBuffer,
hparams = None
):
super().__init__()
if hparams is None:
pass
else:
self.hparams = hparams
self.env, self.test_env = env_fn(), env_fn()
self.ac = actorcritic(
self.env.observation_space.shape[0],
self.env.action_space,
hidden_sizes=hidden_sizes,
)
self.buffer = buffer(
self.env.observation_space.shape,
self.env.action_space.shape,
replay_size,
)
torch.manual_seed(seed)
np.random.seed(seed)
self.env.seed(seed)
self.steps_per_epoch = steps_per_epoch
self.tracker_dict = {}
self.horizon = horizon
self.num_test_episodes = num_test_episodes
self.t = 0
self.start = 0
self.warmup_steps = warmup_steps
self.update_after = update_after
self.update_every = update_every
self.act_noise = act_noise
self.replay_size = replay_size
self.gamma = gamma
self.polyak = polyak
self.pol_lr = pol_lr
self.q_lr = q_lr
self.hidden_sizes = hidden_sizes
self.bs = bs
self.act_dim = self.env.action_space.shape[0]
self.act_limit = self.env.action_space.high[0]
self.steps = self.steps_per_epoch * epochs
self.ac_targ = deepcopy(self.ac)
for param in self.ac_targ.parameters():
param.requires_grad = False
self.saver = fk.Saver(out_dir='tmp')
def get_name(self):
return self.__class__.__name__
def on_train_start(self):
self.inner_loop(self.steps)
def forward(self, x, a):
return self.ac(x, a)
@abc.abstractmethod
def configure_optimizers(self):
"""Function to initialize optimizers"""
return
@abc.abstractmethod
def calc_pol_loss(self, data):
"""Function to compute policy loss"""
return
@abc.abstractmethod
def calc_qfunc_loss(self, data):
"""Function to compute q-function loss"""
return
@abc.abstractmethod
def training_step(self, data, timer=None):
"""Update rule for algorithm"""
return
def training_step_end(
self,
step_dict: dict
) -> dict:
r"""
Method for end of training step. Makes sure that episode reward and length info get added to logger.
Args:
            step_dict (dict): dictionary from the last training step.
Returns:
step_dict (dict): dictionary from last training step with episode return and length info from last epoch added to log.
"""
step_dict['log'] = self.add_to_log_dict(step_dict['log'])
return step_dict
def add_to_log_dict(self, log_dict) -> dict:
r"""
Adds episode return and length info to logger dictionary.
Args:
log_dict (dict): Dictionary to log to.
Returns:
log_dict (dict): Modified log_dict to include episode return and length info.
"""
log_dict.update(self.tracker_dict)
return log_dict
def train_dataloader(self):
dataset = QPolicyGradientRLDataset(self.buffer.sample_batch(self.bs))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.bs)
return dataloader
def get_action(self, o, noise_scale):
a = self.ac.act(torch.as_tensor(o, dtype=torch.float32))
a += noise_scale * np.random.randn(self.act_dim)
return np.clip(a, -self.act_limit, self.act_limit)
def test_agent(self, num_test_episodes, max_ep_len):
test_return = []
test_length = []
for j in range(num_test_episodes):
o, d, ep_ret, ep_len = self.test_env.reset(), False, 0, 0
while not (d or (ep_len == max_ep_len)):
# Take deterministic actions at test time (noise_scale=0)
o, r, d, _ = self.test_env.step(self.get_action(o, 0))
ep_ret += r
ep_len += 1
test_return.append(ep_ret)
test_length.append(ep_len)
trackit = dict(MeanTestEpReturn=np.mean(test_return), MeanTestEpLength=np.mean(test_length))
return trackit
def update(self):
dataloader = self.train_dataloader()
for i, batch in enumerate(dataloader):
out1 = self.training_step(batch, i, 0)
out2 = self.training_step(batch, i, 1)
self.tracker_dict.update(out1)
self.tracker_dict.update(out2)
def inner_loop(self, steps):
max_ep_len = self.horizon
state, episode_return, episode_length = self.env.reset(), 0, 0
rewlst = []
lenlst = []
for i in range(self.start, steps):
# Main loop: collect experience in env and update/log each epoch
# Until start_steps have elapsed, randomly sample actions
# from a uniform distribution for better exploration. Afterwards,
# use the learned policy (with some noise, via act_noise).
if self.t > self.warmup_steps:
action = self.get_action(state, self.act_noise)
else:
action = self.env.action_space.sample()
# Step the env
next_state, reward, done, _ = self.env.step(action)
episode_return += reward
episode_length += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
done = False if episode_length == max_ep_len else done
# Store experience to replay buffer
self.buffer.store(state, action, reward, next_state, done)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
state = next_state
# End of trajectory handling
if done or (episode_length == max_ep_len):
rewlst.append(episode_return)
lenlst.append(episode_length)
state, episode_return, episode_length = self.env.reset(), 0, 0
self.t += 1
if self.t > self.update_after and self.t % self.update_every == 0:
trackit = {
"MeanEpReturn": np.mean(rewlst),
"StdEpReturn": np.std(rewlst),
"MaxEpReturn": np.max(rewlst),
"MinEpReturn": np.min(rewlst),
"MeanEpLength": np.mean(lenlst),
}
self.tracker_dict.update(trackit)
self.update()
# End of epoch handling
if (self.t + 1) % self.steps_per_epoch == 0:
# Test the performance of the deterministic version of the agent.
testtrack = self.test_agent(
num_test_episodes=self.num_test_episodes, max_ep_len=max_ep_len
)
self.tracker_dict.update(testtrack)
self.printdict()
self.start = i
def printdict(self, out_file: Optional[str] = sys.stdout) -> None:
r"""
Print the contents of the epoch tracking dict to stdout or to a file.
Args:
out_file (sys.stdout or string): File for output. If writing to a file, opening it for writing should be handled in :func:`on_epoch_end`.
"""
self.print("\n", file=out_file)
for k, v in self.tracker_dict.items():
self.print(f"{k}: {v}", file=out_file)
self.print("\n", file=out_file)
def on_epoch_end(self) -> None:
r"""
Print tracker_dict, reset tracker_dict, and generate new data with inner loop.
"""
self.printdict()
self.saver.store(**self.tracker_dict)
self.tracker_dict = {}
self.inner_loop(self.steps)
def on_train_end(self):
self.saver.save()
def runner(
env_name: str,
algo: BaseQPolicyGradient,
ac: nn.Module,
epochs: Optional[int] = 10000,
steps_per_epoch: Optional[int] = 4000,
bs: Optional[Union[int, None]] = 50,
hidden_sizes: Optional[Union[Tuple, List]] = (256, 256),
gamma: Optional[float] = 0.99,
hparams: Optional[Namespace] = None,
seed: Optional[int] = 0
):
r"""
Runner function to train algorithms in env.
Args:
        algo (BaseQPolicyGradient subclass): The Q-policy-gradient algorithm to run.
ac (nn.Module): Actor-Critic network following same API as :func:`~FireActorCritic`.
epochs (int): Number of epochs to train for.
steps_per_epoch (int): Number of agent - environment interaction steps to train on each epoch.
        bs (int): Size of minibatches sampled from the replay buffer for each update. Defaults to 50.
hidden_sizes (tuple or list): Hidden layer sizes for MLP Policy and MLP Critic.
        gamma (float): Discount factor for return discounting.
hparams (Namespace): Hyperparameters to log. Defaults to None.
seed (int): Random seeding for environment, PyTorch, and NumPy.
"""
env = lambda: gym.make(env_name)
agent = algo(
env,
ac,
epochs=epochs,
hidden_sizes=hidden_sizes,
seed=seed,
steps_per_epoch=steps_per_epoch,
bs=bs,
gamma=gamma,
hparams=hparams
)
trainer = pl.Trainer(
reload_dataloaders_every_epoch=True,
early_stop_callback=False,
max_epochs=epochs
)
trainer.fit(agent)
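# A minimal usage sketch (MyQAlgo and MyActorCritic are hypothetical placeholders
# for a concrete BaseQPolicyGradient subclass and an actor-critic module following
# the FireActorCritic API):
#
#   runner(
#       "Pendulum-v0",
#       algo=MyQAlgo,
#       ac=MyActorCritic,
#       epochs=10,
#       steps_per_epoch=4000,
#       bs=100,
#   )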
|
from ftfy.fixes import (
fix_encoding, fix_encoding_and_explain, apply_plan, possible_encoding,
remove_control_chars, fix_surrogates
)
from ftfy.badness import sequence_weirdness
import unicodedata
import sys
# Most single-character strings which have been misencoded should be restored.
def test_bmp_characters():
for index in range(0xa0, 0xfffd):
char = chr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn', 'Cs', 'Mc', 'Mn', 'Sk'):
garble = char.encode('utf-8').decode('latin-1')
# Exclude characters whose re-encoding is protected by the
# 'sequence_weirdness' metric
if sequence_weirdness(garble) >= 0:
garble2 = char.encode('utf-8').decode('latin-1').encode('utf-8').decode('latin-1')
for garb in (garble, garble2):
fixed, plan = fix_encoding_and_explain(garb)
assert fixed == char
assert apply_plan(garb, plan) == char
def test_possible_encoding():
for codept in range(256):
char = chr(codept)
assert possible_encoding(char, 'latin-1')
def test_byte_order_mark():
    assert fix_encoding('ï»¿') == '\ufeff'
def test_control_chars():
text = (
"\ufeffSometimes, \ufffcbad ideas \x7f\ufffalike these characters\ufffb "
"\u206aget standardized\U000E0065\U000E006E.\r\n"
)
fixed = "Sometimes, bad ideas like these characters get standardized.\r\n"
assert remove_control_chars(text) == fixed
def test_emoji_variation_selector():
# The hearts here are explicitly marked as emoji using the variation
# selector U+FE0F. This is not weird.
assert sequence_weirdness('❤\ufe0f' * 10) == 0
def test_emoji_skintone_selector():
# Dear heuristic, you can't call skin-tone selectors weird anymore.
# We welcome Santa Clauses of all colors.
assert sequence_weirdness('🎅🏿🎅🏽🎅🏼🎅🏻') == 0
def test_surrogates():
assert fix_surrogates('\udbff\udfff') == '\U0010ffff'
assert fix_surrogates('\ud800\udc00') == '\U00010000'
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
# GP means
class decaying_exponential(nn.Module):
def __init__(self, dims, a, b, learnable=[True, True]):
"""
        :param int dims: the number of output dimensions
:param float a: initial value for all :math:`a` tensor entries
:param float b: initial value for all :math:`b` tensor entries
:param list learnable: list of booleans indicating whether :math:`a` and :math:`b` are learnable
"""
super().__init__()
if learnable[0]:
self.register_parameter('a', Parameter(a*torch.ones(1, dims, 1)))
else:
self.register_buffer('a', a*torch.ones(1, dims, 1))
if learnable[1]:
self.register_parameter('b', Parameter(b*torch.ones(1, dims, 1)))
else:
self.register_buffer('b', b*torch.ones(1, dims, 1))
def forward(self, input):
"""
:param torch.tensor input: input of shape (K, T, D), only the first dimension in D is considered as time
:returns: output of shape (K, N, T)
:rtype: torch.tensor
"""
return self.a*torch.exp(-input[:, None, :, 0]/self.b)
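# A minimal usage sketch (shapes follow the docstrings above; values are arbitrary):
#
#   mean_fn = decaying_exponential(dims=3, a=1.0, b=2.0)
#   x = torch.randn(4, 7, 5)   # (K=4, T=7, D=5); only x[..., 0] is treated as time
#   out = mean_fn(x)           # shape (4, 3, 7)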
|
import json
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
import pkg_resources
import os
import io
import zipfile
import pooch
_wasm_filenames = {
"wdirect.wasm": (
"https://unpkg.com/emulators@0.0.55/dist/wdirect.wasm",
"11bff62af123a3e693ceef43bc47ba7ba7aeea53a28f4a3c9772954b4d0d982a",
),
"wdirect.js": (
"https://unpkg.com/emulators@0.0.55/dist/wdirect.js",
"6c94981b57b0c8ffa8d410f134a6342a2ad3420319c1cb6738b1c445e1756959",
),
}
from .utils import make_zipfile
_default_components = ["dosbox.conf", "jsdos.json"]
DEFAULT_ZIP = {}
def _generate_zip(bundle_name="bundle"):
if bundle_name in DEFAULT_ZIP:
return DEFAULT_ZIP[bundle_name]
_b = io.BytesIO()
with zipfile.ZipFile(_b, "w") as f:
f.writestr(".jsdos/", "")
for fn in _default_components:
data = pkg_resources.resource_stream(
"jupyterlab_dosbox", os.path.join("bundles", fn)
).read()
f.writestr(os.path.join(".jsdos", fn), data)
_b.seek(0)
DEFAULT_ZIP[bundle_name] = _b.read()
return DEFAULT_ZIP[bundle_name]
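# _generate_zip() returns the raw bytes of a zip archive containing an empty
# ".jsdos/" directory entry plus the default ".jsdos/dosbox.conf" and
# ".jsdos/jsdos.json" components, caching the result in DEFAULT_ZIP under
# bundle_name so repeated calls are cheap.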
if __name__ == "__main__":
with open("null_bundle.zip", "wb") as f:
f.write(_generate_zip())
class RouteHandler(APIHandler):
# The following decorator should be present on all verb methods (head, get, post,
# patch, put, delete, options) to ensure only authorized user can request the
# Jupyter server
@tornado.web.authenticated
def get(self, *args, **kwargs):
        # Build the default .jsdos bundle contents and return them as a zip
filenames = {}
for fn in _default_components:
data = pkg_resources.resource_stream(
"jupyterlab_dosbox", os.path.join("bundles", fn)
).read()
filenames[os.path.join(".jsdos", fn)] = data
self.finish(make_zipfile(filenames))
class RouteWasmHandler(APIHandler):
@tornado.web.authenticated
def get(self, mod, extension):
fn = mod + "." + extension
        if fn not in _wasm_filenames:
            self.send_error(404)
            return
# Check for override
override_path = os.path.join(
pkg_resources.resource_filename("jupyterlab_dosbox", "debug_bundles"), fn
)
if os.path.exists(override_path):
self.finish(open(override_path, "rb").read())
return
rpath = pkg_resources.resource_filename("jupyterlab_dosbox", "bundles")
lfn = pooch.retrieve(*_wasm_filenames[fn], path=rpath)
self.finish(open(lfn, "rb").read())
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
route_pattern = url_path_join(
base_url, "jupyterlab_dosbox", "bundles", "null_bundle.jsdos(\.changed)?"
)
handlers = [(route_pattern, RouteHandler)]
web_app.add_handlers(host_pattern, handlers)
route_wasm_modules = url_path_join(
base_url, "jupyterlab_dosbox", "wasm", "(.*).(js|wasm)"
)
handlers_wasm = [(route_wasm_modules, RouteWasmHandler)]
web_app.add_handlers(host_pattern, handlers_wasm)
|
import tempfile
import pandas as pd
from joblib import dump
import numpy as np
from sklearn.linear_model import ElasticNet
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from analysis.hit_ratio_error import hit_ratio_error
from utils.logging import init_logger
from utils.s3_manager.manage import S3Manager
from utils.visualize import draw_hist
class ElasticNetModel:
"""
ElasticNet
"""
def __init__(self, bucket_name: str, x_train, y_train, params=None):
# logger
self.logger = init_logger()
# s3
self.s3_manager = S3Manager(bucket_name=bucket_name)
if params is None:
self.model = ElasticNet()
else:
self.model = ElasticNet(**params)
self.x_train, self.y_train = x_train, y_train
self.error = None
self.metric = None
def fit(self):
self.model.fit(self.x_train, self.y_train)
def predict(self, X):
return self.model.predict(X=X)
def estimate_metric(self, scorer, y, predictions):
self.error = pd.Series(y - predictions, name="error")
self.metric = scorer(y_true=y, y_pred=predictions)
return self.metric
def score(self):
return self.model.score(self.x_train, self.y_train)
@property
def coef_df(self):
"""
:return: pd DataFrame
"""
return pd.Series(
data=np.append(self.model.coef_, self.model.intercept_),
index=self.x_train.columns.tolist() + ["intercept"],
).rename("beta").reset_index().rename(columns={"index": "column"})
def save(self, prefix):
"""
save beta coef, metric, distribution, model
:param prefix: dir
"""
self.save_coef(key="{prefix}/beta.csv".format(prefix=prefix))
self.save_metric(key="{prefix}/metric.pkl".format(prefix=prefix))
self.save_error_distribution(prefix=prefix)
self.save_model(key="{prefix}/model.pkl".format(prefix=prefix))
def save_coef(self, key):
self.logger.info("coef:\n{coef}".format(coef=self.coef_df))
self.s3_manager.save_df_to_csv(self.coef_df, key=key)
def save_metric(self, key):
self.logger.info("customized RMSE is {metric}".format(metric=self.metric))
self.s3_manager.save_dump(x=self.metric, key=key)
def save_model(self, key):
self.s3_manager.save_dump(self.model, key=key)
def save_error_distribution(self, prefix):
draw_hist(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/error_distribution.png".format(prefix=prefix)
)
ratio = hit_ratio_error(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/hit_ratio_error.png".format(prefix=prefix)
)
return ratio
class ElasticNetSearcher(GridSearchCV):
"""
for research
"""
def __init__(
self, x_train, y_train, bucket_name,
grid_params=None, score=mean_squared_error
):
if grid_params is None:
grid_params = {
"max_iter": [1, 5, 10],
"alpha": [0, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100],
"l1_ratio": np.arange(0.0, 1.0, 0.1)
}
self.x_train = x_train
self.y_train = y_train
self.scorer = score
self.error = None # pd.Series
self.metric = None
# s3
self.s3_manager = S3Manager(bucket_name=bucket_name)
# logger
self.logger = init_logger()
super().__init__(
estimator=ElasticNet(),
param_grid=grid_params,
scoring=make_scorer(self.scorer, greater_is_better=False),
            # use a time-ordered split so training folds always precede validation folds (n_splits=2)
cv=TimeSeriesSplit(n_splits=2).split(self.x_train)
)
def fit(self, X=None, y=None, groups=None, **fit_params):
super().fit(X=self.x_train, y=self.y_train)
@property
def coef_df(self):
"""
:return: pd DataFrame
"""
return pd.Series(
data=np.append(self.best_estimator_.coef_, self.best_estimator_.intercept_),
index=self.x_train.columns.tolist() + ["intercept"],
).rename("beta").reset_index().rename(columns={"index": "column"})
def estimate_metric(self, y_true, y_pred):
self.error = pd.Series(y_true - y_pred, name="error")
self.metric = self.scorer(y_true=y_true, y_pred=y_pred)
return self.metric
def save(self, prefix):
"""
save tuned params, beta coef, metric, distribution, model
:param prefix: dir
"""
self.save_params(key="{prefix}/params.pkl".format(prefix=prefix))
self.save_coef(key="{prefix}/beta.pkl".format(prefix=prefix))
self.save_metric(key="{prefix}/metric.pkl".format(prefix=prefix))
self.save_error_distribution(prefix=prefix)
self.save_model(key="{prefix}/model.pkl".format(prefix=prefix))
def save_params(self, key):
self.logger.info("tuned params: {params}".format(params=self.best_params_))
self.s3_manager.save_dump(x=self.best_params_, key=key)
def save_coef(self, key):
self.logger.info("beta_coef:\n{coef}".format(coef=self.coef_df))
self.s3_manager.save_df_to_csv(self.coef_df, key=key)
def save_metric(self, key):
self.logger.info("customized RMSE is {metric}".format(metric=self.metric))
self.s3_manager.save_dump(x=self.metric, key=key)
def save_model(self, key):
# save best elastic net
self.s3_manager.save_dump(self.best_estimator_, key=key)
def save_error_distribution(self, prefix):
draw_hist(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/error_distribution.png".format(prefix=prefix)
)
ratio = hit_ratio_error(self.error)
self.s3_manager.save_plt_to_png(
key="{prefix}/image/hit_ratio_error.png".format(prefix=prefix)
)
return ratio
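# A minimal usage sketch (bucket name, prefix and the train/test frames are
# placeholders; S3Manager must be configured with valid credentials):
#
#   searcher = ElasticNetSearcher(x_train, y_train, bucket_name="my-bucket")
#   searcher.fit()
#   y_pred = searcher.predict(x_test)
#   searcher.estimate_metric(y_true=y_test, y_pred=y_pred)
#   searcher.save(prefix="research/elastic_net")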
|
from traceback_with_variables import prints_tb, ColorSchemes
@prints_tb(
num_context_lines=3,
max_value_str_len=100,
max_exc_str_len=1000,
ellipsis_='...',
color_scheme=ColorSchemes.synthwave,
)
def f(n):
print(1 / n)
def main():
f(0)
main()
|
# coding: utf-8
"""
Statuspage API
# Code of Conduct Please don't abuse the API, and please report all feature requests and issues to https://help.statuspage.io/help/contact-us-30 # Rate Limiting Each API token is limited to 1 request / second as measured on a 60 second rolling window. To get this limit increased or lifted, please contact us at https://help.statuspage.io/help/contact-us-30 # Basics ## HTTPS It's required ## URL Prefix In order to maintain version integrity into the future, the API is versioned. All calls currently begin with the following prefix: https://api.statuspage.io/v1/ ## RESTful Interface Wherever possible, the API seeks to implement repeatable patterns with logical, representative URLs and descriptive HTTP verbs. Below are some examples and conventions you will see throughout the documentation. * Collections are buckets: https://api.statuspage.io/v1/pages/asdf123/incidents.json * Elements have unique IDs: https://api.statuspage.io/v1/pages/asdf123/incidents/jklm456.json * GET will retrieve information about a collection/element * POST will create an element in a collection * PATCH will update a single element * PUT will replace a single element in a collection (rarely used) * DELETE will destroy a single element ## Sending Data Information can be sent in the body as form urlencoded or JSON, but make sure the Content-Type header matches the body structure or the server gremlins will be angry. All examples are provided in JSON format, however they can easily be converted to form encoding if required. Some examples of how to convert things are below: // JSON { \"incident\": { \"name\": \"test incident\", \"components\": [\"8kbf7d35c070\", \"vtnh60py4yd7\"] } } // Form Encoded (using curl as an example): curl -X POST https://api.statuspage.io/v1/example \\ -d \"incident[name]=test incident\" \\ -d \"incident[components][]=8kbf7d35c070\" \\ -d \"incident[components][]=vtnh60py4yd7\" # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from spio.api_client import ApiClient
from spio.exceptions import (
ApiTypeError,
ApiValueError
)
class IncidentSubscribersApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id(self, page_id, incident_id, subscriber_id, **kwargs): # noqa: E501
"""Unsubscribe an incident subscriber # noqa: E501
Unsubscribe an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id(page_id, incident_id, subscriber_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param str subscriber_id: Subscriber Identifier (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Subscriber
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id_with_http_info(page_id, incident_id, subscriber_id, **kwargs) # noqa: E501
def delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id_with_http_info(self, page_id, incident_id, subscriber_id, **kwargs): # noqa: E501
"""Unsubscribe an incident subscriber # noqa: E501
Unsubscribe an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id_with_http_info(page_id, incident_id, subscriber_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param str subscriber_id: Subscriber Identifier (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Subscriber, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['page_id', 'incident_id', 'subscriber_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'page_id' is set
if self.api_client.client_side_validation and ('page_id' not in local_var_params or # noqa: E501
local_var_params['page_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `page_id` when calling `delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id`") # noqa: E501
# verify the required parameter 'incident_id' is set
if self.api_client.client_side_validation and ('incident_id' not in local_var_params or # noqa: E501
local_var_params['incident_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `incident_id` when calling `delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id`") # noqa: E501
# verify the required parameter 'subscriber_id' is set
if self.api_client.client_side_validation and ('subscriber_id' not in local_var_params or # noqa: E501
local_var_params['subscriber_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `subscriber_id` when calling `delete_pages_page_id_incidents_incident_id_subscribers_subscriber_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'page_id' in local_var_params:
path_params['page_id'] = local_var_params['page_id'] # noqa: E501
if 'incident_id' in local_var_params:
path_params['incident_id'] = local_var_params['incident_id'] # noqa: E501
if 'subscriber_id' in local_var_params:
path_params['subscriber_id'] = local_var_params['subscriber_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/pages/{page_id}/incidents/{incident_id}/subscribers/{subscriber_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Subscriber', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_pages_page_id_incidents_incident_id_subscribers(self, page_id, incident_id, **kwargs): # noqa: E501
"""Get a list of incident subscribers # noqa: E501
Get a list of incident subscribers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pages_page_id_incidents_incident_id_subscribers(page_id, incident_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Subscriber]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_pages_page_id_incidents_incident_id_subscribers_with_http_info(page_id, incident_id, **kwargs) # noqa: E501
def get_pages_page_id_incidents_incident_id_subscribers_with_http_info(self, page_id, incident_id, **kwargs): # noqa: E501
"""Get a list of incident subscribers # noqa: E501
Get a list of incident subscribers # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pages_page_id_incidents_incident_id_subscribers_with_http_info(page_id, incident_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[Subscriber], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['page_id', 'incident_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pages_page_id_incidents_incident_id_subscribers" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'page_id' is set
if self.api_client.client_side_validation and ('page_id' not in local_var_params or # noqa: E501
local_var_params['page_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `page_id` when calling `get_pages_page_id_incidents_incident_id_subscribers`") # noqa: E501
# verify the required parameter 'incident_id' is set
if self.api_client.client_side_validation and ('incident_id' not in local_var_params or # noqa: E501
local_var_params['incident_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `incident_id` when calling `get_pages_page_id_incidents_incident_id_subscribers`") # noqa: E501
collection_formats = {}
path_params = {}
if 'page_id' in local_var_params:
path_params['page_id'] = local_var_params['page_id'] # noqa: E501
if 'incident_id' in local_var_params:
path_params['incident_id'] = local_var_params['incident_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/pages/{page_id}/incidents/{incident_id}/subscribers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Subscriber]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_pages_page_id_incidents_incident_id_subscribers_subscriber_id(self, page_id, incident_id, subscriber_id, **kwargs): # noqa: E501
"""Get an incident subscriber # noqa: E501
Get an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pages_page_id_incidents_incident_id_subscribers_subscriber_id(page_id, incident_id, subscriber_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param str subscriber_id: Subscriber Identifier (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Subscriber
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_pages_page_id_incidents_incident_id_subscribers_subscriber_id_with_http_info(page_id, incident_id, subscriber_id, **kwargs) # noqa: E501
def get_pages_page_id_incidents_incident_id_subscribers_subscriber_id_with_http_info(self, page_id, incident_id, subscriber_id, **kwargs): # noqa: E501
"""Get an incident subscriber # noqa: E501
Get an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pages_page_id_incidents_incident_id_subscribers_subscriber_id_with_http_info(page_id, incident_id, subscriber_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param str subscriber_id: Subscriber Identifier (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Subscriber, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['page_id', 'incident_id', 'subscriber_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pages_page_id_incidents_incident_id_subscribers_subscriber_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'page_id' is set
if self.api_client.client_side_validation and ('page_id' not in local_var_params or # noqa: E501
local_var_params['page_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `page_id` when calling `get_pages_page_id_incidents_incident_id_subscribers_subscriber_id`") # noqa: E501
# verify the required parameter 'incident_id' is set
if self.api_client.client_side_validation and ('incident_id' not in local_var_params or # noqa: E501
local_var_params['incident_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `incident_id` when calling `get_pages_page_id_incidents_incident_id_subscribers_subscriber_id`") # noqa: E501
# verify the required parameter 'subscriber_id' is set
if self.api_client.client_side_validation and ('subscriber_id' not in local_var_params or # noqa: E501
local_var_params['subscriber_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `subscriber_id` when calling `get_pages_page_id_incidents_incident_id_subscribers_subscriber_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'page_id' in local_var_params:
path_params['page_id'] = local_var_params['page_id'] # noqa: E501
if 'incident_id' in local_var_params:
path_params['incident_id'] = local_var_params['incident_id'] # noqa: E501
if 'subscriber_id' in local_var_params:
path_params['subscriber_id'] = local_var_params['subscriber_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/pages/{page_id}/incidents/{incident_id}/subscribers/{subscriber_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Subscriber', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def post_pages_page_id_incidents_incident_id_subscribers(self, page_id, incident_id, post_pages_page_id_incidents_incident_id_subscribers, **kwargs): # noqa: E501
"""Create an incident subscriber # noqa: E501
Create an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_pages_page_id_incidents_incident_id_subscribers(page_id, incident_id, post_pages_page_id_incidents_incident_id_subscribers, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param PostPagesPageIdIncidentsIncidentIdSubscribers post_pages_page_id_incidents_incident_id_subscribers: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Subscriber
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.post_pages_page_id_incidents_incident_id_subscribers_with_http_info(page_id, incident_id, post_pages_page_id_incidents_incident_id_subscribers, **kwargs) # noqa: E501
def post_pages_page_id_incidents_incident_id_subscribers_with_http_info(self, page_id, incident_id, post_pages_page_id_incidents_incident_id_subscribers, **kwargs): # noqa: E501
"""Create an incident subscriber # noqa: E501
Create an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_pages_page_id_incidents_incident_id_subscribers_with_http_info(page_id, incident_id, post_pages_page_id_incidents_incident_id_subscribers, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param PostPagesPageIdIncidentsIncidentIdSubscribers post_pages_page_id_incidents_incident_id_subscribers: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Subscriber, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['page_id', 'incident_id', 'post_pages_page_id_incidents_incident_id_subscribers'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method post_pages_page_id_incidents_incident_id_subscribers" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'page_id' is set
if self.api_client.client_side_validation and ('page_id' not in local_var_params or # noqa: E501
local_var_params['page_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `page_id` when calling `post_pages_page_id_incidents_incident_id_subscribers`") # noqa: E501
# verify the required parameter 'incident_id' is set
if self.api_client.client_side_validation and ('incident_id' not in local_var_params or # noqa: E501
local_var_params['incident_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `incident_id` when calling `post_pages_page_id_incidents_incident_id_subscribers`") # noqa: E501
# verify the required parameter 'post_pages_page_id_incidents_incident_id_subscribers' is set
if self.api_client.client_side_validation and ('post_pages_page_id_incidents_incident_id_subscribers' not in local_var_params or # noqa: E501
local_var_params['post_pages_page_id_incidents_incident_id_subscribers'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `post_pages_page_id_incidents_incident_id_subscribers` when calling `post_pages_page_id_incidents_incident_id_subscribers`") # noqa: E501
collection_formats = {}
path_params = {}
if 'page_id' in local_var_params:
path_params['page_id'] = local_var_params['page_id'] # noqa: E501
if 'incident_id' in local_var_params:
path_params['incident_id'] = local_var_params['incident_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'post_pages_page_id_incidents_incident_id_subscribers' in local_var_params:
body_params = local_var_params['post_pages_page_id_incidents_incident_id_subscribers']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/pages/{page_id}/incidents/{incident_id}/subscribers', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Subscriber', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation(self, page_id, incident_id, subscriber_id, **kwargs): # noqa: E501
"""Resend confirmation to an incident subscriber # noqa: E501
Resend confirmation to an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation(page_id, incident_id, subscriber_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param str subscriber_id: Subscriber Identifier (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation_with_http_info(page_id, incident_id, subscriber_id, **kwargs) # noqa: E501
def post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation_with_http_info(self, page_id, incident_id, subscriber_id, **kwargs): # noqa: E501
"""Resend confirmation to an incident subscriber # noqa: E501
Resend confirmation to an incident subscriber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation_with_http_info(page_id, incident_id, subscriber_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str page_id: Page identifier (required)
:param str incident_id: Incident Identifier (required)
:param str subscriber_id: Subscriber Identifier (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['page_id', 'incident_id', 'subscriber_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'page_id' is set
if self.api_client.client_side_validation and ('page_id' not in local_var_params or # noqa: E501
local_var_params['page_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `page_id` when calling `post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation`") # noqa: E501
# verify the required parameter 'incident_id' is set
if self.api_client.client_side_validation and ('incident_id' not in local_var_params or # noqa: E501
local_var_params['incident_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `incident_id` when calling `post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation`") # noqa: E501
# verify the required parameter 'subscriber_id' is set
if self.api_client.client_side_validation and ('subscriber_id' not in local_var_params or # noqa: E501
local_var_params['subscriber_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `subscriber_id` when calling `post_pages_page_id_incidents_incident_id_subscribers_subscriber_id_resend_confirmation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'page_id' in local_var_params:
path_params['page_id'] = local_var_params['page_id'] # noqa: E501
if 'incident_id' in local_var_params:
path_params['incident_id'] = local_var_params['incident_id'] # noqa: E501
if 'subscriber_id' in local_var_params:
path_params['subscriber_id'] = local_var_params['subscriber_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/pages/{page_id}/incidents/{incident_id}/subscribers/{subscriber_id}/resend_confirmation', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
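# Illustrative usage sketch only: it assumes a pre-configured ApiClient (API-key
# auth set up elsewhere), and 'PAGE_ID' / 'INCIDENT_ID' are placeholder values,
# not identifiers taken from the generated client.
def example_list_incident_subscribers(configured_api_client):
    # List subscribers for one incident; the wrapper defaults to returning only
    # the deserialized response data (list[Subscriber]).
    api = IncidentSubscribersApi(configured_api_client)
    return api.get_pages_page_id_incidents_incident_id_subscribers('PAGE_ID', 'INCIDENT_ID')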
|
#
# plot-trav-wave-x
# Plot spatial profile of a travelling wave
#
# Sparisoma Viridi | https://butiran.github.io
#
# 20210206
# 0525 Learn to plot from [1].
# 0538 Learn from [2].
# 0544 Use \pi from [3].
# 0557 Can draw a sine function.
# 0604 Learn add_subplot [4].
# 0618 Just know this one [5].
# 0639 Read style in plotting [6].
# 0654 Give comments for clearer reading of the code.
# 0702 Add axis label [7].
#
# References
# 1. url https://matplotlib.org/gallery/animation
# /double_pendulum_animated_sgskip.html [20210205].
# 2. The SciPy community, "numpy.arange", NumPy, 31 Jan 2021,
# url https://numpy.org/doc/stable/reference/generated
# /numpy.arange.html [20210206].
# 3. The SciPy community, "Constants", NumPy, 31 Jan 2021,
# url https://numpy.org/doc/stable/reference
# /constants.html [20210206].
# 4. John Hunter, Darren Dale, Eric Firing, Michael Droettboom,
# and the Matplotlib development team, "matplotlib.pyplot
# .subplot", 5 Jan 2020, url https://matplotlib.org/3.1.1
# /api/_as_gen/matplotlib.pyplot.subplot.html [20210206].
# 5. Guido Mocha, "Answer to ''", StackOverflow, 9 Sep 2019 at
# 13:55, url https://stackoverflow.com/a/57855482 [20210206].
# 6. John Hunter, Darren Dale, Eric Firing, Michael Droettboom,
# and the Matplotlib development team, "matplotlib.axes
# .Axes.plot", 5 Jan 2020, url https://matplotlib.org/3.1.1
# /api/_as_gen/matplotlib.axes.Axes.plot.html [20210206].
# 7. John Hunter, Darren Dale, Eric Firing, Michael Droettboom,
# and the Matplotlib development team, "Simple axes labels",
# 18 May 2019, url https://matplotlib.org/3.1.1/api/_as_gen
# /matplotlib.axes.Axes.plot.html [20210206].
#
# Import required libraries
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
# Define a function for travelling wave
def travwave(x, t):
A = 0.2
T = 1
_l = 1
_w = 2 * np.pi / T
k = 2 * np.pi / _l
_varphi0 = np.pi
y = A * sin(_w * t - k * x + _varphi0)
return y
# Set observation time
t = 0.50
# Create x and y data
x = np.arange(0, 2, 0.04)
y = travwave(x, t)
# Get figure for plotting
fig = plt.figure()
# Configure axes
ax = fig.add_subplot(
111,
autoscale_on=True,
xlim=(0, 2),
ylim=(-0.2, 0.2)
)
ax.set_aspect('equal')
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
# Set style for plotting
line, = ax.plot([], [], 'sr-', lw=1, ms=4)
time_template = 't = %.2f s'
time_text = ax.text(0.8, 0.8, '', transform=ax.transAxes)
# Set data and time information
line.set_data(x, y)
time_text.set_text(time_template % t)
# Show plotting result
plt.show()
|
import main
async def create(bot):
with open('../web/db.sqlite3', 'rb') as database_file:
await main.send_file_to_global_admin(database_file, bot)
|
# partially based on https://github.com/hiram64/temporal-ensembling-semi-supervised
import sys
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.losses import mean_squared_error
from lib.utils import to_onehot
def create_loss_func(num_class, class_distr, ssl_method=None, to_return=None, enable_harden=False):
"""
    Builds a custom TensorFlow/Keras loss function for semi-supervised training.
    :param num_class: number of classes
    :param class_distr: class distribution, used to weight classes for imbalanced datasets
    :param ssl_method: semi-supervised method ('pi-model' or 'pseudo-label')
    :param to_return: which loss/metric variant to return ('weight', 'supervised', 'unsupervised', 'pi_model_labeled', 'pi_model_unlabeled')
    :param enable_harden: experimental hardening of targets and predictions
    :return: the requested function (defaults to the combined semi-supervised loss)
"""
epsilon = 1e-08
pseudo_label_threshold = 0.55
if class_distr is not None:
inv_distr = 1 / class_distr
class_weights = len(class_distr) * inv_distr / sum(inv_distr)
def harden(x):
return (tf.math.erf(8 * (x - 0.5)) + 1) / 2
def weight_f(y_true, y_pred):
weight = y_true[:, -1]
weight = K.mean(weight)
return weight
def cross_entropy(prediction, one_hot_target, selection):
c_entropy = one_hot_target * K.log(K.clip(prediction, epsilon, 1.0 - epsilon))
if class_distr is not None:
c_entropy *= class_weights
# To sum over only supervised data on categorical_crossentropy, supervised_flag(1/0) is used
supervised_loss = - K.mean(
K.sum(c_entropy, axis=1) * selection
)
return supervised_loss
def pi_model_f(y_true, y_pred):
unsupervised_target = y_true[:, 0:num_class]
model_pred = y_pred[:, 0:num_class]
if enable_harden:
unsupervised_target = harden(unsupervised_target)
model_pred = harden(model_pred)
return K.mean(mean_squared_error(unsupervised_target, model_pred))
def pi_model_labeled_f(y_true, y_pred):
unsupervised_target = y_true[:, 0:num_class]
supervised_flag = y_true[:, num_class * 2]
model_pred = y_pred[:, 0:num_class]
return K.mean(mean_squared_error(unsupervised_target, model_pred) * supervised_flag)
def pi_model_unlabeled_f(y_true, y_pred):
return pi_model_f(y_true, y_pred) - pi_model_labeled_f(y_true, y_pred)
def pseudo_label_f(y_true, y_pred):
unsupervised_flag = 1 - y_true[:, num_class * 2]
model_pred = y_pred[:, 0:num_class]
batch_size = K.shape(model_pred)[0]
max_confidence = K.max(model_pred, axis=1)
max_confidence = K.reshape(max_confidence, (1, batch_size))
pseudo_target = model_pred - K.transpose(max_confidence)
pseudo_target = K.equal(pseudo_target, 0)
pseudo_target = K.cast(pseudo_target, 'float32')
cutoff = K.greater(max_confidence, pseudo_label_threshold)
cutoff = K.cast(cutoff, 'float32')
selection = unsupervised_flag * cutoff
unsupervised_loss = cross_entropy(model_pred, pseudo_target, selection)
return unsupervised_loss
def unsupervised_f(y_true, y_pred):
if ssl_method == 'pi-model':
unsupervised_loss = pi_model_f(y_true, y_pred)
elif ssl_method == 'pseudo-label':
unsupervised_loss = pseudo_label_f(y_true, y_pred)
else:
unsupervised_loss = 0
return unsupervised_loss
def supervised_f(y_true, y_pred):
supervised_label = y_true[:, num_class:num_class * 2]
supervised_flag = y_true[:, num_class * 2]
model_pred = y_pred[:, 0:num_class]
supervised_loss = cross_entropy(model_pred, supervised_label, supervised_flag)
return supervised_loss
def loss_func(y_true, y_pred):
"""
semi-supervised loss function
the order of y_true:
unsupervised_target(num_class), supervised_label(num_class), supervised_flag(1), unsupervised weight(1)
"""
weight = y_true[:, -1]
supervised_loss = supervised_f(y_true, y_pred)
unsupervised_loss = unsupervised_f(y_true, y_pred)
return supervised_loss + weight * unsupervised_loss
if to_return == 'weight':
return weight_f
elif to_return == 'supervised':
return supervised_f
elif to_return == 'unsupervised':
return unsupervised_f
elif to_return == 'pi_model_labeled':
return pi_model_labeled_f
elif to_return == 'pi_model_unlabeled':
return pi_model_unlabeled_f
return loss_func
def wrap_print(x, message):
"""
    Helper function for debugging: prints a tensor and returns a zero-valued tensor that depends on the print op.
"""
# op = tf.print(message, x, output_stream='file://loss.log', summarize=-1)
op = tf.print(message, x, output_stream=sys.stdout, summarize=-1)
with tf.control_dependencies([op]):
return 0 * tf.identity(tf.reduce_mean(x))
def ramp_up_weight(ramp_period, weight_max):
"""
    Ramp-up weight generator used for the unsupervised component of the loss.
    :param ramp_period: length of the ramp-up period (in epochs)
    :param weight_max: maximum weight reached after the ramp-up
"""
cur_epoch = 0
while True:
if cur_epoch <= ramp_period - 1:
T = (1 / (ramp_period - 1)) * cur_epoch
yield np.exp(-5 * (1 - T) ** 2) * weight_max
else:
yield 1 * weight_max
cur_epoch += 1
def ramp_down_weight(ramp_period):
"""
    Ramp-down weight generator.
:param ramp_period: length of the ramp-down period
"""
cur_epoch = 1
while True:
if cur_epoch <= ramp_period - 1:
T = (1 / (ramp_period - 1)) * cur_epoch
yield np.exp(-12.5 * T ** 2)
else:
yield 0
cur_epoch += 1
def update_weight(y, unsupervised_weight, next_weight):
"""
    Update the weight of the unsupervised part of the loss.
"""
y[:, -1] = next_weight
unsupervised_weight[:] = next_weight
return y, unsupervised_weight
def evaluate(model, num_class, test_x, test_y, hot=True):
"""
    Evaluate the model's predictions on a test set.
    :param model: Model
    :param num_class: number of classes
    :param test_x: test samples
    :param test_y: test targets
    :param hot: whether the targets are one-hot encoded
    :return: classification accuracy on the test set
"""
assert len(test_x) == len(test_y)
if not hot:
test_y = to_onehot(test_y, num_class)
num_test = len(test_y)
test_supervised_label_dummy = np.zeros((num_test, num_class))
test_supervised_flag_dummy = np.zeros((num_test, 1))
test_unsupervised_weight_dummy = np.zeros((num_test, 1))
test_x_ap = [test_x, test_supervised_label_dummy, test_supervised_flag_dummy, test_unsupervised_weight_dummy]
p = model.predict(x=test_x_ap)
pr = p[:, 0:num_class]
pr_arg_max = np.argmax(pr, axis=1)
tr_arg_max = np.argmax(test_y, axis=1)
cnt = np.sum(pr_arg_max == tr_arg_max)
acc = cnt / num_test
return acc
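# Illustrative wiring sketch (names such as `model`, `num_class` and
# `class_distr` are assumptions, not part of this module): the combined loss
# expects y_true to be packed as [unsupervised_target (num_class),
# supervised_label (num_class), supervised_flag (1), unsupervised_weight (1)],
# while only the first num_class columns of y_pred are read.
def compile_with_ssl_loss(model, num_class, class_distr=None):
    loss = create_loss_func(num_class, class_distr, ssl_method='pi-model')
    metrics = [
        create_loss_func(num_class, class_distr, ssl_method='pi-model', to_return='supervised'),
        create_loss_func(num_class, class_distr, ssl_method='pi-model', to_return='unsupervised'),
    ]
    model.compile(optimizer='adam', loss=loss, metrics=metrics)
    # The unsupervised weight column would typically be refreshed once per epoch,
    # e.g. (80 and 30.0 are placeholder hyper-parameters):
    #   weight_gen = ramp_up_weight(ramp_period=80, weight_max=30.0)
    #   y, unsupervised_weight = update_weight(y, unsupervised_weight, next(weight_gen))
    return model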
|
import json
from flask import Flask, send_from_directory, render_template, request
import get_data
app = Flask(__name__)
@app.route("/")
def index():
start = request.cookies.get("start") or "000.jpg"
grid = ""
bounds = ""
figures = ""
if start[0] == "1":
grid = " checked"
if start[1] == "1":
bounds = " checked"
if start[2] == "1":
figures = " checked"
return render_template("index.html", start=start, grid=grid, bounds=bounds, figures=figures)
@app.route("/search_catalogs", methods=["POST"])
def fetch_catalogs():
service = {
"Simple Cone Search": "SCS",
"Simple Image Access Protocol": "SIAP",
"Simple Spectral Access": "SSA"
}[request.form["service"]]
results = get_data.catalog_search(request.form["search_term"], service, 20) or ""
return json.dumps(results)
@app.route("/name_search", methods=["POST"])
def name_search():
service = {
"Simple Cone Search": "SCS",
"Simple Image Access Protocol": "SIAP",
"Simple Spectral Access": "SSA"
}[request.form["service"]]
ra, dec = get_data.name_to_coords(request.form["name"])
if ra is None:
# If ra is None, dec is also None, and it means the name was invalid
return json.dumps("")
results = get_data.service_heasarc(service, request.form["id"], ra, dec, request.form["search_radius"])
if results is not None:
results = list(results)
first_column = {}
key = results[0].columns[0] if results[1] == {} else results[1][results[0].columns[0]]["desc"]
first_column[key] = results[0][results[0].columns[0]].to_list()
results[0] = results[0].to_json()
results.append(first_column)
else:
results = ""
return json.dumps(results)
@app.route("/coordinates_search", methods=["POST"])
def coordinates_search():
service = {
"Simple Cone Search": "SCS",
"Simple Image Access Protocol": "SIAP",
"Simple Spectral Access": "SSA"
}[request.form["service"]]
results = get_data.service_heasarc(service, request.form["id"], request.form["ra"], request.form["dec"], request.form["search_radius"])
if results is not None:
results = list(results)
first_column = {}
key = results[0].columns[0] if results[1] == {} else results[1][results[0].columns[0]]["desc"]
first_column[key] = results[0][results[0].columns[0]].to_list()
results[0] = results[0].to_json()
results.append(first_column)
else:
results = ""
return json.dumps(results)
@app.route("/scripts/<path:path>")
def send_script(path):
return send_from_directory("static/scripts", path)
@app.route("/styles/<path:path>")
def send_styles(path):
return send_from_directory("static/styles", path)
@app.route("/images/<path:path>")
def send_images(path):
return send_from_directory("static/images", path)
|
"""Converts DC2G data to TFRecords file format with Example protos."""
import os
import sys
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
from dc2g.util import get_training_testing_houses, get_object_goal_names
dir_path, _ = os.path.split(os.path.dirname(os.path.realpath(__file__)))
# dataset = "house3d"
# dataset = "driveways_iros19"
dataset = "driveways_bing_iros19"
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert(mode, houses, goal_names):
print("Converting {}".format(mode))
# goal_names = ["television", "toilet"]
# goal_names = ["television"]
# houses = ["0004d52d1aeeb8ae6de39d6bd993e992", "000514ade3bcc292a613a4c2755a5050", "00a42e8f3cb11489501cfeba86d6a297"]
if mode == "train":
if dataset == "house3d":
num_masks = 105
elif "driveways" in dataset:
num_masks = 256
masks = range(num_masks)
if mode == "val":
if dataset == "house3d":
num_masks = 105
elif "driveways" in dataset:
num_masks = 80
masks = range(num_masks)
elif mode == "test":
# masks = [52, 29, 21, 5, 89]
# masks = [1,2,3,4]
masks = range(15)
count = 0
tf_record_filename = "{dir_path}/training_data/{dataset}/tf_records/{mode}.tfrecords".format(dir_path=dir_path, mode=mode, dataset=dataset)
with tf.python_io.TFRecordWriter(tf_record_filename) as writer:
for world_id in houses:
# print("Adding {} to tfrecord.".format(world_id))
for mask_id in masks:
# print("mask {}".format(mask_id))
for goal_name in goal_names:
mask_id_str = str(mask_id).zfill(3)
semantic_map_filename = "{dir_path}/training_data/{dataset}/masked_semantic/{mode}/world{world_id}_{mask_id_str}.png".format(mask_id_str=mask_id_str, world_id=world_id, goal_name=goal_name, dir_path=dir_path, mode=mode, dataset=dataset)
c2g_map_filename = "{dir_path}/training_data/{dataset}/masked_c2g/{mode}/world{world_id}_{mask_id_str}-{goal_name}.png".format(mask_id_str=mask_id_str, world_id=world_id, goal_name=goal_name, dir_path=dir_path, mode=mode, dataset=dataset)
# semantic_map_filename = "{dir_path}/training_data/{dataset}/full_semantic/{mode}/world{world_id}.png".format(world_id=world_id, goal_name=goal_name, dir_path=dir_path, mode=mode, dataset=dataset)
# c2g_map_filename = "{dir_path}/training_data/{dataset}/full_c2g/{mode}/world{world_id}-{goal_name}.png".format(world_id=world_id, goal_name=goal_name, dir_path=dir_path, mode=mode, dataset=dataset)
if os.path.isfile(semantic_map_filename) and os.path.isfile(c2g_map_filename):
count += 1
with open(semantic_map_filename, 'rb') as f:
semantic_map = f.read()
with open(c2g_map_filename, 'rb') as f:
c2g_map = f.read()
goal_name_bytes = goal_name.encode('utf-8')
world_id_bytes = world_id.encode('utf-8')
mask_id_bytes = mask_id_str.encode('utf-8')
# print("Adding {} w/ goal {} to tfrecord.".format(world_id, goal_name))
example = tf.train.Example(
features=tf.train.Features(
feature={
'goal_class': _bytes_feature(goal_name_bytes),
'semantic_map': _bytes_feature(semantic_map),
'c2g_map': _bytes_feature(c2g_map),
'world_id': _bytes_feature(world_id_bytes),
'mask_id': _bytes_feature(mask_id_bytes),
}))
writer.write(example.SerializeToString())
print("Added {count} pairs to the tfrecord.".format(count=count))
def main():
training_houses, validation_houses, testing_houses = get_training_testing_houses(dataset)
object_goal_names = get_object_goal_names(dataset)
# Convert to Examples and write the result to TFRecords.
convert('train', training_houses, object_goal_names)
convert('val', validation_houses, object_goal_names)
convert('test', testing_houses, object_goal_names)
if __name__ == '__main__':
main()
|
import struct, re
import pyretic.vendor
from ryu.lib.packet import *
from ryu.lib import addrconv
from pyretic.core import util
from pyretic.core.network import IPAddr, EthAddr
__all__ = ['of_field', 'of_fields', 'get_packet_processor', 'Packet']
_field_list = dict()
IPV4 = 0x0800
IPV6 = 0x86dd
VLAN = 0x8100
ARP = 0x0806
ICMP_PROTO = 1
TCP_PROTO = 6
UDP_PROTO = 17
##################################################
# EMPTY TEMPLATE PACKETS FOR DIFFERENT PROTOCOLS
##################################################
def arp_packet_gen():
pkt = packet.Packet()
pkt.protocols.append(ethernet.ethernet("ff:ff:ff:ff:ff:ff", "ff:ff:ff:ff:ff:ff", ARP))
pkt.protocols.append(arp.arp())
return pkt
def ipv6_packet_gen():
pkt = packet.Packet()
pkt.protocols.append(ethernet.ethernet("ff:ff:ff:ff:ff:ff", "ff:ff:ff:ff:ff:ff", IPV6))
pkt.protocols.append(ipv6.ipv6(6, 0, 0, 0, 0, 0, '0:0:0:0:0:0:0:0', '0:0:0:0:0:0:0:0'))
return pkt
def udp_packet_gen():
pkt = packet.Packet()
pkt.protocols.append(ethernet.ethernet("ff:ff:ff:ff:ff:ff", "ff:ff:ff:ff:ff:ff", IPV4))
pkt.protocols.append(ipv4.ipv4(proto=UDP_PROTO))
pkt.protocols.append(udp.udp(0, 0))
return pkt
def tcp_packet_gen():
pkt = packet.Packet()
pkt.protocols.append(ethernet.ethernet("ff:ff:ff:ff:ff:ff", "ff:ff:ff:ff:ff:ff", IPV4))
pkt.protocols.append(ipv4.ipv4(proto=TCP_PROTO))
pkt.protocols.append(tcp.tcp(0, 0, 0, 0, 0, 0, 0, 0, 0))
return pkt
def icmp_packet_gen():
pkt = packet.Packet()
pkt.protocols.append(ethernet.ethernet("ff:ff:ff:ff:ff:ff", "ff:ff:ff:ff:ff:ff", IPV4))
pkt.protocols.append(ipv4.ipv4(proto=ICMP_PROTO))
pkt.protocols.append(icmp.icmp(0, 0, 0))
return pkt
ethertype_packets = {
IPV4: {
ICMP_PROTO: icmp_packet_gen,
TCP_PROTO : tcp_packet_gen,
UDP_PROTO : udp_packet_gen
},
IPV6: ipv6_packet_gen,
ARP: arp_packet_gen
}
def build_empty_packet(ethertype, proto=None):
if ethertype:
pkt = ethertype_packets[ethertype]
if proto is not None and not callable(pkt): pkt = pkt[proto]
return pkt()
return packet.Packet()
def of_fields(version="1.0"):
return _field_list[version]
def get_protocol(ryu_pkt, protocol):
for idx, i in enumerate(ryu_pkt.protocols):
if hasattr(i, "protocol_name") and i.protocol_name==protocol:
return idx
return None
################################################################################
# Processor
################################################################################
class Processor(object):
def __init__(self):
pass
def compile(self):
fields = of_fields()
fields = [field() for _, field in fields.items()]
validators = { field.validator.__class__.__name__: dict() for field in fields }
for field in fields:
exclusive_validators = validators[field.validator.__class__.__name__]
exclusive_validators.setdefault(field.validator, set())
exclusive_validators[field.validator].add(field)
def extract_exclusive_headers(ryu_pkt, exclusive_groups):
headers = {}
for validator, fields in exclusive_groups.items():
if not iter(fields).next().is_valid(ryu_pkt):
continue
for field in fields:
headers[field.pyretic_field] = field.decode(ryu_pkt)
break
return headers
def pack_pyretic_headers(pyr_pkt, tmp_pkt, exclusive_groups):
headers = {}
for validator, fields in exclusive_groups.items():
if not iter(fields).next().is_valid(pyr_pkt):
continue
for field in fields:
field.encode_in_place(pyr_pkt, tmp_pkt)
break
return pyr_pkt
def expand(ryu_pkt):
if not isinstance(ryu_pkt, packet.Packet):
ryu_pkt = packet.Packet(ryu_pkt)
headers = {}
for key, exclusive_groups in validators.items():
headers.update( extract_exclusive_headers(ryu_pkt, exclusive_groups) )
return headers
def contract(pyr_pkt):
pkt = packet.Packet(pyr_pkt['raw'])
if len(pyr_pkt['raw']) == 0:
pkt = build_empty_packet(pyr_pkt.get('ethtype', None), pyr_pkt.get('protocol', None))
def convert(h, v):
if isinstance(v, IPAddr):
return str(v)
elif isinstance(v, EthAddr):
return str(v)
else:
return v
pyr_pkt = { h : convert(h, v) for h,v in pyr_pkt.items() }
for key, exclusive_groups in validators.items():
pack_pyretic_headers(pyr_pkt, pkt, exclusive_groups)
pkt.serialize()
pyr_pkt['raw'] = str(pkt.data)
return str(pkt.data)
setattr(self, 'unpack', expand)
setattr(self, 'pack', contract)
# Build the packet processor pipeline
return self
def get_packet_processor():
try:
return get_packet_processor.processor
except AttributeError:
get_packet_processor.processor = Processor().compile()
return get_packet_processor.processor
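def _example_unpack():
    # Illustrative sketch only (not part of pyretic's public API): serialize one
    # of the empty template packets defined above and expand it into a pyretic
    # header dict via the compiled processor.
    processor = get_packet_processor()
    pkt = build_empty_packet(ARP)
    pkt.serialize()
    return processor.unpack(str(pkt.data))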
################################################################################
# Field Validators
################################################################################
class Validator(object):
__slots__ = ['value']
def __init__(self, value):
self.value = value
def __eq__(self, other):
return self.__class__ == other.__class__ and self.value == other.value
def __repr__(self):
return "%s: %s" % (self.__class__.__name__, self.value)
def __hash__(self):
return hash("%s_%s" % (self.__class__.__name__, self.value))
def __call__(self, obj, pkt):
if isinstance(pkt, packet.Packet):
return self.validate_ryu_packet(obj, pkt)
return self.validate_pyretic_packet(obj, pkt)
class ProtocolValidator(Validator):
def validate_ryu_packet(self, obj, ryu_pkt):
try:
return obj.protocol(ryu_pkt, "ipv4").proto == self.value
except:
return False
def validate_pyretic_packet(self, obj, pkt):
try:
return pkt["protocol"] == self.value
except:
return False
class EthertypeValidator(Validator):
def validate_ryu_packet(self, obj, ryu_pkt):
try:
layer = obj.protocol(ryu_pkt, "vlan")
if layer is None: layer = obj.protocol(ryu_pkt, "ethernet")
return layer.ethertype == self.value
except:
return False
def validate_pyretic_packet(self, obj, pkt):
try:
return pkt["ethtype"] == self.value
except:
return False
class VlanValidator(Validator):
def __init__(self):
self.value = VLAN
def validate_ryu_packet(self, obj, ryu_pkt):
try:
return obj.protocol(ryu_pkt, "ethernet").ethertype == VLAN
except:
return False
def validate_pyretic_packet(self, obj, pkt):
if (not 'vlan_id' in pkt) and (not 'vlan_pcp' in pkt):
return True
try: return 'vlan_id' in pkt
except: return False
try: return 'vlan_pcp' in pkt
except: return False
class TrueValidator(Validator):
def validate_ryu_packet(self, obj, ryu_pkt):
return True
def validate_pyretic_packet(self, obj, pkt):
return True
def proto_validator(value):
return ProtocolValidator(value)
def ether_validator(value):
return EthertypeValidator(value)
def true_validator(*args, **kwargs):
return TrueValidator(True)
def vlan_validator(*args, **kwargs):
return VlanValidator()
################################################################################
# Field Decorator
################################################################################
def of_field(match="", pyretic_field="", validator=true_validator(), version="1.0"):
matches = match.split(".")
def _of_field(cls):
if len(matches) == 2: protocol, field = matches
def get_protocol(ryu_pkt, protocol):
for idx, i in enumerate(ryu_pkt.protocols):
if hasattr(i, "protocol_name") and i.protocol_name==protocol:
return idx
return None
def _get_protocol(self, ryu_pkt, _protocol = None):
if _protocol is None:
_protocol = protocol
idx = get_protocol(ryu_pkt, _protocol)
if idx is None: return None
return ryu_pkt.protocols[idx]
def field_decode(self, ryu_pkt):
#if not self.is_valid(ryu_pkt): return None
layer = self.protocol(ryu_pkt)
if layer is None: return None
return getattr(layer, field)
def field_encode(self, pyretic_pkt):
pkt = packet.Packet(pyretic_pkt['raw'])
field_encode_in_place(self, pyretic_pkt, pkt)
pkt.serialize()
pyretic_pkt['raw'] = pkt.data
return pyretic_pkt
def field_encode_in_place(self, pyretic_pkt, pkt):
if not pyretic_field in pyretic_pkt: return pyretic_pkt
layer = self.protocol(pkt)
            # The target layer must already be present; creating a missing layer is not supported here
            if layer is None: raise ValueError
setattr(layer, field, pyretic_pkt[pyretic_field])
return pyretic_pkt
def is_valid(self, ryu_pkt):
return self.validator(self, ryu_pkt)
# Add this field as an available field
_field_list.setdefault(version, dict())
_field_list[version][cls.__name__] = cls
        # Augment the field class with the required attributes and methods
if not hasattr(cls, "protocol"): setattr(cls, "protocol", _get_protocol)
if not hasattr(cls, "validator"): setattr(cls, "validator", validator)
if not hasattr(cls, "is_valid"): setattr(cls, "is_valid", is_valid)
if not hasattr(cls, "decode"): setattr(cls, "decode", field_decode)
if not hasattr(cls, "encode"): setattr(cls, "encode", field_encode)
if not hasattr(cls, "encode_in_place"): setattr(cls, "encode_in_place", field_encode_in_place)
if not hasattr(cls, "pyretic_field"): setattr(cls, "pyretic_field", pyretic_field)
return cls
return _of_field
#######################
# OPENFLOW 1.0 FIELDS
#######################
@of_field("udp.dst_port", "dstport", proto_validator(UDP_PROTO), "1.0")
class UdpDstPort(object): pass
@of_field("udp.src_port", "srcport", proto_validator(UDP_PROTO), "1.0")
class UdpSrcPort(object): pass
@of_field("tcp.dst_port", "dstport", proto_validator(TCP_PROTO), "1.0")
class TcpDstPort(object): pass
@of_field("tcp.src_port", "srcport", proto_validator(TCP_PROTO), "1.0")
class TcpSrcPort(object): pass
@of_field("vlan.pcp", "vlan_pcp", vlan_validator(), "1.0")
class VlanPcp(object):
def encode_in_place(self, pyr, pkt):
if (not 'vlan_id' in pyr and not 'vlan_pcp' in pyr and vlan.vlan in pkt):
for idx, proto in enumerate(pkt.protocols):
if isinstance(proto, vlan.vlan):
pkt.protocols.pop(idx)
return pyr
if not 'vlan_pcp' in pyr:
return pyr
if not vlan.vlan in pkt:
pkt.protocols.insert(1, vlan.vlan(ethertype=pkt.protocols[0].ethertype))
gen = (protocol for protocol in pkt.protocols if protocol.__class__ == vlan.vlan)
vl = gen.next()
vl.pcp = pyr['vlan_pcp']
pkt.protocols[0].ethertype = VLAN
return pyr
@of_field("vlan.vid", "vlan_id", vlan_validator(), "1.0")
class VlanID(object):
def encode_in_place(self, pyr, pkt):
if (not 'vlan_id' in pyr and not 'vlan_pcp' in pyr and vlan.vlan in pkt):
for idx, proto in enumerate(pkt.protocols):
if isinstance(proto, vlan.vlan):
pkt.protocols.pop(idx)
return pyr
if not 'vlan_id' in pyr:
return pyr
if not vlan.vlan in pkt:
pkt.protocols.insert(1, vlan.vlan(ethertype=pkt.protocols[0].ethertype))
gen = (protocol for protocol in pkt.protocols if protocol.__class__ == vlan.vlan)
vl = gen.next()
vl.vid = pyr['vlan_id']
pkt.protocols[0].ethertype = VLAN
return pyr
@of_field("ethernet.src", "srcmac", version="1.0")
class SrcMac(object): pass
@of_field("ethernet.dst", "dstmac", version="1.0")
class DstMac(object): pass
@of_field("", "header_len", version="1.0")
class HeaderLength(object):
def decode(self, ryu_pkt):
return len(ryu_pkt.protocols[0])
def encode_in_place(self, pyr, pkt):
return pyr
@of_field("", "payload_len", version="1.0")
class PayloadLength(object):
def decode(self, ryu_pkt):
return len(ryu_pkt.data)
def encode_in_place(self, pyr, pkt):
return pyr
@of_field("ethernet.ethertype", "ethtype", version="1.0")
class EthType(object):
def decode(self, ryu_pkt):
try:
layer = self.protocol(ryu_pkt, 'vlan')
if layer is None: layer = self.protocol(ryu_pkt, 'ethernet')
return layer.ethertype
except:
return None
@of_field("ipv6.srcip", "srcip", ether_validator(IPV6), version="1.0")
class Ipv6SrcIp(object): pass
@of_field("ipv6.dstip", "dstip", ether_validator(IPV6), version="1.0")
class Ipv6DstIp(object): pass
@of_field("ipv4.src", "srcip", ether_validator(IPV4), version="1.0")
class Ipv4SrcIp(object): pass
@of_field("ipv4.dst", "dstip", ether_validator(IPV4), version="1.0")
class Ipv4DstIp(object): pass
@of_field("ipv4.proto", "protocol", ether_validator(IPV4), version="1.0")
class Protocol(object): pass
@of_field("ipv4.tos", "tos", ether_validator(IPV4), version="1.0")
class TOS(object): pass
@of_field("icmp.type", "srcport", proto_validator(ICMP_PROTO), version="1.0")
class IcmpType(object): pass
@of_field("icmp.code", "dstport", proto_validator(ICMP_PROTO), version="1.0")
class IcmpCode(object): pass
@of_field("arp.opcode", "protocol", ether_validator(ARP), version="1.0")
class ArpOpcode(object): pass
@of_field("arp.src_ip", "srcip", ether_validator(ARP), version="1.0")
class ArpSrcIp(object): pass
@of_field("arp.dst_ip", "dstip", ether_validator(ARP), version="1.0")
class ArpDstIp(object): pass
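# Illustrative only: a new OpenFlow 1.0 field could be registered the same way,
# e.g. a hypothetical IPv4 TTL field. It is kept commented out so the header set
# that the rest of pyretic expects is not changed:
# @of_field("ipv4.ttl", "ipv4_ttl", ether_validator(IPV4), version="1.0")
# class Ipv4Ttl(object): pass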
################################################################################
# Packet
################################################################################
class Packet(object):
__slots__ = ["header"]
def __init__(self, state={}):
self.header = util.frozendict(state)
def available_fields(self):
return self.header.keys()
def __eq__(self, other):
return ( id(self) == id(other)
or ( isinstance(other, self.__class__)
and self.header == other.header ) )
def __ne__(self, other):
return not (self == other)
def modifymany(self, d):
add = {}
delete = []
for k, v in d.items():
if v is None:
delete.append(k)
else:
add[k] = v
return Packet(self.header.update(add).remove(delete))
def modify(self, **kwargs):
return self.modifymany(kwargs)
def virtual(self, layer, item):
v = self.header.get(('v_%s_%s' % (layer, item)), None)
if v is None:
raise KeyError(item)
return v
def __getitem__(self, item):
return self.header[item]
def __hash__(self):
return hash(self.header)
def __repr__(self):
import hashlib
fixed_fields = {}
fixed_fields['location'] = ['switch', 'inport', 'outport']
fixed_fields['vlocation'] = ['vswitch', 'vinport', 'voutport']
fixed_fields['source'] = ['srcip', 'srcmac']
fixed_fields['dest'] = ['dstip', 'dstmac']
order = ['location','vlocation','source','dest']
all_fields = self.header.keys()
outer = []
size = max(map(len, self.header) or map(len, order) or [len('md5'),0]) + 3
### LOCATION, VLOCATION, SOURCE, and DEST - EACH ON ONE LINE
for fields in order:
inner = ["%s:%s" % (fields, " " * (size - len(fields)))]
all_none = True
for field in fixed_fields[fields]:
try:
all_fields.remove(field)
except:
pass
try:
inner.append(repr(self.header[field]))
all_none = False
except KeyError:
inner.append('None')
if not all_none:
outer.append('\t'.join(inner))
### MD5 OF PAYLOAD
field = 'raw'
outer.append("%s:%s%s" % ('md5',
" " * (size - len(field)),
hashlib.md5(self.header[field]).hexdigest()))
all_fields.remove(field)
### ANY ADDITIONAL FIELDS
for field in sorted(all_fields):
try:
if self.header[field]:
outer.append("%s:%s\t%s" % (field,
" " * (size - len(field)),
repr(self.header[field])))
except KeyError:
pass
return "\n".join(outer)
|
import re
from collections import Counter
from collections import defaultdict
import numpy as np
def read_voc_pos_tags_from_conllu_file(filename):
file = open(filename, 'r', encoding="utf8")
pos_tags = []
vocabulary = []
sentences = []
text = file.read()
for sentence in text.split('\n\n'):
s = {}
w2i = defaultdict(lambda: len(w2i))
w2i['0'] = 0
for line in sentence.split('\n'):
if line.startswith('#'):
continue
if line and line != '\n':
line_split = line.split('\t')
                # skip multi-word token lines whose id is a range like "1-2"
                if re.match(r"\d+-\d+", line_split[0]):
                    continue
id = w2i[line_split[0]]
s[id] = ([line_split[1].lower(),
line_split[4],
line_split[6],
line_split[7],
line_split[1],
line_split[0]])
pos_tags.append(line_split[4])
vocabulary.append(line_split[1].lower())
golden_labels = []
M = np.zeros((len(s) + 1, len(s) + 1))
for i, w in enumerate(s.keys()):
if s[w][2] == '_':
continue
M[w2i[s[w][2]]][i+1] = 1
golden_labels.append([w2i[s[w][2]], i+1, s[w][3]])
M[0, 0] = 1
if s:
sentences.append([s, M, golden_labels])
return vocabulary, pos_tags, sentences
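# For reference, a CoNLL-U token line is tab-separated with the typical layout
#   ID  FORM  LEMMA  UPOS  XPOS  FEATS  HEAD  DEPREL  DEPS  MISC
# so line_split[1] above is the word form, [4] the XPOS tag, [6] the head index
# and [7] the dependency relation.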
def read_conllu_file(filename):
vocabulary, pos_tags, sentences = read_voc_pos_tags_from_conllu_file(filename)
    # count word occurrences before deduplicating, otherwise every count is 1
    voc_counter = Counter(vocabulary)
    vocabulary = set(vocabulary)
    pos_tags = list(set(pos_tags))
filtered_vocabulary = set()
labels = set()
for s in sentences:
for i, v in s[0].items():
labels.add(v[3])
    # keep words that occur more than twice; map the rarer ones to <unk>
    for word in vocabulary:
        if voc_counter[word] > 2:
            filtered_vocabulary.add(word)
        else:
            filtered_vocabulary.add('<unk>')
w2i = defaultdict(lambda: len(w2i))
t2i = defaultdict(lambda: len(t2i))
l2i = defaultdict(lambda: len(l2i))
for index, word in enumerate(voc_counter):
w2i[word] = index
i2w = {v: k for k, v in w2i.items()}
for index, tag in enumerate(pos_tags):
t2i[tag] = index
for index, label in enumerate(labels):
l2i[label] = index
i2t = {v: k for k, v in t2i.items()}
i2l = {v: k for k, v in l2i.items()}
index_sentences = []
golden_labels = []
for (sentence, _, gl) in sentences:
s = []
for k, v in sentence.items():
s.append((w2i[v[0]], t2i[v[1]]))
l = []
for f, t, label in gl:
l.append([f, t, l2i[label]])
golden_labels.append(l)
index_sentences.append(s)
return (dict(w2i), dict(i2w), dict(t2i), dict(i2t), dict(l2i), dict(i2l),
sentences, index_sentences, golden_labels)
|
#!/usr/bin/env pytest -vs
"""Tests for awssh."""
# Standard Python Libraries
import logging
import os
import sys
from unittest.mock import patch
# Third-Party Libraries
import pytest
# cisagov Libraries
from awssh import awssh
log_levels = (
"debug",
"info",
"warning",
"error",
"critical",
)
# define sources of version strings
RELEASE_TAG = os.getenv("RELEASE_TAG")
PROJECT_VERSION = awssh.__version__
def test_stdout_version(capsys):
"""Verify that version string sent to stdout agrees with the module version."""
with pytest.raises(SystemExit):
with patch.object(sys, "argv", ["bogus", "--version"]):
awssh.main()
captured = capsys.readouterr()
assert (
captured.out == f"{PROJECT_VERSION}\n"
), "standard output by '--version' should agree with module.__version__"
def test_running_as_module(capsys):
"""Verify that the __main__.py file loads correctly."""
with pytest.raises(SystemExit):
with patch.object(sys, "argv", ["bogus", "--version"]):
# F401 is a "Module imported but unused" warning. This import
# emulates how this project would be run as a module. The only thing
# being done by __main__ is importing the main entrypoint of the
# package and running it, so there is nothing to use from this
# import. As a result, we can safely ignore this warning.
# cisagov Libraries
import awssh.__main__ # noqa: F401
captured = capsys.readouterr()
assert (
captured.out == f"{PROJECT_VERSION}\n"
), "standard output by '--version' should agree with module.__version__"
@pytest.mark.skipif(
RELEASE_TAG in [None, ""], reason="this is not a release (RELEASE_TAG not set)"
)
def test_release_version():
"""Verify that release tag version agrees with the module version."""
assert (
RELEASE_TAG == f"v{PROJECT_VERSION}"
), "RELEASE_TAG does not match the project version"
@pytest.mark.parametrize("level", log_levels)
def test_log_levels(level):
"""Validate commandline log-level arguments."""
with patch.object(sys, "argv", ["bogus", f"--log-level={level}", "1", "1"]):
with patch.object(logging.root, "handlers", []):
assert (
logging.root.hasHandlers() is False
), "root logger should not have handlers yet"
return_code = None
try:
awssh.main()
except SystemExit as sys_exit:
return_code = sys_exit.code
assert return_code is None, "main() should return success"
assert (
logging.root.hasHandlers() is True
), "root logger should now have a handler"
assert (
logging.getLevelName(logging.root.getEffectiveLevel()) == level.upper()
), f"root logger level should be set to {level.upper()}"
assert return_code is None, "main() should return success"
def test_bad_log_level():
"""Validate bad log-level argument returns error."""
with patch.object(sys, "argv", ["bogus", "--log-level=emergency", "1", "1"]):
return_code = None
try:
awssh.main()
except SystemExit as sys_exit:
return_code = sys_exit.code
assert return_code == 1, "main() should exit with error"
|
import torch
from torch import nn
from transformers import AutoModel, AutoTokenizer
from datasets import load_dataset
from datasets import Value
from torch.utils.data import DataLoader
from torch import optim
from poutyne import Model, Lambda
from poutyne_transformers import TransformerCollator, ModelWrapper
print("Loading model & tokenizer.")
transformer = AutoModel.from_pretrained(
"distilbert-base-cased", output_hidden_states=True
)
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
custom_model = nn.Sequential(
ModelWrapper(transformer),
Lambda(lambda outputs: outputs["last_hidden_state"][:, 0, :]), # get the cls token
nn.Linear(in_features=transformer.config.hidden_size, out_features=1),
Lambda(lambda out: out.reshape(-1)),
)
print("Loading & preparing dataset.")
dataset = load_dataset("imdb")
dataset = dataset.map(
lambda entry: tokenizer(
entry["text"], add_special_tokens=True, padding="max_length", truncation=True
),
batched=True,
)
dataset = dataset.remove_columns(["text"])
for split in ("train", "test"):
new_features = dataset[split].features.copy()
new_features["label"] = Value("float64")
dataset[split] = dataset[split].cast(new_features)
dataset = dataset.shuffle()
dataset.set_format("torch")
collate_fn = TransformerCollator(y_keys="labels", remove_labels=True)
train_dataloader = DataLoader(dataset["train"], batch_size=16, collate_fn=collate_fn)
test_dataloader = DataLoader(dataset["test"], batch_size=16, collate_fn=collate_fn)
print("Preparing training.")
optimizer = optim.AdamW(custom_model.parameters(), lr=5e-5)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Model(
custom_model, optimizer, loss_function=nn.BCEWithLogitsLoss(), device=device
)
print("Starting training.")
model.fit_generator(train_dataloader, test_dataloader, epochs=1)
|
import numpy as np
import sys
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
from tools_TC202010 import read_nc, prep_proj_multi_cartopy, setup_grids_cartopy, draw_rec_4p, get_besttrack, read_obs_letkf
import cartopy.crs as ccrs
import cartopy.feature as cfeature
quick = True
#quick = False
def main( INFO={}, exp1="D1", exp2="NOHIM8_8km", otime=datetime( 2020, 9, 1, 0, 0 ) ):
tlons, tlats, tslps, ttimes = get_besttrack()
fn1 = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/TC2020/{0:}/const/topo_sno_np00001/topo.pe000000.nc".format( exp1 )
lon2d_d1 = read_nc( nvar="lon", fn=fn1 )
lat2d_d1 = read_nc( nvar="lat", fn=fn1 )
topo2d_d1 = read_nc( nvar="topo", fn=fn1 )
print( "size:", topo2d_d1.shape )
if exp2 != "":
fn2 = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/scale-5.4.3/OUTPUT/TC2020/{0:}/const/topo_sno_np00001/topo.pe000000.nc".format( exp2 )
lon2d_d2 = read_nc( nvar="lon", fn=fn2 )
lat2d_d2 = read_nc( nvar="lat", fn=fn2 )
topo2d_d2 = read_nc( nvar="topo", fn=fn2 )
lon_l = [ np.min( lon2d_d2 ), np.max( lon2d_d2) ]
lat_l = [ np.min( lat2d_d2 ), np.max( lat2d_d2) ]
lone = np.max( lon2d_d1 )
lons = np.min( lon2d_d1 )
late = np.max( lat2d_d1 )
lats = np.min( lat2d_d1 )
lons = 81
lone = 179
lats = 1
late = 60
print( "region", lons, lone, lats, late )
# original data is lon/lat coordinate
data_crs = ccrs.PlateCarree()
fig = plt.figure( figsize=(7,4.5) )
fig.subplots_adjust( left=0.09, bottom=0.06, right=0.96, top=0.96,
wspace=0.15, hspace=0.05)
if INFO["proj"] == 'merc':
ax_l = prep_proj_multi_cartopy( fig, xfig=1, yfig=1, proj=INFO["proj"],
latitude_true_scale=INFO['latitude_true_scale'] )
elif INFO["proj"] == 'PlateCarree':
ax_l = prep_proj_multi_cartopy( fig, xfig=1, yfig=1, proj=INFO["proj"],
central_longitude=INFO['central_longitude'] )
elif INFO["proj"] == 'lcc':
ax_l = prep_proj_multi_cartopy( fig, xfig=1, yfig=1, proj=INFO["proj"],
tlat1=INFO['tlat1'], tlat2=INFO['tlat2'],
central_longitude=INFO['central_longitude'],
central_latitude=INFO['central_latitude'],
latitude_true_scale=INFO['latitude_true_scale'] )
xticks = np.arange( 40, 205, 10 )
yticks = np.arange( 0, 85, 10 )
res = '50m'
coast = cfeature.NaturalEarthFeature( 'physical', 'coastline', res,
facecolor='none',
edgecolor='k', )
land = cfeature.NaturalEarthFeature( 'physical', 'land', res,
edgecolor='face',
facecolor=cfeature.COLORS['land'] )
ocean = cfeature.NaturalEarthFeature( 'physical', 'ocean', res,
edgecolor='face',
facecolor=cfeature.COLORS['water'] )
ax = ax_l[0]
if INFO["proj"] == 'merc':
setup_grids_cartopy( ax, xticks=xticks, yticks=yticks,
fs=9, lw=0.25, color='k' )
else:
setup_grids_cartopy( ax, xticks=xticks, yticks=yticks,
fs=9, lw=0.25, color='k' )
ax.set_extent([ lons, lone, lats, late ], crs=data_crs )
ax.add_feature( coast, zorder=1 )
ax.add_feature( land )
ax.add_feature( ocean )
# ax.contourf( lon2d_d1, lat2d_d1, topo2d_d1,
# transform=data_crs )
# draw domain
lc = 'k'
ax.plot( lon2d_d1[0,:], lat2d_d1[0,:], color=lc,
transform=data_crs )
ax.plot( lon2d_d1[-1,:], lat2d_d1[-1,:], color=lc,
transform=data_crs )
ax.plot( lon2d_d1[:,-1], lat2d_d1[:,-1], color=lc,
transform=data_crs )
ax.plot( lon2d_d1[:,0], lat2d_d1[:,0], color=lc,
transform=data_crs )
if exp2 != "":
ax.plot( lon2d_d2[0,:], lat2d_d2[0,:], color=lc,
transform=data_crs )
ax.plot( lon2d_d2[-1,:], lat2d_d2[-1,:], color=lc,
transform=data_crs )
ax.plot( lon2d_d2[:,-1], lat2d_d2[:,-1], color=lc,
transform=data_crs )
ax.plot( lon2d_d2[:,0], lat2d_d2[:,0], color=lc,
transform=data_crs )
print( "check", np.max( lat2d_d2[0,:] ) )
print( "check", np.max( lat2d_d1[0,:] ) )
# draw typhoon
ax.plot( tlons, tlats, transform=data_crs, color='k' )
ms = 5
for tidx in range( len(tlons) ):
ttime_ = ttimes[tidx]
if ttime_.day > 5 and ttime_.month == 9:
break
if ttime_.hour == 0:
ax.plot( tlons[tidx], tlats[tidx], transform=data_crs,
marker='o', ms=ms, color='k' )
if ttime_.day < 27:
ax.text( tlons[tidx]-0.5, tlats[tidx], ttimes[tidx].strftime('%m/%d'),
fontsize=10, transform=data_crs,
ha="right",
va='top',
)
# # plot obs
# obs = read_obs_letkf( time=otime )
# olons = obs[:,2]
# olats = obs[:,3]
# ms = 20
# print( olons, olats )
# ax.scatter( olons, olats, s=0.1, transform=data_crs, zorder=4,
# )
# print( obs.shape )
plt.show()
sys.exit()
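    # NOTE: everything below in this function is unreachable because of the
    # sys.exit() above; it is leftover two-panel MSLP plotting code that also
    # relies on names (prep_proj_multi, lon2d_gfs, mslp_d1, mslp_gfs,
    # plot_or_save) that are not defined or imported in this script.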
fig, ( (ax1, ax2) ) = plt.subplots( 1, 2, figsize=( 10, 4.1 ) )
fig.subplots_adjust( left=0.04, bottom=0.02, right=0.98, top=0.96,
wspace=0.1, hspace=0.3)
ax_l = [ ax1, ax2 ] #
m_l = prep_proj_multi( method='merc', ax_l=ax_l, ll_lon=lons, ur_lon=lone,
ll_lat=lats, ur_lat=late, fs=7, cc='gray', cw=0.3 )
lon2d_l = [ lon2d_d1, lon2d_gfs]
lat2d_l = [ lat2d_d1, lat2d_gfs]
var_l = [ mslp_d1, mslp_gfs ]
levs = np.arange( 800, 1100, 4 )
fac = 1.e-2
lw =0.5
tit_l = [
"SCALE-LETKF: D1",
"GFS",
]
for i, ax in enumerate( ax_l ):
x2d, y2d = m_l[0]( lon2d_l[i], lat2d_l[i] )
cont = ax.contour( x2d, y2d, var_l[i]*fac,
levels=levs, linewidths=lw, colors='k' )
ax.clabel( cont, fmt='%1.0f', fontsize=8, )
ax.text( 0.5, 1.01, tit_l[i],
fontsize=13, transform=ax.transAxes,
ha="center",
va='bottom',
)
ctime = time.strftime('%HUTC %m/%d/%Y')
ax.text( 1.0, 1.01, ctime,
fontsize=10, transform=ax.transAxes,
ha="right",
va='bottom',
)
fig.suptitle( "Analyzed MSLP (Pa)")
ofig = "D1_GFS_MSLP_{0:}".format( time.strftime('%Y%m%d%H%M'), )
plot_or_save( quick=quick, opath="png/2p_mslp", ofig=ofig )
###########
time = datetime( 2020, 9, 5, 0, 0 )
stime = datetime( 2020, 9, 1, 0, 0 )
etime = datetime( 2020, 9, 7, 12, 0 )
#stime = datetime( 2020, 9, 2, 0, 0 )
stime = datetime( 2020, 9, 3, 0, 0 )
#etime = datetime( 2020, 8, 29, 0, 0 )
etime = datetime( 2020, 9, 5, 0, 0 )
#etime = stime
exp2 = ""
exp2 = "NOHIM8_8km_3"
exp2 = "NOHIM8_8km_4"
exp2 = "NOHIM8_8km_5"
exp2 = "NOHIM8_8km_6"
exp2 = "NOHIM8_8km_7"
exp2 = "NOHIM8_8km_8"
exp2 = "NOHIM8_8km_9"
exp1 = "D1/D1_20210629"
exp2 = "D2/NOHIM8_4km"
#proj = "lcc"
proj = 'PlateCarree'
INFO = { 'proj': proj,
'latitude_true_scale': 30.0,
'tlat1':20.0,
'tlat2':40.0,
'central_longitude': 130.0,
'central_latitude': 30.0,
}
main( INFO=INFO, exp1=exp1, exp2=exp2 )
|
# explore also shapely.strtree.STRtree
from geosqlite import Writer, Reader
from shapely.wkb import loads as wkb_loads
from shapely.wkt import loads as wkt_loads
from shapely.geometry import shape, asShape, mapping
import pythongis as pg
from time import time
def timings():
print 'loading'
data = pg.VectorData(r"C:\Users\kimok\Desktop\gazetteer data\raw\global_urban_extent_polygons_v1.01.shp", encoding='latin')
#data = list(pg.VectorData(r"C:\Users\kimok\Desktop\gazetteer data\raw\ne_10m_admin_0_countries.shp", encoding='latin')) * 3
print len(data)
print 'making shapely (no copy)'
t = time()
shapelys = [asShape(f.geometry) for f in data]
print time()-t
print 'making shapely (copy)'
t = time()
shapelys = [shape(f.geometry) for f in data]
print time()-t
print 'dump geoj (interface)'
t = time()
geojs = [s.__geo_interface__ for s in shapelys]
print time()-t
##print 'dump geoj (mapping)'
##t = time()
##geojs = [mapping(s) for s in shapelys]
##print time()-t
print 'load geoj asShape (no copy)'
t = time()
shapelys = [asShape(geoj) for geoj in geojs]
print time()-t
print 'load geoj shape (copy)'
t = time()
shapelys = [shape(geoj) for geoj in geojs]
print time()-t
print 'dump wkt'
t = time()
wkts = [s.wkt for s in shapelys]
print time()-t
print 'load wkt'
t = time()
shapelys = [wkt_loads(wkt) for wkt in wkts]
print time()-t
print 'dump wkb'
t = time()
wkbs = [s.wkb for s in shapelys]
print time()-t
print 'load wkb'
t = time()
shapelys = [wkb_loads(wkb) for wkb in wkbs]
print time()-t
def sqlite_geoms():
print 'load shapefile'
t = time()
#data = pg.VectorData(r"C:\Users\kimok\Desktop\gazetteer data\raw\global_urban_extent_polygons_v1.01.shp", encoding='latin')
#data = pg.VectorData(r"C:\Users\kimok\Desktop\gazetteer data\raw\atlas_urban.geojson", encoding='latin')
data = pg.VectorData(r"C:\Users\kimok\Desktop\gazetteer data\raw\global_settlement_points_v1.01.shp", encoding='latin')
#data = pg.VectorData(r"C:\Users\kimok\Desktop\gazetteer data\raw\ne_10m_admin_0_countries.shp", encoding='latin')
print time()-t
print 'making shapely'
t = time()
shapelys = [shape(f.geometry) for f in data] # CRUCIAL SPEEDUP, SHAPELY SHOULD BE FROM SHAPE, NOT ASSHAPE WHICH IS INDIRECT REFERENCING
print time()-t
print 'dump wkb'
t = time()
wkbs = [s.wkb for s in shapelys]
print time()-t
print 'convert to binary'
from sqlite3 import Binary
t = time()
blobs = [Binary(wkb) for wkb in wkbs]
print time()-t
print 'insert wkb into db'
fields = ['ID', 'geom']
typs = ['int', 'BLOB']
w = Writer('testgeodb::data', fields=zip(fields,typs), replace=True)
t = time()
for i,blb in enumerate(blobs):
w.add([i, blb])
print time()-t
print 'load wkb from db'
t = time()
shapelys = [wkb_loads(bytes(blb)) for ID,blb in w.select('*')]
print time()-t
## print 'insert wkt into db'
## fields = ['ID', 'geom']
## typs = ['int', 'text']
## w.db.close()
## w = Writer('testgeodb::data', fields=zip(fields,typs), replace=True)
## t = time()
## for i,s in enumerate(shapelys):
## w.add([i, s.wkt])
## print time()-t
##
## print 'load wkt from db'
## t = time()
## shapelys = [wkt_loads(wkt) for ID,wkt in w.select('*')]
## print time()-t
########
#timings()
sqlite_geoms()
|
#!/usr/bin/env python3
from gene_disease import restClient, geneClient, getGeneList
import multiprocessing as mp
from nested_dict import nested_dict
import sqlite3
from sqlite3 import Error
import collections
import dill
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return None
def make_table(conn):
"""
Make the table to store the impact:disease data
:param conn: the Connection object
:return:
"""
cur = conn.cursor()
cur.execute('''create table if not exists impact_disease( gene TEXT default NULL, impact TEXT default NULL, disease text default NULL )''')
cur.execute('''delete from impact_disease''')
return None
def get_genes(conn):
"""
Get the genes out of the gemini database
:param conn: the Connection object
:return:
"""
cur = conn.cursor()
cur.execute("select distinct gene from gene_summary where is_hgnc = 1 and hgnc_id is not null")
rows = cur.fetchall()
allrows = []
for r in rows:
allrows.append(r[0])
return allrows
def load_table(conn,row_data):
"""
Load the retrieved data
:param conn: the Connection object
:return:
"""
cur = conn.cursor()
for rowd in row_data:
for row in rowd:
if row.error == "":
cur.execute('''insert into impact_disease(gene,impact,disease) VALUES (?,?,?)''', (row.gene,row.impact,row.disease))
else:
print("error from {} => {}".format(row.gene,row.error))
conn.commit()
def getginfoasync(geneList,omim_key):
g_information = []
#pool = mp.Pool(mp.cpu_count())
pool = mp.Pool(100)
#result_objects = [pool.apply_async(getGeneList, args=(gene, omim_key)) for gene in geneList]
g_information = pool.starmap_async(getGeneList, [(gene, omim_key) for i, gene in enumerate(geneList)]).get()
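    # starmap_async(...).get() blocks until every worker has finished, so this
    # behaves like a parallel map over geneList.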
#print(results)
# result_objects is a list of pool.ApplyResult objects
# g_information = [r.get()[0] for r in result_objects]
pool.close()
#pool.join()
return g_information
def getginfo(geneList,omim_key):
g_information = []
pool = mp.Pool(mp.cpu_count())
#pool = mp.Pool(15)
g_information = [pool.apply(getGeneList, args=(gene,omim_key)) for gene in geneList]
#g_information = pool.starmap(getGeneList, [(gene, omim_key) for gene in geneList])
#g_information = Parallel(n_jobs=100)(delayed(getGeneList)(gene,omim_key) for gene in geneList)
pool.close()
return g_information
if __name__ == '__main__':
gene_db = "Torrent.db"
impact_disease_db = "acmg_id.db"
omim_key = "Wqy5lssmS7uWGdpyy8H9zw"
geneList = []
# create database connections
print("connecting to {}".format(gene_db),flush=True)
gene_conn = create_connection(gene_db)
print("connecting to {}".format(impact_disease_db),flush=True)
id_conn = create_connection(impact_disease_db)
# make the table
print("making impact_disease database",flush=True)
make_table(id_conn)
# get the genes from the database
print("getting genes",flush=True)
#geneList = ['IRF6','MT-ND4','VWA1']
geneList = get_genes(gene_conn)
# get the information
print("getting gene information",flush=True)
#g_information = getginfo(geneList,omim_key)
g_information = getginfoasync(geneList,omim_key)
# pickle it
#filename = '/home/richard/pipeline/scripts/Phenoparser/scripts/g_information.pickle'
#with open(filename, 'wb') as f:
# dill.dump(g_information, f)
print("collected information for {} genes".format(len(g_information)),flush=True)
# load the data
print("loading gene information",flush=True)
load_table(id_conn,g_information)
print("closing connections",flush=True)
gene_conn.close()
id_conn.close()
|
import random
command = '''
Welcome to the world of the game!
Even I (Sidhant), the developer, do not know the
correct guess; it is generated randomly.
Best of luck!
'''
print(command)
op = ""
num = ''
number = random.randint(40,50)
out = {
number:'👌👏',
'1':'one',
'2':'two',
'3':'three',
'4':'four',
'5':'five',
'6':'six',
'7':'seven',
'8':'eight',
'9':'nine',
'0':'zero'
}
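# 'out' maps each digit character to its word, while the winning number itself
# maps to a celebration emoji that is printed once the loop ends.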
while num != number:
n = input('guess: ')
num = int(n)
if num == number:
        print('Congratulations! You guessed right!')
elif num < 35:
print('guess higher')
elif num > 55:
print('guess lower')
else:
print('closer!')
print('done!')
print(out[number])
if num == number:
for ch in n:
op += out.get(ch, ch) + " "
print(f'correct guess is {op}')
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Partial implementation for bundling header and modulemaps for static frameworks."""
load(
"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
"bundling_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:framework_support.bzl",
"framework_support",
)
load(
"@build_bazel_rules_apple//apple/internal:intermediates.bzl",
"intermediates",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@bazel_skylib//lib:partial.bzl",
"partial",
)
def _static_framework_header_modulemap_partial_impl(ctx, hdrs, binary_objc_provider):
"""Implementation for the static framework headers and modulemaps partial."""
bundle_name = bundling_support.bundle_name(ctx)
bundle_files = []
umbrella_header_name = None
if hdrs:
umbrella_header_name = "{}.h".format(bundle_name)
umbrella_header_file = intermediates.file(ctx.actions, ctx.label.name, umbrella_header_name)
framework_support.create_umbrella_header(
ctx.actions,
umbrella_header_file,
sorted(hdrs),
)
bundle_files.append(
(processor.location.bundle, "Headers", depset(hdrs + [umbrella_header_file])),
)
else:
umbrella_header_name = None
sdk_dylibs = getattr(binary_objc_provider, "sdk_dylib", None)
sdk_frameworks = getattr(binary_objc_provider, "sdk_framework", None)
# Create a module map if there is a need for one (that is, if there are
# headers or if there are dylibs/frameworks that the target depends on).
    if any([sdk_dylibs, sdk_frameworks, umbrella_header_name]):
modulemap_file = intermediates.file(ctx.actions, ctx.label.name, "module.modulemap")
framework_support.create_modulemap(
ctx.actions,
modulemap_file,
bundle_name,
umbrella_header_name,
sorted(sdk_dylibs and sdk_dylibs.to_list()),
sorted(sdk_frameworks and sdk_frameworks.to_list()),
)
bundle_files.append((processor.location.bundle, "Modules", depset([modulemap_file])))
return struct(
bundle_files = bundle_files,
)
def static_framework_header_modulemap_partial(hdrs, binary_objc_provider):
"""Constructor for the static framework headers and modulemaps partial.
This partial bundles the headers and modulemaps for static frameworks.
Args:
hdrs: The list of headers to bundle.
binary_objc_provider: The ObjC provider for the binary target.
Returns:
A partial that returns the bundle location of the static framework header and modulemap
artifacts.
"""
return partial.make(
_static_framework_header_modulemap_partial_impl,
hdrs = hdrs,
binary_objc_provider = binary_objc_provider,
)
|
from unittest import mock
from github3.orgs import ShortOrganization, ShortTeam
from github3.repos import ShortRepository
from github3.users import ShortUser
from git_sentry.handlers.git_org import GitOrg
from git_sentry.handlers.git_repo import GitRepo
from git_sentry.handlers.git_team import GitTeam
from git_sentry.handlers.git_user import GitUser
def mock_org(org_name, number_of_repos=0, owner=None, team_names=None):
if not team_names:
team_names = []
mocked_org = mock.Mock(spec=ShortOrganization)
mocked_org.login = org_name
mocked_org.name = org_name
existing_members = []
existing_admins = []
repositories = []
teams = [mock_team(team_name, org_name) for team_name in team_names]
for i in range(number_of_repos):
repo = mock_repo(org_name, f'repo{i}')
repo.archived = False
repositories.append(repo.raw_object())
mocked_org.repositories.return_value = repositories
if owner:
existing_admins += [owner]
def members(role=None):
final_members = existing_admins + existing_members
if role == 'admin':
final_members = existing_admins
if role == 'member':
final_members = existing_members
return [mock_user(m).raw_object() for m in final_members]
def add_member(raw_mock_member, role=None):
if not role:
role = 'member'
if role == 'member':
if raw_mock_member not in existing_admins:
existing_members.append(raw_mock_member)
else:
existing_admins.append(raw_mock_member)
if raw_mock_member in existing_members:
existing_members.remove(raw_mock_member)
def new_team(team_name, repo_names=None, permission=None, privacy=None, description=None):
mocked_team = mock_team(team_name, mocked_org.login)
if mocked_team not in teams:
teams.append(mocked_team)
return mocked_team.raw_object()
def get_teams():
return [t.raw_object() for t in teams]
def retrieve_membership(raw_mock_member):
if raw_mock_member in existing_admins:
return {'role': 'admin'}
if raw_mock_member in existing_members:
return {'role': 'member'}
return {'role': 'None'}
mocked_org.members.side_effect = members
mocked_org.add_or_update_membership.side_effect = add_member
mocked_org.teams.side_effect = get_teams
mocked_org.create_team.side_effect = new_team
mocked_org.membership_for.side_effect = retrieve_membership
return GitOrg(mocked_org)
def mock_repo(parent, repo_name, team_permission=None):
mocked_repo = mock.MagicMock(spec=ShortRepository)
mocked_repo.name = repo_name
mocked_repo.login = repo_name
mocked_repo.full_name = f'{parent}/{repo_name}'
if team_permission is not None:
mocked_repo.as_dict.return_value = {'permissions': {team_permission: True}}
return GitRepo(mocked_repo)
def mock_user(login):
mocked_user = mock.MagicMock(spec=ShortUser)
mocked_user.login = login
mocked_user.name = login
return GitUser(mocked_user)
def mock_team(team_name, organization):
mocked_team = mock.Mock(spec=ShortTeam)
mocked_team.login = team_name
mocked_team.name = team_name
mocked_team.organization = organization
members = []
maintainers = []
repos = {}
def add_member(raw_mock_member, role=None):
if not role:
role = 'member'
if role == 'member':
if raw_mock_member not in maintainers and raw_mock_member not in members:
members.append(raw_mock_member)
else:
if raw_mock_member not in maintainers:
maintainers.append(raw_mock_member)
if raw_mock_member in members:
members.remove(raw_mock_member)
def get_members(role=None):
final_members = maintainers + members
if role == 'maintainer':
final_members = maintainers
if role == 'member':
final_members = members
return [mock_user(m).raw_object() for m in final_members]
def add_to_repo(mock_repo, permission):
repos[mock_repo] = permission
def get_repos():
return [mock_repo(organization, repo, permission).raw_object() for repo, permission in repos.items()]
mocked_team.add_or_update_membership.side_effect = add_member
mocked_team.add_repository.side_effect = add_to_repo
mocked_team.members.side_effect = get_members
mocked_team.repositories.side_effect = get_repos
return GitTeam(mocked_team)
|
import magma as m
def verilog_name(name):
if isinstance(name, m.ref.DefnRef):
return str(name)
if isinstance(name, m.ref.ArrayRef):
array_name = verilog_name(name.array.name)
return f"{array_name}_{name.index}"
if isinstance(name, m.ref.TupleRef):
tuple_name = verilog_name(name.tuple.name)
index = name.index
try:
int(index)
# python/coreir don't allow pure integer names
index = f"_{index}"
except ValueError:
pass
return f"{tuple_name}_{index}"
raise NotImplementedError(name, type(name))
def verilator_name(name):
name = verilog_name(name)
# pg 21 of verilator 4.018 manual
    # To avoid conflicts with Verilator's internal symbols, any double
    # underscores are replaced with ___05F (5F is the hex code of an underscore).
return name.replace("__", "___05F")
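# Illustrative example of the combined renaming: an ArrayRef such as "x[3]"
# becomes "x_3" in Verilog, and a name that still contains "__" (e.g.
# "my__sig_3") is mangled by verilator_name to "my___05Fsig_3".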
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
ISO request molecule design pool set association table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Table
from sqlalchemy.schema import PrimaryKeyConstraint
__docformat__ = "reStructuredText en"
__all__ = ['create_table']
def create_table(metadata, iso_request_tbl, molecule_design_pool_set_tbl):
"Table factory"
tbl = Table('iso_request_pool_set', metadata,
Column('iso_request_id', Integer,
ForeignKey(iso_request_tbl.c.iso_request_id,
ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
Column('molecule_design_pool_set_id', Integer,
ForeignKey(molecule_design_pool_set_tbl.\
c.molecule_design_pool_set_id),
nullable=False, unique=True),
)
PrimaryKeyConstraint(tbl.c.iso_request_id)
return tbl
|
u"""Cheshire3 ResultSetStore Unittests.
A ResultSet is a collection of results, commonly pointers to Records, typically
created in response to a search on a Database.
A ResultSetStore is a persistent storage mechanism for ResultSet objects.
ResultSetStore configurations may be customized by the user. For the purposes
of unittesting, configuration files will be ignored and ResultSetStore
instances will be instantiated using configurations defined within this testing
module, and tests carried out on those instances using data defined in this
module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from lxml import etree
from cheshire3.resultSet import ResultSet, SimpleResultSet, SimpleResultSetItem
from cheshire3.resultSetStore import BdbResultSetStore
from cheshire3.test.testBaseStore import SimpleStoreTestCase, BdbStoreTestCase
class ResultSetStoreTestCase(SimpleStoreTestCase):
def _get_test_resultSets(self):
for x in range(5):
rs = SimpleResultSet(self.session)
for y in range(5):
occs = 5 - x
rs.append(SimpleResultSetItem(self.session,
id=y,
recStore="recordStore",
occs=occs,
database="",
diagnostic=None,
weight=0.5,
resultSet=None,
numeric=None)
)
yield rs
def test_store_data(self):
"Check that ResultSet is stored without alteration to copy in memory."
for inRs in self._get_test_resultSets():
# Get a representation of the ResultSet
items = [(i.id, i.recordStore, i.occurences, i.weight)
for i
in inRs]
# Store the ResultSet
self.testObj.create_resultSet(self.session, inRs)
# Check that fetched ResultSet is unaltered
new_items = [(i.id, i.recordStore, i.occurences, i.weight)
for i
in inRs]
self.assertListEqual(new_items,
items,
u"Returned ResultSet altered while storing")
def test_storeFetch_data(self):
"Check that Resultset is stored and retrieved without alteration."
for inRs in self._get_test_resultSets():
# Store the ResultSet
identifier = self.testObj.create_resultSet(self.session, inRs)
# Fetch the ResultSet
outRs = self.testObj.fetch_resultSet(self.session, identifier)
# Check returned object is instance of ResultSet
self.assertIsInstance(outRs, ResultSet)
# Check that returned doc content is unaltered
self.assertEqual(outRs.serialize(self.session),
inRs.serialize(self.session),
u"Fetched ResultSet not same as stored")
class BdbResultSetStoreTestCase(ResultSetStoreTestCase, BdbStoreTestCase):
@classmethod
def _get_class(cls):
return BdbResultSetStore
def _get_config(self):
return etree.XML('''\
<subConfig type="recordStore" id="{0.__name__}">
<objectType>{0.__module__}.{0.__name__}</objectType>
<paths>
<path type="defaultPath">{1}</path>
<path type="databasePath">{0.__name__}.bdb</path>
</paths>
</subConfig>'''.format(self._get_class(), self.defaultPath))
def load_tests(loader, tests, pattern):
# Alias loader.loadTestsFromTestCase for sake of line lengths
ltc = loader.loadTestsFromTestCase
suite = ltc(BdbResultSetStoreTestCase)
return suite
if __name__ == '__main__':
tr = unittest.TextTestRunner(verbosity=2)
tr.run(load_tests(unittest.defaultTestLoader, [], 'test*.py'))
|
from django.apps import AppConfig
class WarriorsConfig(AppConfig):
name = 'warriors'
|
#!/usr/bin/env python3
"""
This script loads a pretrained NN model and runs inference on the test set.
Created by: Vice, 16.10.2021
"""
import torch
import torch.nn as nn
import numpy as np
import gc
from NN_dataset import test_loader
import math
from NN_params import *
# Append the required sys.path for accessing utilities and save the data directory
import os
import sys
curr_dir = os.path.dirname(os.path.realpath(__file__))
models_dir = os.getcwd() + "/models/Bertybob"
os.chdir("..")
sys.path.append(models_dir)
os.chdir(curr_dir)
# Input the model name and the desired number of reconstructions
model_name = "Bertybob" # Same as in NN_train_and_save.py script
num_of_reconstructions = 10
model_path = models_dir + "/" + model_name + ".pt"
state_dict_path = model_path[:-3] + "_state_dict"
# Create the output folder
output_folder = curr_dir + "/output" + "/" + model_name
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Load the model and the parameters
from NN_model import *
gc.collect()  # Release the memory
model = torch.load(model_path)
model.load_state_dict(torch.load(state_dict_path))
model.eval()
print("------ Making inference ------ \n")
print("Model name: ", model_name)
print("\n")
i = 0
with torch.no_grad():
for y in test_loader:
test = model(y)
for idx, output_params in enumerate(test.numpy()):
print("Bunny {0} printing parameters:".format(idx + 1))
print("Laser power: ", np.round(output_params[0] * 100, 2))
print("Laser speed: ", np.round(output_params[1] * 10000, 2))
print("Hatch spacing: ", np.round(output_params[2], 2))
print("\n")
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
def color_to_grayscale(color_array: np.ndarray) -> np.ndarray:
return cv2.cvtColor(color_array, cv2.COLOR_BGR2GRAY)
def calcPSD(input_image, output_image, flag):
# Complex input image with zero imaginary component
X = np.fft.rfft2(input_image).real
np.power(X, 2, out=X)
# X += 1
# np.log(X, out=X)
# # plt.imshow(X, cmap="gray")
# plt.imshow(X)
# plt.show()
print(X)
complex_image = np.zeros(shape=(input_image.shape[0], input_image.shape[1], 2), dtype=np.float32)
complex_image[:, :, 0] = input_image
cv2.dft(complex_image, dst=complex_image)
complex_image[:, 0, :] = 0 # Hmm
psd = cv2.magnitude(complex_image[:, :, 0], complex_image[:, :, 1])
cv2.pow(psd, 2, dst=psd)
print(psd)
if flag:
imlog = psd + 1
np.log(imlog, out=imlog)
output_image = imlog
else:
output_image = psd
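    # NOTE: rebinding the output_image parameter above only changes the local
    # name; the caller never sees the PSD. The result would need to be returned
    # to be used outside this function.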
# print(output_image)
plt.imshow(X, cmap="gray")
plt.show()
if __name__ == "__main__":
image = cv2.imread("test4_split0.png")
# image = cv2.imread("test.png")
image = color_to_grayscale(image)
print("image type: ", image.dtype)
new_size = image.shape[0] & -2, image.shape[1] & -2 # Even number of rows / columns
new_image = np.zeros(shape=new_size, dtype=np.float32)
new_image[:, :] = image[:new_size[0], :new_size[1]]
calcPSD(new_image, new_size, 0)
print("Success!")
|
import actions
from loader import Loader
LOADER = Loader()
def lambda_handler(event, context):
try:
LOADER.forecast_cli.delete_dataset_import_job(
DatasetImportJobArn=event['DatasetImportJobArn']
)
actions.take_action_delete(
LOADER.forecast_cli.describe_dataset_import_job(
DatasetImportJobArn=event['DatasetImportJobArn']
)['Status']
)
except (LOADER.forecast_cli.exceptions.ResourceNotFoundException, KeyError):
LOADER.logger.info('Import job not found! Passing.')
return event
|
from typing import List
import pandas as pd
from sqlalchemy import Integer
from sqlalchemy.sql.schema import Column
from datapipe.datatable import DataStore, DataTable
from datapipe.compute import Pipeline, Catalog, Table, DatatableTransform
from datapipe.core_steps import BatchGenerate
from datapipe.run_config import RunConfig
from datapipe.store.database import DBConn
from datapipe.store.pandas import TableStoreJsonLine
from datapipe.cli import main
catalog = Catalog({
'input': Table(
store=TableStoreJsonLine(
filename='input.json',
primary_schema=[
Column('id', Integer, primary_key=True),
],
)
),
'result': Table(
store=TableStoreJsonLine(
filename='result.json',
primary_schema=[
Column('result_id', Integer, primary_key=True)
],
)
)
})
def generate_data():
yield pd.DataFrame({
'id': range(10),
'a': [f'a{i}' for i in range(10)],
})
def count(ds: DataStore, input_dts: List[DataTable], output_dts: List[DataTable], run_config: RunConfig = None) -> None:
assert len(input_dts) == 1
assert len(output_dts) == 1
input_dt = input_dts[0]
output_dt = output_dts[0]
output_dt.store_chunk(
pd.DataFrame({
'result_id': [0],
'count': [len(input_dt.meta_table.get_existing_idx())]
})
)
pipeline = Pipeline([
BatchGenerate(
generate_data,
outputs=['input'],
),
DatatableTransform(
count,
inputs=['input'],
outputs=['result'],
)
])
ds = DataStore(DBConn('sqlite:///metadata.sqlite'))
main(ds, catalog, pipeline)
|
__version__ = "0.1.1"
from .core import find, MarkerNotFound
|
#!/usr/bin/env python
import os
import uuid
import logging
from ConfigParser import ConfigParser
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import NoRouteError
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from client import Rover
logger = logging.getLogger(__name__)
class Component(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print(details)
dir(details)
self.conf = self.config.extra['config']
self.id_ = str(self.conf.get('rover', 'id') or uuid.uuid4())
self.rover = self.config.extra['rover']
self.rate = float(self.conf.get('rover', 'rate'))
# TODO Lookup exact IP address
self.host = '192.168.1.2{}'.format(self.id_.zfill(2))
self.camera_uri = 'http://{}/html/cam_pic_new.php?pDelay=40000'.format(self.host)
yield self.subscribe(self.on_navigation_update, 'mars.rover.' + self.id_ + '.navigation')
yield self.subscribe(self.on_signal, 'mars.rover.' + self.id_ + '.signal')
while True:
self.publish('mars.rover.' + self.id_ + '.heartbeat')
self.publish('mars.rover.' + self.id_ + '.sensors', self.get_sensors())
yield sleep(self.rate)
def on_navigation_update(self, left_motor, right_motor):
self.log.debug('{}, {}'.format(left_motor, right_motor))
self.rover.set_motors(float(left_motor), float(right_motor))
def on_reboot_signal(self):
self.log.info('Rebooting system')
self.rover.reboot()
self.leave()
def on_signal(self, signal):
signal = signal.lower()
        self.log.info('Received signal {}'.format(signal))
if signal in ['stop', 'shutdown', 'reboot', 'update']:
result = getattr(self.rover, signal)()
if not self.rover.is_running:
self.leave()
def get_sensors(self):
return {
'range': self.rover.get_range(),
'camera': self.camera_uri
}
def onLeave(self, details):
self.log.info('Leaving...')
self.disconnect()
def onDisconnect(self):
reactor.stop()
self.log.info('Disconnected')
def main():
config = ConfigParser()
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.ini')
config.read(config_file)
logger.setLevel(getattr(logging, config.get('logging', 'level')))
logging.basicConfig(format='[%(levelname)-5s] %(asctime)-15s %(name)10s %(message)s')
host = config.get('main', 'host')
port = config.get('main', 'port')
address = u'ws://{}:{}/ws'.format(host, port)
realm = u'mars'
logger.info('Initializing rover interface...')
rover = Rover(config)
rover.start()
logger.info('Connecting to {} in realm "{}"...'.format(address, realm))
runner = ApplicationRunner(address, realm, extra={
'config': config,
'rover': rover
})
# Start application
try:
runner.run(Component)
except NoRouteError:
logger.error('Error connecting to {} {}'.format(address, realm))
finally:
rover.stop()
if __name__ == '__main__':
main()
|
def notas(*args, situacao=False):
"""
=> Recebe várias notas de um aluno e retorna:
- A quantidade de notas recebidas
- A maior nota
- A menor nota
- A média da turma
- A situação académica (opcional)
:param notas: recebe várias notas
:param situacao: (opcional) mostra ou não a situação académica
:return: dicionario com estatistica das notas (e situação académica)
"""
pos = total = soma = maior = menor = media = 0.00
result = {}
# O código de 18 a 29 pode ser substituído pelas funções:
# len(args) -> total | max(args)/min(args) -> maior/menor
# sum(args)/len(args) -> média
for nota in args:
if pos == 0:
maior = menor = nota
else:
if nota > maior:
maior = nota
if nota < menor:
menor = nota
total += 1
soma += nota
pos += 1
media = round(soma/total, 2)
if media < 10:
classificacao = 'REPROVADO'
elif media >= 14:
classificacao = 'DISPENSADO'
else:
classificacao = 'APROVADO'
result['total'] = int(total)
result['maior'] = maior
result['menor'] = menor
result['media'] = media
    if situacao:
        result['classificacao'] = classificacao
    return result
resp = notas(10, 14, 6.5, 8.5)
resp2 = notas(11, 14, 9.6, 8.3, 5.5, situacao=True)
print(resp)
print(resp2)
|
from helpers.kafka_helpers import create_consumer, poll_for_valid_message
from helpers.f142_logdata import LogData, Value, Double
from helpers.flatbuffer_helpers import check_message_pv_name_and_value_type
from time import sleep
def test_forwarder_sends_fake_pv_updates(docker_compose_fake_epics):
# A fake PV is defined in the config json file with channel name "FakePV"
consumer = create_consumer()
data_topic = "TEST_forward_fake_generated_pvs"
consumer.subscribe([data_topic])
sleep(5)
msg = poll_for_valid_message(consumer).value()
log_data_first = LogData.LogData.GetRootAsLogData(msg, 0)
# We should see PV updates in Kafka despite there being no IOC running
check_message_pv_name_and_value_type(log_data_first, Value.Value.Double, b'FakePV')
|
from django.apps import AppConfig
class SampleappConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'backend.sampleapp'
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import subprocess
from .platform_determiner_base import platform_determiner_base
from .linux_os_release import linux_os_release
from .linux_lsb_release import linux_lsb_release
class platform_determiner_linux(platform_determiner_base):
def __init__(self, platform):
impl = self._make_impl(platform)
if not impl:
      raise RuntimeError('Unknown linux system: %s' % str(platform))
self._impl = impl
@classmethod
def _make_impl(self, platform):
if linux_os_release.has_os_release():
filename, content = linux_os_release.read_os_release()
from .platform_determiner_linux_os_release import platform_determiner_linux_os_release
return platform_determiner_linux_os_release(platform, content, filename)
    elif linux_lsb_release.has_lsb_release():
      lsb_release = linux_lsb_release.lsb_release_output()
      # assumed to live alongside the os_release implementation above
      from .platform_determiner_linux_lsb_release import platform_determiner_linux_lsb_release
      return platform_determiner_linux_lsb_release(platform, lsb_release)
else:
return None
@classmethod
def _etc_issue_content(clazz):
try:
with open('/etc/issue', 'r') as fin:
return fin.read()
except Exception as ex:
return None
#@abstractmethod
def system(self):
'system.'
return self._impl.system()
#@abstractmethod
def distro(self):
'distro.'
return self._impl.distro()
#@abstractmethod
def family(self):
'distro family.'
return self._impl.family()
#@abstractmethod
def version_major(self):
'distro version major.'
return self._impl.version_major()
#@abstractmethod
def version_minor(self):
'distro version minor.'
return self._impl.version_minor()
#@abstractmethod
def arch(self):
'arch.'
return self._impl.arch()
|
import click
import sys
from . import analysisPipeline
@click.group()
def main(args=None):
pass
@main.command()
@click.argument('inputdir', type=click.Path(exists=True))
@click.argument('phenodata', type=str)
def modelStep1(inputdir, phenodata=None):
"""
Run accordJP pipeline, starting launch model setup step 1
As of this moment, JYL -- FIXME
Make sure your phenotype data file is called '/home/accord/data/analysis/pheno_data.txt' before launching step 1
Update: File should be named "pheno_data_*****.txt" Step 1 will prompt user for pheno file name
Make sure you are in ~/accord/data/analysis when you start running this code
Print INPUTDIR if the directory exists.
    Print PHENODATA from the input or use the default: pheno_data.txt
"""
click.echo(click.format_filename(inputdir))
click.echo(phenodata)
analysisPipeline.launchModelStep1(inputdir, phenodata)
@main.command()
@click.argument('inputdir', type=click.Path(exists=True))
def modelStep2(inputdir):
"""
Run accordJP pipeline, starting launch model setup step 2
As of this moment, JYL -- FIXME
Print INPUTDIR if the directory exists.
"""
click.echo(click.format_filename(inputdir))
analysisPipeline.launchModelStep2(inputdir)
if __name__ == "__main__":
main()
|
# pylint: disable=missing-docstring
import unittest
from pathlib import Path
from handsdown.processors.pep257 import PEP257DocstringProcessor
class TestLoader(unittest.TestCase):
def test_init(self):
pep257_docstring = (
Path(__file__).parent.parent / "static" / "pep257_docstring.txt"
).read_text()
processor = PEP257DocstringProcessor()
sections = processor.build_sections(pep257_docstring)
self.assertEqual(sections[""].title, "")
self.assertEqual(
sections[""].render(),
"Summary line.\n\nExtended description of method.\n\n",
)
|
import os
from typing import Callable
from unittest import TestCase
from tempfile import mkstemp
from ygo_core.deck import Deck
class TestDeck(TestCase):
def setUp(self) -> None:
self.deck = Deck()
def base_test_load(self, content: str, assertfunc: Callable[[], None]) -> None:
fd, name = mkstemp(suffix='.ydk')
with open(name, 'w') as f:
f.write(content)
try:
self.deck.load(name)
assertfunc()
except Exception as e:
raise e
finally:
os.close(fd)
os.remove(name)
    def test_load_with_valid_suffix(self) -> None:
assert self.deck.count_main == 0
assert self.deck.count_extra == 0
assert self.deck.count_side == 0
content = """
#created by llk
#main
#extra
!side
"""
self.base_test_load(content, lambda: self.assertEqual(0, self.deck.count_main))
def test_load_with_main_result_main_count_should_be_1(self) -> None:
content = """
#created by llk
#main
0
#extra
!side
"""
self.base_test_load(content, lambda: self.assertEqual(1, self.deck.count_main))
def test_load_with_main_result_extra_count_should_be_0(self) -> None:
content = """
#created by llk
#main
0
#extra
!side
"""
self.base_test_load(content, lambda: self.assertEqual(0, self.deck.count_extra))
def test_load_with_main_result_side_count_should_be_0(self) -> None:
content = """
#created by llk
#main
0
#extra
!side
"""
self.base_test_load(content, lambda: self.assertEqual(0, self.deck.count_side))
def test_load_with_extra_result_main_count_should_be_0(self) -> None:
content = """
#created by llk
#main
#extra
0
!side
"""
self.base_test_load(content, lambda: self.assertEqual(0, self.deck.count_main))
def test_load_with_extra_result_extra_count_should_be_1(self) -> None:
content = """
#created by llk
#main
#extra
0
!side
"""
self.base_test_load(content, lambda: self.assertEqual(1, self.deck.count_extra))
def test_load_with_extra_result_side_count_should_be_0(self) -> None:
content = """
#created by llk
#main
#extra
0
!side
"""
self.base_test_load(content, lambda: self.assertEqual(0, self.deck.count_side))
def test_load_with_side_result_main_count_should_be_0(self) -> None:
content = """
#created by llk
#main
#extra
!side
0
"""
self.base_test_load(content, lambda: self.assertEqual(0, self.deck.count_main))
def test_load_with_side_result_extra_count_should_be_0(self) -> None:
content = """
#created by llk
#main
#extra
!side
0
"""
self.base_test_load(content, lambda: self.assertEqual(0, self.deck.count_extra))
def test_load_with_side_result_side_count_should_be_1(self) -> None:
content = """
#created by llk
#main
#extra
!side
1
"""
self.base_test_load(content, lambda: self.assertEqual(1, self.deck.count_side))
def test_load_with_invalid_suffix(self) -> None:
fd, name = mkstemp(suffix='.pdf')
try:
self.assertRaises(ValueError, self.deck.load, name)
finally:
os.close(fd)
os.remove(name)
def test_load_with_unexist_path(self) -> None:
path = 'unexist'
if os.path.exists(path):
raise Exception(f'`path`:{path} already exists')
self.assertRaises(FileNotFoundError, self.deck.load, path)
def test_prop_count_main(self) -> None:
self.assertEqual(0, self.deck.count_main)
def test_prop_count_extra(self) -> None:
self.assertEqual(0, self.deck.count_extra)
def test_prop_count_side(self) -> None:
self.assertEqual(0, self.deck.count_side)
def test_prop_count(self) -> None:
self.assertEqual(0, self.deck.count)
|
class LeaderShape(Enum, IComparable, IFormattable, IConvertible):
"""
Supported geometric shapes of annotation leaders.
enum LeaderShape,values: Arc (2),Kinked (1),Straight (0)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
Arc = None
Kinked = None
Straight = None
value__ = None
|
"""pytorchfi.error_models provides different error models out-of-the-box for use."""
import logging
import random
import torch
from pytorchfi import core
from pytorchfi.util import *
# Helper functions
def random_value(min_val=-1, max_val=1):
return random.uniform(min_val, max_val)
def random_weight_location(pfi, layer=-1):
if layer == -1:
layer = random.randint(0, pfi.get_total_layers() - 1)
dim = pfi.get_weights_dim(layer)
shape = pfi.get_weights_size(layer)
dim0_shape = shape[0]
k = random.randint(0, dim0_shape - 1)
if dim > 1:
dim1_shape = shape[1]
dim1_rand = random.randint(0, dim1_shape - 1)
if dim > 2:
dim2_shape = shape[2]
dim2_rand = random.randint(0, dim2_shape - 1)
else:
dim2_rand = None
if dim > 3:
dim3_shape = shape[3]
dim3_rand = random.randint(0, dim3_shape - 1)
else:
dim3_rand = None
return ([layer], [k], [dim1_rand], [dim2_rand], [dim3_rand])
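# Each coordinate above is wrapped in a single-element list so the returned
# tuple lines up with how declare_weight_fi is called below (one list per
# dimension, describing a single fault).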
# Weight Perturbation Models
def random_weight_inj(pfi, corrupt_layer=-1, min_val=-1, max_val=1):
layer, k, c_in, kH, kW = random_weight_location(pfi, corrupt_layer)
faulty_val = [random_value(min_val=min_val, max_val=max_val)]
return pfi.declare_weight_fi(
layer_num=layer, k=k, dim1=c_in, dim2=kH, dim3=kW, value=faulty_val
)
def zero_func_rand_weight(pfi):
layer, k, c_in, kH, kW = random_weight_location(pfi)
return pfi.declare_weight_fi(
function=_zero_rand_weight, layer_num=layer, k=k, dim1=c_in, dim2=kH, dim3=kW
)
def _zero_rand_weight(data, location):
new_data = data[location] * 0
return new_data
|
l = [10, 20, 30, 40, 50]
print(l[:2], l[2:])
print(l[::2])
s = slice(None, None, 2)
print(l[s])
l = list(range(10))
print(l)
print(l[2:5])
l[2:5] = [20, 30]
print(l)
print([1, 2] * 5)
board = [['_'] * 3 for i in range(3)]
print(board)
board[1][2] = 'X'
print(board)
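# Unlike the list comprehension above, [['_'] * 3] * 3 repeats the *same* inner
# list three times, so assigning to weird_board[1][2] changes every row.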
weird_board = [['_'] * 3] * 3
print(weird_board)
weird_board[1][2] = 'O'
print(weird_board)
|
# Date: 28 Aug 2021
# Mini Python - For Beginners Exercise by #TechwithTim known as Tim Ruscica
# Youtube link: https://www.youtube.com/watch?v=DLn3jOsNRVE&t=1933s
# Exercise: Rock, Paper, Scissors
# My Exercise Objectives
# No.1 write out the code outlined by #TechwithTim exercise
# No.2 to flag identify what I learnt from each exercise
# No.3 add my own code commentary to start getting into good habits
# My learnings from Tim's Tutorial
# No.1 - syntax is very sensitive (e.g. _underscore twice changes variable names)
# No.2 - use of a list
# No.3 - when I add the comment sections it helps show the code logic flow
# Introduction / Variable Setup
import random
user_wins = 0
computer_wins = 0
options = ["rock", "paper", "scissors"]
# Collect User Input
while True:
user_input = input("Type Rock/Paper/Scissors or Q to quit: ").lower()
if user_input == "q":
break
if user_input not in options:
continue
# Setup Computer Opponent
random_number = random.randint(0,2)
# rock: 0, paper: 1, scissors: 2
computer_pick = options[random_number]
print("Computer picked", computer_pick + ".")
# Playing Scenarios - User vs Computer
if (
user_input == "rock"
and computer_pick == "scissors"
or user_input == "paper"
and computer_pick == "rock"
or user_input == "scissors"
and computer_pick == "paper"
):
print("You won!")
user_wins += 1
else:
print("You lost")
computer_wins += 1
# Summary of results when user exits program
print("You won", user_wins, "times.")
print("The computer won", computer_wins, "times.")
print("Goodbye!")
|
class Solution:
    def removeKdigits(self, num: str, k: int) -> str:
        # Greedy monotonic stack: while removals remain, pop any digit larger
        # than the incoming one so the most significant digits stay as small
        # as possible.
        stk = []
        for i in num:
            while k and stk and stk[-1] > i:
                stk.pop()
                k -= 1
            stk.append(i)
        # If removals are left over, drop them from the end; then strip leading
        # zeros, falling back to "0" for an empty result.
        return ''.join(stk[:-k or None]).lstrip("0") or "0"
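# Quick sanity check (illustrative, not part of the submission): the classic
# example "1432219" with k = 3 reduces to "1219".
if __name__ == '__main__':
    assert Solution().removeKdigits("1432219", 3) == "1219"
    assert Solution().removeKdigits("10", 2) == "0"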
|
from typing import Any
from ..protocol import Observable, Observer, Subscription, rx_observer_from
from .rx_create import rx_create
from .rx_reduce import rx_reduce
__all__ = ["rx_avg"]
def rx_avg(observable: Observable) -> Observable:
"""Create an observable wich return the average items in the source when completes.
Args:
observable (observable): the observable source
Returns:
(Observable): observable instance
"""
_count = 0
async def accumulator(current, item):
nonlocal _count
_count += 1
return current + item
async def _subscribe(an_observer: Observer) -> Subscription:
reducer = rx_reduce(observable=observable, accumulator=accumulator, seed=0)
async def _on_next(item: Any):
nonlocal _count
if _count == 0:
await an_observer.on_error('No value emitted')
else:
await an_observer.on_next(item / _count)
return await reducer.subscribe(an_observer=rx_observer_from(observer=an_observer, on_next=_on_next))
return rx_create(subscribe=_subscribe)
|
# $Id: 411_fmtp_amrnb_offer_band_eff.py 3664 2011-07-19 03:42:28Z nanang $
import inc_sip as sip
import inc_sdp as sdp
# Answer for codec AMR should not contain fmtp octet-align=1
sdp = \
"""
v=0
o=- 3428650655 3428650655 IN IP4 192.168.1.9
s=pjmedia
c=IN IP4 192.168.1.9
t=0 0
a=X-nat:0
m=audio 4000 RTP/AVP 99 101
a=rtcp:4001 IN IP4 192.168.1.9
a=rtpmap:99 AMR/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
"""
pjsua_args = "--null-audio --auto-answer 200 --add-codec AMR"
extra_headers = ""
include = [""]
exclude = ["octet-align=1"] # response must not include fmtp 'octet-align=1'
sendto_cfg = sip.SendtoCfg("AMR negotiation should not contain 'octet-align=1'", pjsua_args, sdp, 200,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
|
from stix_shifter_modules.splunk.entry_point import EntryPoint
from unittest.mock import patch
import unittest
import json
import os
from stix_shifter.stix_transmission import stix_transmission
from stix_shifter_utils.utils.error_response import ErrorCode
class SplunkMockResponse:
def __init__(self, response_code, object):
self.code = response_code
self.object = object
def read(self):
return self.object
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.__init__')
class TestSplunkConnection(unittest.TestCase, object):
def test_is_async(self, mock_api_client):
mock_api_client.return_value = None
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
entry_point = EntryPoint(connection, config)
check_async = entry_point.is_async()
assert check_async
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.ping_box')
def test_ping_endpoint(self, mock_ping_response, mock_api_client):
mock_api_client.return_value = None
mocked_return_value = '["mock", "placeholder"]'
mock_ping_response.return_value = SplunkMockResponse(200, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
transmission = stix_transmission.StixTransmission('splunk', connection, config)
ping_response = transmission.ping()
assert ping_response is not None
assert ping_response['success']
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.ping_box')
def test_ping_endpoint_exception(self, mock_ping_response, mock_api_client):
mock_api_client.return_value = None
mocked_return_value = '["mock", "placeholder"]'
mock_ping_response.return_value = SplunkMockResponse(200, mocked_return_value)
mock_ping_response.side_effect = Exception('exception')
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
transmission = stix_transmission.StixTransmission('splunk', connection, config)
ping_response = transmission.ping()
assert ping_response is not None
assert ping_response['success'] is False
assert ping_response['code'] == ErrorCode.TRANSMISSION_UNKNOWN.value
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.create_search')
def test_query_response(self, mock_query_response, mock_api_client):
mock_api_client.return_value = None
mocked_return_value = '{"sid":"1536672851.4012"}'
mock_query_response.return_value = SplunkMockResponse(201, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
query = 'search eventtype=network_traffic | fields + tag| spath'
transmission = stix_transmission.StixTransmission('splunk', connection, config)
query_response = transmission.query(query)
assert query_response is not None
assert query_response['success'] is True
assert 'search_id' in query_response
assert query_response['search_id'] == "1536672851.4012"
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.create_search')
def test_query_response_exception(self, mock_query_response, mock_api_client):
mock_api_client.return_value = None
mocked_return_value = '{"sid":"1536672851.4012"}'
mock_query_response.return_value = SplunkMockResponse(201, mocked_return_value)
mock_query_response.side_effect = Exception('exception')
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
query = 'search eventtype=network_traffic | fields + tag| spath'
transmission = stix_transmission.StixTransmission('splunk', connection, config)
query_response = transmission.query(query)
assert query_response is not None
assert query_response['success'] is False
assert query_response['code'] == ErrorCode.TRANSMISSION_UNKNOWN.value
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search', autospec=True)
def test_status_response(self, mock_status_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'status_by_sid.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_status_response.return_value = SplunkMockResponse(200, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
entry_point = EntryPoint(connection, config)
status_response = entry_point.create_status_connection(search_id)
assert status_response is not None
assert 'status' in status_response
assert status_response['status'] == 'COMPLETED'
assert 'progress' in status_response
assert status_response['progress'] == 100
assert 'success' in status_response
assert status_response['success'] is True
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search', autospec=True)
def test_status_response_error(self, mock_status_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'status_by_sid_failed.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_status_response.return_value = SplunkMockResponse(200, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
entry_point = EntryPoint(connection, config)
status_response = entry_point.create_status_connection(search_id)
assert status_response is not None
assert 'status' in status_response
assert status_response['status'] == 'ERROR'
assert 'progress' in status_response
assert status_response['progress'] == 100
assert 'success' in status_response
assert status_response['success'] is True
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search', autospec=True)
def test_status_response_running(self, mock_status_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'status_by_sid_running.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_status_response.return_value = SplunkMockResponse(200, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
entry_point = EntryPoint(connection, config)
status_response = entry_point.create_status_connection(search_id)
assert status_response is not None
assert 'status' in status_response
assert status_response['status'] == 'RUNNING'
assert 'progress' in status_response
assert status_response['progress'] == 100
assert 'success' in status_response
assert status_response['success'] is True
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search', autospec=True)
def test_status_response_cancelled(self, mock_status_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'status_by_sid_running_cancel.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_status_response.return_value = SplunkMockResponse(200, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
entry_point = EntryPoint(connection, config)
status_response = entry_point.create_status_connection(search_id)
assert status_response is not None
assert 'status' in status_response
assert status_response['status'] == 'CANCELED'
assert 'progress' in status_response
assert status_response['progress'] == 100
assert 'success' in status_response
assert status_response['success'] is True
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search', autospec=True)
def test_status_response_exception(self, mock_status_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'status_by_sid.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_status_response.return_value = SplunkMockResponse(200, mocked_return_value)
mock_status_response.side_effect = Exception('exception')
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
transmission = stix_transmission.StixTransmission('splunk', connection, config)
status_response = transmission.status(search_id)
assert status_response is not None
assert status_response['success'] is False
        assert status_response['code'] == ErrorCode.TRANSMISSION_UNKNOWN.value
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search_results', autospec=True)
def test_results_response(self, mock_results_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'result_by_sid.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_results_response.return_value = SplunkMockResponse(200, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
offset = 0
length = 1
transmission = stix_transmission.StixTransmission('splunk', connection, config)
results_response = transmission.results(search_id, offset, length)
assert 'success' in results_response
assert results_response['success'] is True
assert 'data' in results_response
assert len(results_response['data']) > 0
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search_results',
autospec=True)
def test_results_response_empty_list(self, mock_results_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'empty_result_by_sid.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_results_response.return_value = SplunkMockResponse(200, mocked_return_value)
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
offset = 0
length = 1
entry_point = EntryPoint(connection, config)
results_response = entry_point.create_results_connection(search_id, offset, length)
assert 'success' in results_response
assert results_response['success'] is True
assert 'data' in results_response
assert len(results_response['data']) == 0
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search_results',
autospec=True)
def test_results_response_exception(self, mock_results_response, mock_api_client):
mock_api_client.return_value = None
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'result_by_sid.json')
        with open(file_path, 'r') as file_handle:
            mocked_return_value = file_handle.read()
mock_results_response.return_value = SplunkMockResponse(200, mocked_return_value)
mock_results_response.side_effect = Exception('exception')
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
search_id = "1536832140.4293"
offset = 0
length = 1
transmission = stix_transmission.StixTransmission('splunk', connection, config)
results_response = transmission.results(search_id, offset, length)
assert 'success' in results_response
assert results_response['success'] is False
assert results_response['code'] == ErrorCode.TRANSMISSION_UNKNOWN.value
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.create_search', autospec=True)
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search', autospec=True)
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.get_search_results', autospec=True)
def test_query_flow(self, mock_results_response, mock_status_response, mock_query_response, mock_api_client):
mock_api_client.return_value = None
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
query_mock = '{"sid":"1536832140.4293"}'
mock_query_response.return_value = SplunkMockResponse(201, query_mock)
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, 'api_response', 'result_by_sid.json')
        with open(file_path, 'r') as file_handle:
            results_mock = file_handle.read()
mock_results_response.return_value = SplunkMockResponse(200, results_mock)
status_file_path = os.path.join(dir_path, 'api_response', 'status_by_sid.json')
        with open(status_file_path, 'r') as file_handle:
            status_mock = file_handle.read()
mock_status_response.return_value = SplunkMockResponse(200, status_mock)
query = 'search eventtype=network_traffic | fields + tag| spath'
entry_point = EntryPoint(connection, config)
query_response = entry_point.create_query_connection(query)
assert query_response is not None
assert query_response['success'] is True
assert 'search_id' in query_response
assert query_response['search_id'] == "1536832140.4293"
search_id = "1536832140.4293"
status_response = entry_point.create_status_connection(search_id)
assert status_response is not None
assert 'status' in status_response
assert status_response['status'] == 'COMPLETED'
assert 'progress' in status_response
assert status_response['progress'] == 100
assert 'success' in status_response
assert status_response['success'] is True
search_id = "1536832140.4293"
offset = 0
length = 1
results_response = entry_point.create_results_connection(search_id, offset, length)
assert 'success' in results_response
assert results_response['success'] is True
assert 'data' in results_response
assert len(results_response['data']) > 0
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.delete_search', autospec=True)
def test_delete_search(self, mock_results_delete, mock_api_client):
mock_api_client.return_value = None
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
mocked_return_value = '{"messages":[{"type":"INFO","text":"Search job cancelled."}]}'
mock_results_delete.return_value = SplunkMockResponse(200, mocked_return_value)
search_id = "1536832140.4293"
transmission = stix_transmission.StixTransmission('splunk', connection, config)
results_response = transmission.delete(search_id)
assert results_response is not None
assert results_response['success'] is True
@patch('stix_shifter_modules.splunk.stix_transmission.api_client.APIClient.delete_search', autospec=True)
def test_delete_search_exception(self, mock_results_delete, mock_api_client):
mock_api_client.return_value = None
config = {
"auth": {
"username": "",
"password": ""
}
}
connection = {
"host": "host",
"port": 8080
}
mocked_return_value = '{"messages":[{"type":"INFO","text":"Unknown sid."}]}'
mock_results_delete.return_value = SplunkMockResponse(201, mocked_return_value)
search_id = "1536832140.4293"
transmission = stix_transmission.StixTransmission('splunk', connection, config)
results_response = transmission.delete(search_id)
assert results_response is not None
assert results_response['success'] is False
assert results_response['code'] == ErrorCode.TRANSMISSION_SEARCH_DOES_NOT_EXISTS.value
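# Optional entry point (an addition for convenience; the suite is normally picked
# up by pytest/unittest discovery): allows running this file directly.
if __name__ == '__main__':
    unittest.main()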
|
# pylint: disable = redefined-outer-name
from typing import Union
import py
import pytest
from universum import __main__
from .conftest import FuzzyCallChecker
from .git_utils import GitServer, GitClient, GitTestEnvironment
from .perforce_utils import PerforceWorkspace, P4TestEnvironment
from .utils import LocalTestEnvironment
def test_poll_local_vcs(tmpdir: py.path.local):
env = LocalTestEnvironment(tmpdir, "poll")
env.run()
def test_p4_success_command_line_no_changes(stdout_checker: FuzzyCallChecker,
perforce_workspace: PerforceWorkspace,
tmpdir: py.path.local):
db_file = tmpdir.join("p4poll.json")
result = __main__.main(["poll", "-ot", "term",
"-vt", "p4",
"-f", str(db_file),
"-p4p", perforce_workspace.server.port,
"-p4u", perforce_workspace.server.user,
"-p4P", perforce_workspace.server.password,
"-p4d", perforce_workspace.depot,
"-jtu", "https://localhost/?%s"])
assert result == 0
stdout_checker.assert_has_calls_with_param("==> No changes detected")
def test_git_success_command_line_no_changes(stdout_checker: FuzzyCallChecker,
git_server: GitServer,
tmpdir: py.path.local):
db_file = tmpdir.join("gitpoll.json")
result = __main__.main(["poll", "-ot", "term",
"-vt", "git",
"-f", str(db_file),
"-gr", git_server.url,
"-grs", git_server.target_branch,
"-jtu", "https://localhost/?%s"])
assert result == 0
stdout_checker.assert_has_calls_with_param("==> No changes detected")
def test_p4_error_command_line_wrong_port(stdout_checker: FuzzyCallChecker,
perforce_workspace: PerforceWorkspace,
tmpdir: py.path.local):
db_file = tmpdir.join("p4poll.json")
result = __main__.main(["poll", "-ot", "term",
"-vt", "p4",
"-f", str(db_file),
"-p4p", "127.0.0.1:1024",
"-p4u", perforce_workspace.server.user,
"-p4P", perforce_workspace.server.password,
"-p4d", perforce_workspace.depot,
"-jtu", "https://localhost/?%s"])
assert result != 0
stdout_checker.assert_has_calls_with_param("TCP connect to 127.0.0.1:1024 failed.")
def test_git_error_command_line_wrong_port(stdout_checker: FuzzyCallChecker,
git_server: GitServer,
tmpdir: py.path.local):
db_file = tmpdir.join("gitpoll.json")
result = __main__.main(["poll", "-ot", "term",
"-vt", "git",
"-f", str(db_file),
"-gr", "file:///non-existing-directory",
"-grs", git_server.target_branch,
"-jtu", "https://localhost/?%s"])
assert result != 0
stdout_checker.assert_has_calls_with_param("Cmd('git') failed due to: exit code(128)")
@pytest.fixture(params=["git", "p4"])
def poll_environment(request, perforce_workspace: PerforceWorkspace, git_client: GitClient, tmpdir: py.path.local):
if request.param == "git":
yield GitTestEnvironment(git_client, tmpdir, test_type="poll")
else:
yield P4TestEnvironment(perforce_workspace, tmpdir, test_type="poll")
def test_error_one_change(stdout_checker: FuzzyCallChecker, log_exception_checker: FuzzyCallChecker,
poll_environment: Union[GitTestEnvironment, P4TestEnvironment]):
# initialize working directory with initial data
poll_environment.run_with_http_server()
# make change in workspace
change = poll_environment.vcs_client.make_a_change()
# run poll again and fail triggering url because there is no server
poll_environment.run(expect_failure=True)
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change)
# there is no listening server
log_exception_checker.assert_has_calls_with_param("[Errno 111] Connection refused")
def test_success_one_change(stdout_checker: FuzzyCallChecker, poll_environment: Union[GitTestEnvironment, P4TestEnvironment]):
# initialize working directory with initial data
poll_environment.run_with_http_server()
# make change in workspace
change = poll_environment.vcs_client.make_a_change()
collected_http = poll_environment.run_with_http_server()
collected_http.assert_request_was_made({"cl": [change]})
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change)
def test_success_two_changes(stdout_checker: FuzzyCallChecker, poll_environment: Union[GitTestEnvironment, P4TestEnvironment]):
# initialize working directory with initial data
poll_environment.run_with_http_server()
# make changes in workspace
change1 = poll_environment.vcs_client.make_a_change()
change2 = poll_environment.vcs_client.make_a_change()
# run poll again and trigger the url twice
collected_http = poll_environment.run_with_http_server()
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change1)
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change2)
collected_http.assert_request_was_made({"cl": [change1]})
collected_http.assert_request_was_made({"cl": [change2]})
def test_changes_several_times(stdout_checker: FuzzyCallChecker, poll_environment: Union[GitTestEnvironment, P4TestEnvironment]):
# initialize working directory with initial data
poll_environment.run_with_http_server()
# make changes in workspace
change1 = poll_environment.vcs_client.make_a_change()
change2 = poll_environment.vcs_client.make_a_change()
# run poll and trigger the urls
collected_http = poll_environment.run_with_http_server()
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change1)
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change2)
collected_http.assert_request_was_made({"cl": [change1]})
collected_http.assert_request_was_made({"cl": [change2]})
# make more changes in workspace
stdout_checker.reset()
change3 = poll_environment.vcs_client.make_a_change()
change4 = poll_environment.vcs_client.make_a_change()
# run poll and trigger urls for the new changes only
collected_http = poll_environment.run_with_http_server()
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change3)
stdout_checker.assert_has_calls_with_param("==> Detected commit " + change4)
stdout_checker.assert_absent_calls_with_param("==> Detected commit " + change1)
stdout_checker.assert_absent_calls_with_param("==> Detected commit " + change2)
collected_http.assert_request_was_made({"cl": [change3]})
collected_http.assert_request_was_made({"cl": [change4]})
collected_http.assert_request_was_not_made({"cl": [change1]})
collected_http.assert_request_was_not_made({"cl": [change2]})
|
from __future__ import absolute_import
# EAI fields
from builtins import object
EAI_ACL = 'eai:acl'
EAI_ATTRIBUTES = 'eai:attributes'
EAI_USER = 'eai:userName'
EAI_APP = 'eai:appName'
EAI_FIELD_PREFIX = 'eai:'
EAI_FIELDS = [EAI_ACL, EAI_ATTRIBUTES, EAI_USER, EAI_APP]
# elements of eai:attributes
EAI_ATTRIBUTES_OPTIONAL = 'optionalFields'
EAI_ATTRIBUTES_REQUIRED = 'requiredFields'
EAI_ATTRIBUTES_WILDCARD = 'wildcardFields'
class RestEAI(object):
    """Builds the 'eai:*' metadata (ACL, owner, app name and field attributes)
    exposed alongside a REST model's content."""
    def __init__(self, model, user, app, acl=None):
self.model = model
default_acl = {
'owner': user,
'app': app,
'global': 1,
'can_write': 1,
'modifiable': 1,
'removable': 1,
'sharing': 'global',
'perms': {'read': ['*'], 'write': ['admin']},
}
self.acl = acl or default_acl
self.user = user
self.app = app
self.attributes = self._build_attributes()
@property
def content(self):
return {
EAI_ACL: self.acl,
EAI_USER: self.user,
EAI_APP: self.app,
EAI_ATTRIBUTES: self.attributes,
}
def _build_attributes(self):
optional_fields = []
required_fields = []
for field in self.model.fields:
if field.required:
required_fields.append(field.name)
else:
optional_fields.append(field.name)
return {
EAI_ATTRIBUTES_OPTIONAL: optional_fields,
EAI_ATTRIBUTES_REQUIRED: required_fields,
EAI_ATTRIBUTES_WILDCARD: [],
}
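# A minimal usage sketch. FakeField and FakeModel are hypothetical stand-ins: the
# real model only needs a 'fields' iterable whose items expose 'name' and
# 'required', as _build_attributes shows above.
if __name__ == '__main__':
    class FakeField(object):
        def __init__(self, name, required):
            self.name = name
            self.required = required
    class FakeModel(object):
        fields = [FakeField('name', True), FakeField('description', False)]
    eai = RestEAI(FakeModel(), user='admin', app='search')
    # Expected: requiredFields ['name'], optionalFields ['description'], wildcardFields []
    print(eai.content[EAI_ATTRIBUTES])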
|