| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| ga4gh/search/compliance/util/local_server.py | ga4gh-discovery/ga4gh-search-compliance | 0 | 12600 |
<filename>ga4gh/search/compliance/util/local_server.py
# -*- coding: utf-8 -*-
"""Module compliance_suite.report_server.py
This module contains class definition of small web server utility. Serves final
report results as HTML.
"""
import datetime
import time
import http.server
import socketserver
import os
import logging
import inspect
import socket
import webbrowser
import shutil
import sys
import threading
import json
import jinja2 as j2
import ga4gh.search.compliance as pkg_dir
from ga4gh.search.compliance.config.configuration import Configuration
def capitalize(text):
"""capitalizes a word, for use in rendering template
Args:
text (str): word to capitalize
Returns:
capitalized (str): capitalized word
"""
return text[0].upper() + text[1:]
def get_route_status(route_obj):
count_d = {
"incomplete": 0,
"pass": 0,
"fail": 0,
"warn": 0,
"skip": 0
}
symbol_d = {
"0": "incomplete",
"1": "pass",
"2": "warn",
"3": "fail",
"4": "skip",
}
ret = {
"btn": "btn-danger",
"text": "No Tests Run"
}
for test_case_report in route_obj["test_case_reports"]:
count_d[symbol_d[str(test_case_report["status"])]] += 1
if count_d["fail"] > 0 or count_d["skip"] > 0:
ret = {
"btn": "btn-danger",
"text": "%s Failed / %s Skipped" % (str(count_d["fail"]),
str(count_d["skip"]))
}
if count_d["pass"] > 0:
ret = {
"btn": "btn-success",
"text": "Pass"
}
return ret
class LocalServer(object):
"""Creates web server, serves test report as HTML
The LocalServer spins up a small, local web server to host test result
reports once the final JSON object has been generated. The server can be
shut down with CTRL+C.
Attributes:
port (Port): object representing free port to serve content
httpd (TCPServer): handle for web server
thread (Thread): thread serves content indefinitely, can be killed
safely from the outside via CTRL+C
web_dir (str): directory which hosts web files (CSS and generated HTML)
cwd (str): working directory to change back to after creating server
render_helper (dict): contains data structures and functions to be
passed to rendering engine to aid in rendering HTML
"""
def __init__(self):
"""instantiates a ReportServer object"""
self.port = None
self.httpd = None
self.thread = None
self.web_dir = Configuration.get_instance().get_output_dir()
self.web_resource_dir = os.path.join(
os.path.dirname(pkg_dir.__file__),
"web"
)
self.cwd = os.getcwd()
self.render_helper = {
"s": { # s: structures
"endpoints": [
"service_info",
"tables",
"table_info",
"table_data",
"search"
],
"formatted": {
"service_info": "Service Info",
"tables": "Tables",
"table_info": "Table Info",
"table_data": "Table Data",
"search": "Search"
},
"status": {
0: {
"status": "INCOMPLETE",
"css_class": "text-danger",
"fa_class": "fa-times-circle"
},
1: {
"status": "PASS",
"css_class": "text-success",
"fa_class": "fa-check-circle"
},
2: {
"status": "WARN",
"css_class": "text-danger",
"fa_class": "fa-times-circle"
},
3: {
"status": "FAIL",
"css_class": "text-danger",
"fa_class": "fa-times-circle"
},
4: {
"status": "SKIP",
"css_class": "text-info",
"fa_class": "fa-ban"
}
}
},
"f": { # f: functions
"capitalize": capitalize,
"format_test_name": lambda text: " ".join(
[capitalize(t) for t in text.split("_")]
),
"server_name_url": lambda name: \
name.lower().replace(" ", "") + ".html",
"rm_space": lambda text: text.replace(" ", "_")\
.replace(",", ""),
"timestamp": lambda: \
datetime.datetime.now(datetime.timezone.utc)\
.strftime("%B %d, %Y at %l:%M %p (%Z)"),
"route_status": get_route_status
}
}
def setup(self):
self.__set_free_port()
self.__copy_web_resource_dir()
self.__render_html()
def serve(self, uptime=3600):
"""serves server as separate thread so it can be stopped from outside
Args:
uptime (int): server will remain up for this time in seconds unless
shutdown by user
"""
try:
self.thread = threading.Thread(target=self.__start_mock_server,
args=(uptime,))
self.thread.start()
time.sleep(uptime)
except KeyboardInterrupt as e:
print("stopping server")
finally:
self.httpd.shutdown()
os.chdir(self.cwd)
def __set_free_port(self):
"""get free port on local machine on which to run the report server
This function is used in conftest and the return of this is a free port
available in the system on which the mock server will be run. This port
will be passed to start_mock_server as a required parameter from
conftest.py
Returns:
(Port): free port on which to run server
"""
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
self.port = port
def __copy_web_resource_dir(self):
for subdir in ["public"]:
src = os.path.join(self.web_resource_dir, subdir)
dst = os.path.join(self.web_dir, subdir)
shutil.copytree(src, dst)
def __render_html(self):
data = None
with open(self.web_dir + "/ga4gh-search-compliance-report.json", "r") as f:
data = json.load(f)
# set up jinja2 rendering engine
view_loader = j2.FileSystemLoader(searchpath=self.web_resource_dir)
view_env = j2.Environment(loader=view_loader)
# render the index/homepage
home_template = view_env.get_template("views/home.html")
home_rendered = home_template.render(data=data, h=self.render_helper)
home_path = self.web_dir + "/index.html"
open(home_path, "w").write(home_rendered)
for server_report in data["server_reports"]:
report_template = view_env.get_template("views/report.html")
report_rendered = report_template.render(server_report=server_report,
h=self.render_helper)
report_path = self.web_dir + "/" + \
self.render_helper["f"]["server_name_url"](server_report["name"])
open(report_path, "w").write(report_rendered)
def __start_mock_server(self, uptime):
"""run server to serve final test report
Args:
port (Port): port on which to run the server
"""
os.chdir(self.web_dir)
Handler = http.server.SimpleHTTPRequestHandler
self.httpd = socketserver.TCPServer(("", self.port), Handler)
logging.info("serving at http://localhost:" + str(self.port))
webbrowser.open("http://localhost:" + str(self.port))
logging.info("server will shut down after " + str(uptime) + " seconds, "
+ "press CTRL+C to shut down manually")
self.httpd.serve_forever()
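For orientation, a minimal usage sketch of the class above; it assumes Configuration.get_instance() has already been set up so that get_output_dir() points at a directory containing ga4gh-search-compliance-report.json:
from ga4gh.search.compliance.util.local_server import LocalServer

server = LocalServer()
server.setup()            # pick a free port, copy web assets, render the HTML report
server.serve(uptime=600)  # serve for 10 minutes, or until CTRL+C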
| 2.53125 | 3 |
| api/routefinder.py | shingkid/DrWatson-ToTheRescue_SCDFXIBM | 1 | 12601 |
import csv
import pandas as pd
import numpy as np
import networkx as nx
class RouteFinder():
def __init__(self):
G = nx.Graph()
with open('data/node_pairs.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
# add edges
G.add_edge(row[0],row[1])
self.G = G
def reset_graph(self):
G = nx.Graph()
with open('data/node_pairs.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
# add edges
G.add_edge(row[0],row[1])
self.G = G
def remove_node(self,nodes):
self.G.remove_nodes_from(nodes)
def optimal_route(self,source,target):
return nx.shortest_path(self.G, source, target)
def optimal_entry_route(self,target):
exits = ['Exit_4','Exit_3','Exit_2','Exit_1']
optimal_route = []
shortest_path_length = 0
for exit in exits:
try:
curr_path = nx.shortest_path(self.G, exit, target)
curr_length = len(curr_path)
if shortest_path_length == 0 or curr_length < shortest_path_length:
optimal_route = curr_path
shortest_path_length = curr_length
except (nx.NetworkXNoPath, nx.NodeNotFound):  # no route via this exit
msg = 'No paths found'
if shortest_path_length == 0:
return msg
return optimal_route
def optimal_exit_route(self,source):
exits = ['Exit_1','Exit_2','Exit_3','Exit_4']
optimal_route = []
shortest_path_length = 0
for exit in exits:
try:
curr_path = nx.shortest_path(self.G, source, exit)
curr_length = len(curr_path)
if shortest_path_length == 0 or curr_length < shortest_path_length:
optimal_route = curr_path
shortest_path_length = curr_length
except (nx.NetworkXNoPath, nx.NodeNotFound):  # no route via this exit
msg = 'No paths found'
if shortest_path_length == 0:
return msg
return optimal_route
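A short usage sketch for RouteFinder, assuming data/node_pairs.csv exists and that the node names used here ('A', 'B') are placeholders for real node IDs from that file:
finder = RouteFinder()
finder.remove_node(['B'])                # e.g. a corridor that has become impassable
route = finder.optimal_exit_route('A')   # shortest path to the nearest exit, or 'No paths found'
print(route)
finder.reset_graph()                     # restore the full graph afterwards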
| 3.203125 | 3 |
| lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/extras/rst.py | loikein/ekw-lectures | 4 | 12602 |
#!/usr/bin/env python
# <NAME>, 2013 (zougloub)
"""
reStructuredText support (experimental)
Example::
def configure(conf):
conf.load('rst')
if not conf.env.RST2HTML:
conf.fatal('The program rst2html is required')
def build(bld):
bld(
features = 'rst',
type = 'rst2html', # rst2html, rst2pdf, ...
source = 'index.rst', # mandatory, the source
deps = 'image.png', # to give additional non-trivial dependencies
)
By default the tool looks for a set of programs in PATH.
The tools are defined in `rst_progs`.
To configure with a special program use::
$ RST2HTML=/path/to/rst2html waf configure
This tool is experimental; don't hesitate to contribute to it.
"""
import re
from waflib import Errors
from waflib import Logs
from waflib import Node
from waflib import Task
from waflib import Utils
from waflib.TaskGen import before_method
from waflib.TaskGen import feature
rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split()
def parse_rst_node(task, node, nodes, names, seen, dirs=None):
# TODO add extensibility, to handle custom rst include tags...
if dirs is None:
dirs = (node.parent, node.get_bld().parent)
if node in seen:
return
seen.append(node)
code = node.read()
re_rst = re.compile(
r"^\s*.. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$", re.M
)
for match in re_rst.finditer(code):
ipath = match.group("file")
itype = match.group("type")
Logs.debug("rst: visiting %s: %s", itype, ipath)
found = False
for d in dirs:
Logs.debug("rst: looking for %s in %s", ipath, d.abspath())
found = d.find_node(ipath)
if found:
Logs.debug("rst: found %s as %s", ipath, found.abspath())
nodes.append((itype, found))
if itype == "include":
parse_rst_node(task, found, nodes, names, seen)
break
if not found:
names.append((itype, ipath))
class docutils(Task.Task):
"""
Compile a rst file.
"""
def scan(self):
"""
A recursive regex-based scanner that finds rst dependencies.
"""
nodes = []
names = []
seen = []
node = self.inputs[0]
if not node:
return (nodes, names)
parse_rst_node(self, node, nodes, names, seen)
Logs.debug("rst: %r: found the following file deps: %r", self, nodes)
if names:
Logs.warn("rst: %r: could not find the following file deps: %r", self, names)
return ([v for (t, v) in nodes], [v for (t, v) in names])
def check_status(self, msg, retcode):
"""
Check an exit status and raise an error with a particular message
:param msg: message to display if the code is non-zero
:type msg: string
:param retcode: condition
:type retcode: boolean
"""
if retcode != 0:
raise Errors.WafError(f"{msg!r} command exit status {retcode!r}")
def run(self):
"""
Runs the rst compilation using docutils
"""
raise NotImplementedError()
class rst2html(docutils):
color = "BLUE"
def __init__(self, *args, **kw):
docutils.__init__(self, *args, **kw)
self.command = self.generator.env.RST2HTML
self.attributes = ["stylesheet"]
def scan(self):
nodes, names = docutils.scan(self)
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
ssnode = self.generator.to_nodes(stylesheet)[0]
nodes.append(ssnode)
Logs.debug("rst: adding dep to %s %s", attribute, stylesheet)
return nodes, names
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.command + [src, dst]
cmd += Utils.to_list(getattr(self.generator, "options", []))
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
stylesheet = self.generator.to_nodes(stylesheet)[0]
cmd += ["--%s" % attribute, stylesheet.path_from(cwdn)]
return self.exec_command(cmd, cwd=cwdn.abspath())
class rst2s5(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2S5
self.attributes = ["stylesheet"]
class rst2latex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2LATEX
self.attributes = ["stylesheet"]
class rst2xetex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2XETEX
self.attributes = ["stylesheet"]
class rst2pdf(docutils):
color = "BLUE"
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.generator.env.RST2PDF + [src, "-o", dst]
cmd += Utils.to_list(getattr(self.generator, "options", []))
return self.exec_command(cmd, cwd=cwdn.abspath())
@feature("rst")
@before_method("process_source")
def apply_rst(self):
"""
Create :py:class:`rst` or other rst-related task objects
"""
if self.target:
if isinstance(self.target, Node.Node):
tgt = self.target
elif isinstance(self.target, str):
tgt = self.path.get_bld().make_node(self.target)
else:
self.bld.fatal(
f"rst: Don't know how to build target name {self.target} which is not a string or Node for {self}"
)
else:
tgt = None
tsk_type = getattr(self, "type", None)
src = self.to_nodes(self.source)
assert len(src) == 1
src = src[0]
if tsk_type is not None and tgt is None:
if tsk_type.startswith("rst2"):
ext = tsk_type[4:]
else:
self.bld.fatal("rst: Could not detect the output file extension for %s" % self)
tgt = src.change_ext(".%s" % ext)
elif tsk_type is None and tgt is not None:
out = tgt.name
ext = out[out.rfind(".") + 1 :]
self.type = "rst2" + ext
elif tsk_type is not None and tgt is not None:
# the user knows what he wants
pass
else:
self.bld.fatal("rst: Need to indicate task type or target name for %s" % self)
deps_lst = []
if getattr(self, "deps", None):
deps = self.to_list(self.deps)
for filename in deps:
n = self.path.find_resource(filename)
if not n:
self.bld.fatal(f"Could not find {filename!r} for {self!r}")
if not n in deps_lst:
deps_lst.append(n)
try:
task = self.create_task(self.type, src, tgt)
except KeyError:
self.bld.fatal(f"rst: Task of type {self.type} not implemented (created by {self})")
task.env = self.env
# add the manual dependencies
if deps_lst:
try:
lst = self.bld.node_deps[task.uid()]
for n in deps_lst:
if not n in lst:
lst.append(n)
except KeyError:
self.bld.node_deps[task.uid()] = deps_lst
inst_to = getattr(self, "install_path", None)
if inst_to:
self.install_task = self.add_install_files(install_to=inst_to, install_from=task.outputs[:])
self.source = []
def configure(self):
"""
Try to find the rst programs.
Do not raise any error if they are not found.
You'll have to use additional code in configure() to die
if programs were not found.
"""
for p in rst_progs:
self.find_program(p, mandatory=False)
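For reference, the scanner regex above picks up include, image and figure directives (with an optional substitution label); a small standalone check, using sample .rst lines rather than files from this repository:
import re

re_rst = re.compile(
    r"^\s*.. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$", re.M
)
sample = ".. include:: intro.rst\n.. |logo| image:: logo.png\n.. figure:: results/plot.pdf\n"
for match in re_rst.finditer(sample):
    print(match.group("type"), match.group("file"))
# prints: include intro.rst / image logo.png / figure results/plot.pdf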
| 2.375 | 2 |
| mofa/analytics/tests/test_participationAnalytics/test_quizParticipation.py | BoxInABoxICT/BoxPlugin | 0 | 12603 |
<gh_stars>0
import unittest
import json
import os
from unittest.mock import MagicMock, patch
from analytics.src.participationAnalytics import quizParticipation
class TestQuizParticipation(unittest.TestCase):
@patch("analytics.src.participationAnalytics.quizParticipation.runQuery")
def test_generateData(self, lrs_mock):
"""
Tests if the analysis is performed correctly
"""
# Setup mock for database query
d = os.path.dirname(os.path.realpath(__file__))
f = open(f'{d}/quizQuery.json')
lrs_mock.side_effect = [json.load(f)]
# Run the test
correct_result = {
"http://localhost/mod/quiz/view.php?id=1": 3,
"http://localhost/mod/quiz/view.php?id=2": 1,
"http://localhost/mod/quiz/view.php?id=5": 1
}
actual_result = quizParticipation.generateData(0)
self.assertEqual(correct_result, actual_result)
@patch("analytics.src.participationAnalytics.quizParticipation.runQuery")
def test_generateData_error(self, lrs_mock):
"""
Tests if an error is passed through correctly
"""
# Setup mock for database query
error = {"error": "mock error"}
lrs_mock.side_effect = [error]
# Run test
self.assertEqual(error, quizParticipation.generateData(2))
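The @patch/side_effect pattern used in these tests, reduced to a self-contained example; the module, function and score values below are illustrative, not taken from the plugin:
import unittest
from unittest.mock import patch

def load_scores():
    # stand-in for the patched dependency (runQuery in the real tests)
    raise RuntimeError("should be mocked in tests")

def count_passes():
    return sum(1 for score in load_scores() if score >= 50)

class TestCountPasses(unittest.TestCase):
    @patch(f"{__name__}.load_scores")
    def test_count_passes(self, query_mock):
        query_mock.side_effect = [[10, 60, 75]]  # one queued return value per call
        self.assertEqual(count_passes(), 2)

if __name__ == "__main__":
    unittest.main()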
| 2.6875 | 3 |
| visualize_cam.py | mhamdan91/Gradcam_eager | 2 | 12604 |
# from utils import Sample_main
import gradcam_main
import numpy as np
import tensorflow as tf
import argparse
import os
tf.logging.set_verbosity(tf.logging.ERROR) # disable to see tensorflow warnings
def cam(in_path='sample.bmp', out_path = 'sample.png',):
gradcam_main.cam_vis(in_path, out_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_image', default='sample.bmp', type=str, help='Full name of the input image -- default set to sample.bmp')
parser.add_argument('-o', '--output_image', default='sample.png', type=str, help='Full name of output image (should be .png) -- default set to '
'input_image.png')
args = parser.parse_args()
if args.input_image != 'sample.bmp' and args.output_image == 'sample.png':
out_name = args.input_image
out_name = out_name.replace('bmp', 'png')
else:
out_name = args.output_image
out_name = out_name.replace('bmp', 'png')
cam(args.input_image, out_name)
# In case referenced by other modules
if __name__ == '__main__':
main()
| 2.125 | 2 |
| tests/molecular/molecules/building_block/test_with_functional_groups.py | andrewtarzia/stk | 21 | 12605 |
from ..utilities import (
has_same_structure,
is_equivalent_molecule,
is_equivalent_building_block,
are_equivalent_functional_groups,
)
def test_with_functional_groups(building_block, get_functional_groups):
"""
Test :meth:`.BuildingBlock.with_functional_groups`.
Parameters
----------
building_block : :class:`.BuildingBlock`
The building block to test.
get_functional_groups : :class:`callable`
Takes a single parameter, `building_block` and returns the
`functional_groups` parameter to use for this test.
Returns
-------
None : :class:`NoneType`
"""
# Save clone to check immutability.
clone = building_block.clone()
_test_with_functional_groups(
building_block=building_block,
functional_groups=tuple(get_functional_groups(building_block)),
)
is_equivalent_building_block(building_block, clone)
has_same_structure(building_block, clone)
def _test_with_functional_groups(building_block, functional_groups):
"""
Test :meth:`.BuildingBlock.with_functional_groups`.
Parameters
----------
building_block : :class:`.BuildingBlock`
The building block to test.
functional_groups : :class:`tuple` of :class:`.FunctionalGroup`
The functional groups the new building block should hold.
Returns
-------
None : :class:`NoneType`
"""
new = building_block.with_functional_groups(functional_groups)
are_equivalent_functional_groups(
new.get_functional_groups(),
functional_groups,
)
is_equivalent_molecule(building_block, new)
has_same_structure(building_block, new)
| 2.40625 | 2 |
| tests/master/test_master.py | bk-mtg/piwheels | 0 | 12606 |
<reponame>bk-mtg/piwheels
# The piwheels project
# Copyright (c) 2017 <NAME> <https://github.com/bennuttall>
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from unittest import mock
from threading import Thread
import pytest
from conftest import find_message
from piwheels import __version__, protocols, transport
from piwheels.master import main, const
@pytest.fixture()
def mock_pypi(request):
with mock.patch('xmlrpc.client.ServerProxy') as proxy:
proxy().changelog_since_serial.return_value = []
yield proxy
@pytest.fixture()
def mock_signal(request):
with mock.patch('signal.signal') as signal:
yield signal
@pytest.fixture()
def mock_context(request, zmq_context):
with mock.patch('piwheels.transport.Context') as ctx_mock:
# Pass thru calls to Context.socket, but ignore everything else (in
# particular, destroy and term calls as we want the testing context to
# stick around)
ctx_mock().socket.side_effect = zmq_context.socket
yield ctx_mock
@pytest.fixture()
def master_thread(request, mock_pypi, mock_context, mock_systemd, mock_signal,
tmpdir, db_url, db, with_schema):
main_thread = None
def _master_thread(args=None):
nonlocal main_thread
if args is None:
args = []
main_thread = Thread(daemon=True, target=main, args=([
'--dsn', db_url,
'--output-path', str(tmpdir.join('output')),
'--status-queue', 'ipc://' + str(tmpdir.join('status-queue')),
'--control-queue', 'ipc://' + str(tmpdir.join('control-queue')),
'--slave-queue', 'ipc://' + str(tmpdir.join('slave-queue')),
'--file-queue', 'ipc://' + str(tmpdir.join('file-queue')),
'--import-queue', 'ipc://' + str(tmpdir.join('import-queue')),
'--log-queue', 'ipc://' + str(tmpdir.join('log-queue')),
] + list(args),))
return main_thread
yield _master_thread
if main_thread is not None and main_thread.is_alive():
with mock_context().socket(
transport.PUSH, protocol=reversed(protocols.master_control)) as control:
control.connect('ipc://' + str(tmpdir.join('control-queue')))
control.send_msg('QUIT')
main_thread.join(10)
assert not main_thread.is_alive()
@pytest.fixture()
def master_control(request, tmpdir, mock_context):
control = mock_context().socket(
transport.PUSH, protocol=reversed(protocols.master_control))
control.connect('ipc://' + str(tmpdir.join('control-queue')))
yield control
control.close()
def test_help(capsys):
with pytest.raises(SystemExit):
main(['--help'])
out, err = capsys.readouterr()
assert out.startswith('usage:')
assert '--pypi-xmlrpc' in out
def test_version(capsys):
with pytest.raises(SystemExit):
main(['--version'])
out, err = capsys.readouterr()
assert out.strip() == __version__
def test_no_root(caplog):
with mock.patch('os.geteuid') as geteuid:
geteuid.return_value = 0
assert main([]) != 0
assert find_message(caplog.records,
message='Master must not be run as root')
def test_quit_control(mock_systemd, master_thread, master_control):
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
def test_system_exit(mock_systemd, master_thread, caplog):
with mock.patch('piwheels.master.PiWheelsMaster.main_loop') as main_loop:
main_loop.side_effect = SystemExit(1)
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='shutting down on SIGTERM')
def test_system_ctrl_c(mock_systemd, master_thread, caplog):
with mock.patch('piwheels.master.PiWheelsMaster.main_loop') as main_loop:
main_loop.side_effect = KeyboardInterrupt()
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='shutting down on Ctrl+C')
def test_bad_control(mock_systemd, master_thread, master_control, caplog):
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send(b'FOO')
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='unable to deserialize data')
def test_status_passthru(tmpdir, mock_context, mock_systemd, master_thread):
with mock_context().socket(transport.PUSH, protocol=protocols.monitor_stats) as int_status, \
mock_context().socket(transport.SUB, protocol=reversed(protocols.monitor_stats)) as ext_status:
ext_status.connect('ipc://' + str(tmpdir.join('status-queue')))
ext_status.subscribe('')
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
# Wait for the first statistics message (from BigBrother) to get the
# SUB queue working
msg, data = ext_status.recv_msg()
assert msg == 'STATS'
data['builds_count'] = 12345
int_status.connect(const.INT_STATUS_QUEUE)
int_status.send_msg('STATS', data)
# Try several times to read the passed-thru message; other messages
# (like stats from BigBrother) will be sent to ext-status too
for i in range(3):
msg, copy = ext_status.recv_msg()
if msg == 'STATS':
assert copy == data
break
else:
assert False, "Didn't see modified STATS passed-thru"
def test_kill_control(mock_systemd, master_thread, master_control):
with mock.patch('piwheels.master.SlaveDriver.kill_slave') as kill_slave:
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('KILL', 1)
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert kill_slave.call_args == mock.call(1)
def test_pause_resume(mock_systemd, master_thread, master_control, caplog):
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('PAUSE')
master_control.send_msg('RESUME')
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, message='pausing operations')
assert find_message(caplog.records, message='resuming operations')
def test_new_monitor(mock_systemd, master_thread, master_control, caplog):
with mock.patch('piwheels.master.SlaveDriver.list_slaves') as list_slaves:
thread = master_thread()
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('HELLO')
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records,
message='sending status to new monitor')
assert list_slaves.call_args == mock.call()
def test_debug(mock_systemd, master_thread, master_control, caplog):
thread = master_thread(args=['--debug', 'master.the_scribe',
'--debug', 'master.the_architect'])
thread.start()
assert mock_systemd._ready.wait(10)
master_control.send_msg('QUIT')
thread.join(10)
assert not thread.is_alive()
assert find_message(caplog.records, name='master.the_scribe',
levelname='DEBUG', message='<< QUIT None')
assert find_message(caplog.records, name='master.the_architect',
levelname='DEBUG', message='<< QUIT None')
| 1.46875 | 1 |
| dibase/rpi/gpio/test/pinid-platformtests.py | ralph-mcardell/dibase-rpi-python | 0 | 12607 |
'''
Part of the dibase.rpi.gpio.test package.
GPIO pin id support classes' platform tests.
Underlying GPIO pin ids are those used by the Linux gpiolib and used
to identify a device's GPIO pins in the Linux sys filesystem GPIO
sub-tree.
Developed by <NAME> / Dibase Limited.
Copyright (c) 2012 Dibase Limited
License: dual: GPL or BSD.
'''
import unittest
import sys
if __name__ == '__main__':
# Add path to directory containing the dibase package directory
sys.path.insert(0, './../../../..')
from dibase.rpi.gpio import pinid
class PinIdRPiPlatforrmTestCases(unittest.TestCase):
def test_0000_get_rpi_major_revision_index_returns_zero_or_positive_int(self):
returned_rev_index = pinid.PinId._get_rpi_major_revision_index()
self.assertIsNotNone(returned_rev_index)
self.assertIsInstance(returned_rev_index,int)
self.assertTrue(returned_rev_index>=0)
def test_0020_PinId_value_of_p1_sda_0_or_2(self):
rev_index = pinid.PinId._get_rpi_major_revision_index()
p1_sda_gpio_id = pinid.PinId.p1_sda()
self.assertTrue((rev_index==0 and p1_sda_gpio_id==0) or p1_sda_gpio_id==2)
if __name__ == '__main__':
unittest.main()
| 2.46875 | 2 |
| analysis.py | tj294/2.5D-RB | 0 | 12608 |
"""
Analysis code for plotting vertical flux transport and/or a gif of temperature,
velocity and KE from the merged output of a Dedalus Rayleigh-Bénard code.
Author: <NAME>
"""
# ====================
# IMPORTS
# ====================
import numpy as np
import h5py
import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pathlib
import os
import shutil
import time
import imageio
from dedalus import public as de
from dedalus.tools import post
# ====================
# CLA PARSING
# ====================
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input", help="Folder where the processing data is stored", required=True
)
parser.add_argument(
"-t", "--heatmap", help="Plot a gif of the temperature heatmap", action="store_true"
)
parser.add_argument(
"-f", "--flux", help="Plot the average flux contributions", action="store_true"
)
parser.add_argument(
"-k", "--KE", help="Plot the kinetic energy only", action="store_true"
)
args = parser.parse_args()
direc = os.path.normpath(args.input) + "/"
with h5py.File(direc + "run_params/run_params_s1.h5", "r") as f:
a = int(np.array(f["tasks"]["a"]))
y = de.Fourier("y", 256, interval=(0, a), dealias=3 / 2)
z = de.Chebyshev("z", 64, interval=(0, 1), dealias=3 / 2)
y = np.array(y.grid(1))
z = np.array(z.grid(1))
# ====================
# Plot Fluxes
# ====================
if args.flux:
avg_t_start = float(input("Start average at: "))
avg_t_stop = float(input("End average at: "))
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
L_cond_arr = np.array(file["tasks"]["L_cond"])[:, 0]
L_conv_arr = np.array(file["tasks"]["L_conv"])[:, 0]
KE = np.array(file["tasks"]["KE"])[:, 0]
snap_t = np.array(file["scales"]["sim_time"])
if (
(avg_t_start <= snap_t[0])
or (avg_t_start >= snap_t[-1])
or (avg_t_stop <= snap_t[0])
or (avg_t_stop >= snap_t[-1])
):
print(
"Average time period out of simulation range: {} -> {}".format(
snap_t[0], snap_t[-1]
)
)
pass
ASI = np.abs(snap_t - avg_t_start).argmin()
if np.isnan(avg_t_stop):
AEI = -1
else:
AEI = np.abs(snap_t - avg_t_stop).argmin()
avg_t_range = snap_t[AEI] - snap_t[ASI]
print("Averaging between {} and {}".format(snap_t[ASI], snap_t[AEI]))
mean_L_cond = np.mean(np.array(L_cond_arr[ASI:AEI]), axis=0)
mean_L_conv = np.mean(np.array(L_conv_arr[ASI:AEI]), axis=0)
mean_L_tot = mean_L_cond + mean_L_conv
del_L = np.max(np.abs(1.0 - mean_L_tot))
print("max del_L = {}".format(del_L))
fig = plt.figure(figsize=(6, 6))
KE_ax = fig.add_subplot(311)
KE_ax.plot(snap_t, KE, "k", label="Kinetic Energy")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.axvspan(
snap_t[ASI], snap_t[AEI], color="r", alpha=0.5, label="Flux averaging"
)
L_ax = fig.add_subplot(212)
L_ax.plot(z, mean_L_cond, "r", linestyle="-", label=r"$L_{cond}$")
L_ax.plot(z, mean_L_conv, "g", linestyle="-", label=r"$L_{conv}$")
L_ax.plot(z, mean_L_tot, "k", ls="-", label=r"$L_{total}$")
L_ax.set_xlabel("z")
L_ax.set_ylabel("L")
L_ax.legend()
plt.savefig(direc + "fluxes.png")
plt.show()
plt.close()
# ====================
# Plot heatmap
# ====================
if args.heatmap:
filenames = []
os.makedirs(direc + "figure", exist_ok=True)
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as file:
KE = np.array(file["tasks"]["KE"])[:, 0]
with h5py.File(direc + "snapshots/snapshots_s1.h5", "r") as file:
T = np.array(file["tasks"]["T"])
v = np.array(file["tasks"]["v"])
w = np.array(file["tasks"]["w"])
snap_t = np.array(file["scales"]["sim_time"])
snap_iter = np.array(file["scales"]["iteration"])
yy, zz = np.meshgrid(y, z)
maxT = np.max(T)
maxV = np.max(v)
maxW = np.max(w)
n_iter = len(T[:, 0:, 0])
start_time = time.time()
print("Plotting {} graphs".format(n_iter))
try:
for i in range(0, int(n_iter)):
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(ncols=2, nrows=3, figure=fig)
T_ax = fig.add_subplot(gs[0:2, 0])
v_ax = fig.add_subplot(gs[0, 1])
w_ax = fig.add_subplot(gs[1, 1])
KE_ax = fig.add_subplot(gs[2, :])
if (i % 50 == 0) and (i != 0):
sec_per_frame = (time.time() - start_time) / i
eta = sec_per_frame * (n_iter - i)
print(
"image {}/{} at {:.3f}ips \t| ETA in {}m {}s".format(
i, n_iter, sec_per_frame, int(eta // 60), int(eta % 60)
)
)
fig.suptitle(
"Iteration: {}\n".format(snap_iter[i])
+ r"Sim Time: {:.2f} $\tau_\kappa$".format(snap_t[i])
)
c1 = v_ax.contourf(
yy,
zz,
np.transpose(v[i, :, :]),
levels=np.linspace(np.min(v), maxV),
cmap="coolwarm",
)
c1_bar = fig.colorbar(c1, ax=v_ax)
c1_bar.set_label("v", rotation=0)
v_ax.set_ylabel("z")
v_ax.set_xlabel("y")
v_ax.invert_xaxis()
c2 = w_ax.contourf(
yy,
zz,
np.transpose(w[i, :, :]),
levels=np.linspace(np.min(w), maxW),
cmap="coolwarm",
)
c2_bar = fig.colorbar(c2, ax=w_ax)
c2_bar.set_label("w", rotation=0)
w_ax.set_ylabel("z")
w_ax.set_xlabel("y")
w_ax.invert_xaxis()
c3 = T_ax.contourf(
yy,
zz,
np.transpose(T[i, :, :]),
levels=np.linspace(0, maxT),
cmap="coolwarm",
)
c3_bar = fig.colorbar(c3, ax=T_ax)
c3_bar.set_label("T", rotation=0)
T_ax.set_ylabel("z")
T_ax.set_xlabel("y")
T_ax.invert_xaxis()
KE_ax.plot(snap_t[:i], KE[:i], "k")
KE_ax.set_xlabel(r"time [$\tau_\kappa$]")
KE_ax.set_ylabel("KE")
KE_ax.set_ylim([0, 1.1 * np.max(KE)])
KE_ax.set_xlim([0, np.max(snap_t)])
plt.tight_layout()
plt.savefig(direc + "figure/fig_{:03d}.png".format(i))
filenames.append(direc + "figure/fig_{:03d}.png".format(i))
plt.close()
plt.clf()
except KeyboardInterrupt:
print("ending loop")
print("completed in {:.2f} sec".format(time.time() - start_time))
print("Creating gif...")
with imageio.get_writer(direc + "info.gif", mode="I") as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
print("Removing raw image files...")
shutil.rmtree(direc + "figure")
if args.KE:
with h5py.File(direc + "analysis/analysis_s1.h5", "r") as f:
KE = np.array(f["tasks"]["KE"])[:, 0]
snap_t = np.array(f["scales"]["sim_time"])
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111)
ax.plot(snap_t, KE, "k")
ax.set_xlabel(r"time [$\tau_\kappa$]")
ax.set_ylabel("KE")
plt.show()
plt.close()
print("done.")
| 2.5 | 2 |
| direct_gd_predict/hash-profile.py | wac/meshop | 1 | 12609 |
<reponame>wac/meshop<gh_stars>1-10
import sys
import heapq
import optparse
from bitcount2 import bitcount
hasher={}
profile={}
key_list=[]
key_col=0
def usage():
print sys.argv[0]," [profile_file]"
print " Load the profile lines from profile_file"
print " Hash function uses the features listed in profile_file"
print " and tests for p-value greater/less than or equal (0/1)"
print " Hash all the profiles from stdin"
exit(1)
def do_hash(hasher, p, key_list):
hashval=""
# for k, v in hasher.iteritems():
for k in key_list:
v=hasher[k]
if k in p and p[k] < v:
hashval=hashval+"1"
else:
hashval=hashval+"0"
return hashval
sep='|'
key_col=0
#feature_col=1
#score_col=6
in_feature_col=0
in_score_col=1
process_feature_col=1
process_score_col=6
parser = optparse.OptionParser()
#parser.add_option("-n", dest="heapsize",
# default=50, action="store", type="int")
#parser.add_option("-R", "--random", dest="use_random",
# default=False, action="store_true")
(options, args) = parser.parse_args(sys.argv)
if (len(args) > 1):
profile_filename=args[1]
else:
usage()
for line in open(profile_filename):
if line[0]=='#':
continue
tuples=line.strip().split(sep)
key=tuples[in_feature_col]
key_list.append(key)
hasher[key]=tuples[in_score_col]
curr_profile={}
old_key=""
for line in sys.stdin:
line=line.strip()
if line[0]=='#':
print line
continue
tuples=line.split(sep)
curr_key=tuples[key_col]
if not old_key:
old_key=curr_key
if not old_key==curr_key:
hashval=do_hash(hasher, curr_profile, key_list)
hashval_int=int(hashval, 2)
print old_key+sep+hashval+sep+str(hashval_int)+sep+str(bitcount(hashval_int))
curr_profile={}
old_key=curr_key
curr_profile[tuples[process_feature_col]]=tuples[process_score_col]
hashval=do_hash(hasher, curr_profile, key_list)
hashval_int=int(hashval, 2)
print old_key+sep+hashval+sep+str(hashval_int)+sep+str(bitcount(hashval_int))
| 2.6875 | 3 |
| sdk/eventgrid/azure-eventgrid/azure/eventgrid/aio/_publisher_client_async.py | conniey/azure-sdk-for-python | 2 | 12610 |
<filename>sdk/eventgrid/azure-eventgrid/azure/eventgrid/aio/_publisher_client_async.py<gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
from .._models import CloudEvent, EventGridEvent, CustomEvent
from .._helpers import _get_topic_hostname_only_fqdn, _get_authentication_policy, _is_cloud_event
from azure.core.pipeline.policies import AzureKeyCredentialPolicy
from azure.core.credentials import AzureKeyCredential
from .._generated.aio import EventGridPublisherClient as EventGridPublisherClientAsync
from .. import _constants as constants
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Union, Dict, List
SendType = Union[
CloudEvent,
EventGridEvent,
CustomEvent,
Dict,
List[CloudEvent],
List[EventGridEvent],
List[CustomEvent],
List[Dict]
]
class EventGridPublisherClient(object):
"""Asynchronous EventGrid Python Publisher Client.
:param str topic_hostname: The topic endpoint to send the events to.
:param credential: The credential object used for authentication which implements SAS key authentication or SAS token authentication.
:type credential: Union[~azure.core.credentials.AzureKeyCredential, azure.eventgrid.EventGridSharedAccessSignatureCredential]
"""
def __init__(self, topic_hostname, credential, **kwargs):
# type: (str, Union[AzureKeyCredential, EventGridSharedAccessSignatureCredential], Any) -> None
auth_policy = _get_authentication_policy(credential)
self._client = EventGridPublisherClientAsync(authentication_policy=auth_policy, **kwargs)
topic_hostname = _get_topic_hostname_only_fqdn(topic_hostname)
self._topic_hostname = topic_hostname
async def send(self, events, **kwargs):
# type: (SendType) -> None
"""Sends event data to topic hostname specified during client initialization.
:param events: A list or an instance of CloudEvent/EventGridEvent/CustomEvent to be sent.
:type events: SendType
:keyword str content_type: The type of content to be used to send the events.
Has default value "application/json; charset=utf-8" for EventGridEvents, with "cloudevents-batch+json" for CloudEvents
:rtype: None
:raises: :class:`ValueError`, when events do not follow specified SendType.
"""
if not isinstance(events, list):
events = [events]
if all(isinstance(e, CloudEvent) for e in events) or all(_is_cloud_event(e) for e in events):
kwargs.setdefault("content_type", "application/cloudevents-batch+json; charset=utf-8")
await self._client.publish_cloud_event_events(self._topic_hostname, events, **kwargs)
elif all(isinstance(e, EventGridEvent) for e in events) or all(isinstance(e, dict) for e in events):
kwargs.setdefault("content_type", "application/json; charset=utf-8")
await self._client.publish_events(self._topic_hostname, events, **kwargs)
elif all(isinstance(e, CustomEvent) for e in events):
serialized_events = [dict(e) for e in events]
await self._client.publish_custom_event_events(self._topic_hostname, serialized_events, **kwargs)
else:
raise ValueError("Event schema is not correct.")
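A hedged usage sketch for this preview-era async client; the endpoint and key are placeholders, and the event is a plain dict in the Event Grid event schema, which send() accepts per the SendType union above:
import asyncio
from datetime import datetime, timezone
from uuid import uuid4

from azure.core.credentials import AzureKeyCredential
from azure.eventgrid.aio import EventGridPublisherClient

async def publish():
    client = EventGridPublisherClient(
        "<topic-name>.<region>.eventgrid.azure.net",   # placeholder topic hostname
        AzureKeyCredential("<topic-access-key>"),      # placeholder access key
    )
    event = {
        "id": str(uuid4()),
        "subject": "orders/1234",
        "eventType": "Contoso.Orders.Created",
        "eventTime": datetime.now(timezone.utc).isoformat(),
        "data": {"orderId": 1234},
        "dataVersion": "1.0",
    }
    await client.send(event)

asyncio.run(publish())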
| 1.476563 | 1 |
| multi_tool.py | zbigos/multi_project_tools | 0 | 12611 |
<reponame>zbigos/multi_project_tools<gh_stars>0
#!/usr/bin/env python3
import logging, sys, argparse
from collect import Collection
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="test a project repo")
parser.add_argument('--force-delete', help='instead of aborting on existing files, delete them', action='store_const', const=True)
subparsers = parser.add_subparsers(help='help for subcommand', dest="command")
parser.add_argument('--config', help="the config file listing all project directories", default='projects.yaml')
parser.add_argument('--local-config', help="the local environment config file", default='local.yaml')
parser.add_argument('--project', help="just run for a single project, supply project ID", type=int)
parser.add_argument('--test-module', help="run the module's test", action='store_const', const=True)
parser.add_argument('--prove-wrapper', help="check the wrapper proof", action='store_const', const=True)
parser.add_argument('--test-caravel', help="check the caravel test", action='store_const', const=True)
parser.add_argument('--test-gds', help="check the gds", action='store_const', const=True)
parser.add_argument('--test-lvs', help="check the gds against powered verilog", action='store_const', const=True)
parser.add_argument('--test-tristate-z', help="check outputs are z when not active", action='store_const', const=True)
parser.add_argument('--test-ports', help="check ports defined in yaml match the verilog", action='store_const', const=True)
parser.add_argument('--test-git', help="check gitsha on disk matches the config", action='store_const', const=True)
parser.add_argument('--test-all', help="run all the checks for each project", action='store_const', const=True)
parser.add_argument('--test-from', help="run all the checks for all projects with id equal or more than the given id", type=int)
parser.add_argument('--openram', help="use OpenRAM - instantiate the bridge, wrapper and do the wiring", action='store_const', const=True)
parser.add_argument('--clone-shared-repos', help="clone shared repos defined in projects.yaml", action='store_const', const=True)
parser.add_argument('--clone-repos', help="git clone the repo", action='store_const', const=True)
parser.add_argument('--create-openlane-config', help="create the OpenLane & caravel_user_project config", action='store_const', const=True)
parser.add_argument('--gate-level', help="create the caravel includes file with gate level includes", action='store_const', const=True)
parser.add_argument('--copy-project', help="copy project's RTL and tests to correct locations in caravel_user_project", action='store_const', const=True)
parser.add_argument('--copy-gds', help="copy the projects GDS and LEF files", action='store_const', const=True)
parser.add_argument('--generate-doc', help="generate a index.md file with information about each project", action='store_const', const=True)
parser.add_argument('--dump-hash', help="print current commit hash of each project along with author and title", action='store_const', const=True)
parser.add_argument('--fill', help="for testing, repeat the given projects this number of times", type=int)
parser.add_argument('--annotate-image', help="annotate the multi_macro.png image generated by klayout", action='store_const', const=True)
parser.add_argument('--dump-macro-position', help="use the macro.cfg + gds to create a list of positions and sizes", action='store_const', const=True)
parser.add_argument('--layout-tool', help="run the manual layout tool on current designs", action='store_const', const=True)
parser.add_argument('--layout-tool-downscale', help="scale factor for layout tool", type=int)
args = parser.parse_args()
# setup log
log_format = logging.Formatter('%(asctime)s - %(module)-15s - %(levelname)-8s - %(message)s')
# configure the client logging
log = logging.getLogger('')
# has to be set to debug as is the root logger
log.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler(sys.stdout)
# create formatter for console
ch.setFormatter(log_format)
log.addHandler(ch)
collection = Collection(args)
# run any tests specified by arguments
collection.run_tests()
if args.layout_tool:
collection.launch_layout_tool(args.layout_tool_downscale)
# create all the OpenLane config for the user collection wrapper
if args.create_openlane_config:
collection.create_openlane_config()
# copy gds to correct place
if args.copy_gds:
collection.copy_all_gds()
if args.copy_project:
collection.copy_all_project_files_to_caravel()
# generate doc
if args.generate_doc:
collection.generate_docs()
# image
if args.annotate_image:
collection.annotate_image()
# dump macro pos - wip for assisted macro placement
if args.dump_macro_position:
collection.get_macro_pos()
| 2.078125 | 2 |
| benchbuild/projects/benchbuild/bots.py | ognarb/benchbuild | 0 | 12612 |
from plumbum import local
from benchbuild import project
from benchbuild.utils import compiler, download, run, wrapping
from benchbuild.utils.cmd import make, mkdir
@download.with_git("https://github.com/bsc-pm/bots", limit=5)
class BOTSGroup(project.Project):
"""
Barcelona OpenMP Task Suite.
Barcelona OpenMP Task Suite is a collection of applications that allows
testing OpenMP tasking implementations and comparing their behaviour under
certain circumstances: task tiedness, throttle and cut-off mechanisms,
single/multiple task generators, etc.
Alignment: Aligns sequences of proteins.
FFT: Computes a Fast Fourier Transformation.
Floorplan: Computes the optimal placement of cells in a floorplan.
Health: Simulates a country health system.
NQueens: Finds solutions of the N Queens problem.
Sort: Uses a mixture of sorting algorithms to sort a vector.
SparseLU: Computes the LU factorization of a sparse matrix.
Strassen: Computes a matrix multiply with Strassen's method.
"""
DOMAIN = 'bots'
GROUP = 'bots'
VERSION = 'HEAD'
path_dict = {
"alignment": "serial/alignment",
"fft": "serial/fft",
"fib": "serial/fib",
"floorplan": "serial/floorplan",
"health": "serial/health",
"knapsack": "serial/knapsack",
"nqueens": "serial/nqueens",
"sort": "serial/sort",
"sparselu": "serial/sparselu",
"strassen": "serial/strassen",
"uts": "serial/uts"
}
input_dict = {
"alignment": ["prot.100.aa", "prot.20.aa"],
"floorplan": ["input.15", "input.20", "input.5"],
"health": ["large.input", "medium.input", "small.input", "test.input"],
"knapsack": [
"knapsack-012.input", "knapsack-016.input", "knapsack-020.input",
"knapsack-024.input", "knapsack-032.input", "knapsack-036.input",
"knapsack-040.input", "knapsack-044.input", "knapsack-048.input",
"knapsack-064.input", "knapsack-096.input", "knapsack-128.input"
],
"uts": [
"huge.input", "large.input", "medium.input", "small.input",
"test.input", "tiny.input"
]
}
SRC_FILE = "bots.git"
def compile(self):
self.download()
makefile_config = local.path(self.src_file) / "config" / "make.config"
clang = compiler.cc(self)
with open(makefile_config, 'w') as config:
lines = [
"LABEL=benchbuild",
"ENABLE_OMPSS=",
"OMPSSC=",
"OMPC=",
"CC={cc}",
"OMPSSLINK=",
"OMPLINK={cc} -fopenmp",
"CLINK={cc}",
"OPT_FLAGS=",
"CC_FLAGS=",
"OMPC_FLAGS=",
"OMPSSC_FLAGS=",
"OMPC_FINAL_FLAGS=",
"OMPSSC_FINAL_FLAG=",
"CLINK_FLAGS=",
"OMPLINK_FLAGS=",
"OMPSSLINK_FLAGS=",
]
lines = [l.format(cc=clang) + "\n" for l in lines]
config.writelines(lines)
mkdir(local.path(self.src_file) / "bin")
with local.cwd(self.src_file):
run.run(make["-C", self.path_dict[self.name]])
def run_tests(self, runner):
binary_name = "{name}.benchbuild.serial".format(name=self.name)
binary_path = local.path(self.src_file) / "bin" / binary_name
exp = wrapping.wrap(binary_path, self)
if self.name in self.input_dict:
for test_input in self.input_dict[self.name]:
input_file = local.path(
self.src_file) / "inputs" / self.name / test_input
runner(exp["-f", input_file])
else:
runner(exp)
class Alignment(BOTSGroup):
NAME = 'alignment'
class FFT(BOTSGroup):
NAME = 'fft'
class Fib(BOTSGroup):
NAME = 'fib'
class FloorPlan(BOTSGroup):
NAME = 'floorplan'
class Health(BOTSGroup):
NAME = 'health'
class Knapsack(BOTSGroup):
NAME = 'knapsack'
class NQueens(BOTSGroup):
NAME = 'nqueens'
class Sort(BOTSGroup):
NAME = 'sort'
class SparseLU(BOTSGroup):
NAME = 'sparselu'
class Strassen(BOTSGroup):
NAME = 'strassen'
class UTS(BOTSGroup):
NAME = 'uts'
| 1.984375 | 2 |
| clients/python/setup.py | timtadh/queued | 4 | 12613 |
<gh_stars>1-10
try:
from setuptools import setup
setup # quiet "redefinition of unused ..." warning from pyflakes
# arguments that distutils doesn't understand
setuptools_kwargs = {
'install_requires': [
],
'provides': ['queued'],
'zip_safe': False
}
except ImportError:
from distutils.core import setup
setuptools_kwargs = {}
setup(name='queued',
version=1.1,
description=(
'A client for queued'
),
author='<NAME>',
author_email='<EMAIL>',
url='queued.org',
packages=['queued',],
platforms=['unix'],
scripts=[],
**setuptools_kwargs
)
| 1.3125 | 1 |
| sym_executor.py | zhangzhenghsy/fiber | 0 | 12614 |
#!/usr/bin/python
import angr,simuvex
import sys,os
import time
from utils_sig import *
from sym_tracer import Sym_Tracer
from sig_recorder import Sig_Recorder
#This class is responsible for performing symbolic execution.
class Sym_Executor(object):
def __init__(self,options=None,dbg_out=False):
self.tracer = None
self.recorder = None
self.dbg_out = dbg_out
self._whitelist = set()
self._all_bbs = set()
self._num_find = 10
self.options = options
def _get_initial_state(self,proj,start,targetfunc=None):
if proj is None:
return None
st = proj.factory.blank_state(addr=start,symbolic_sp=True)
# print st.arch.registers.keys()
# We can customize the symbolic execution by setting various options in the state
# for a full list of available options:
# https://github.com/angr/simuvex/blob/master/simuvex/s_options.py
# E.g. st.options.add(simuvex.o.LAZY_SOLVES) ('options' is a set)
# CALLLESS to do intra-procedure analysis
st.options.add(simuvex.o.CALLLESS)
if targetfunc is not None:
st.options.add(str(hex(targetfunc)))
# To prevent the engine from discarding log history
st.options.add(simuvex.o.TRACK_ACTION_HISTORY)
if self.options.get('simplify_ast',True):
st.options.add(simuvex.o.SIMPLIFY_EXPRS)
st.options.add(simuvex.o.SIMPLIFY_MEMORY_READS)
st.options.add(simuvex.o.SIMPLIFY_MEMORY_WRITES)
st.options.add(simuvex.o.SIMPLIFY_EXIT_GUARD)
#TODO: Find a way to deal with function side-effect (i.e. a function call will output to a parameter, then the parameter will be used in a condition later)
st.options.add(simuvex.o.IGNORE_EXIT_GUARDS)
st.options.add(simuvex.o.IGNORE_MERGE_CONDITIONS)
st.options.add(simuvex.o.DONT_MERGE_UNCONSTRAINED)
#Use customized addr conc strategy
st.memory.read_strategies = [angr.concretization_strategies.SimConcretizationStrategyHZ(limit=3)]
st.memory.write_strategies = [angr.concretization_strategies.SimConcretizationStrategyHZ(limit=3)]
#print st.options
return st
#Include all the BBs along the path from start to ends in the cfg into the whitelist.
#The CFG here is CFGAcc.
def _prep_whitelist(self,cfg,cfg_bounds,ends,start=None,proj=None,sym_tab=None,cfg2=None,cfg_bounds2=None,ends2=None,start2=None,func_cfg=None):
#print "cfg:", [hex(n.addr) for n in cfg.nodes()]
#print cfg.functions[cfg_bounds[0]]
if cfg is None or cfg_bounds is None or len(cfg_bounds) < 2:
print '_prep_whitelist(): Incomplete CFG information'
return
#for addr in cfg2.functions:
# print cfg2.functions[addr]
if cfg2 is not None:
func_cfg2 = get_func_cfg(cfg2,cfg_bounds2[0],proj=proj,sym_tab=sym_tab)
if func_cfg is None:
print 'No func_cfg is available at %x' % cfg_bounds[0]
return
start = cfg_bounds[0]
self._all_bbs = set([x.addr for x in func_cfg.nodes()])
#print '_all_bbs: ' + str([hex(x) for x in list(self._all_bbs)])
#print '_all_bbs2: '+str([hex(x) for x in list(set([x.addr for x in func_cfg2.nodes()]))])
if cfg2 is not None:
self._all_bbs = self._all_bbs.union(set([x.addr for x in func_cfg2.nodes()]))
self._whitelist = get_node_addrs_between(func_cfg,start,ends,from_func_start=(start == cfg_bounds[0]))
if cfg2 is not None:
self._whitelist= self._whitelist.union(get_node_addrs_between(func_cfg2,start2,ends2,from_func_start=(start2 == cfg_bounds2[0])))
l = list(self._whitelist)
l.sort()
#print 'whitelist: ' + str([hex(x) for x in l])
l = list(self._all_bbs)
l.sort()
#print '_all_bbs: ' + str([hex(x) for x in l])
if self.dbg_out:
l = list(self._whitelist)
l.sort()
print 'whitelist: ' + str([hex(x) for x in l])
return
#Why we put a absolutely 'False' find_func here:
#(1)We rely on an accurate whitelist and all the nodes in the list should be explored, so we don't want
#to stop at a certain node.
#(2)With this find_func, basically we will have no states in the 'found' stash in the end, but that's OK
#because all the things we want to do will be done along the symbolic execution process.
def _find_func(self,p):
return False
def _avoid_func(self,p):
#print 'avoid_func: ' + str(hex(p.addr)) + ' ' + str(p.addr in whitelist)
#One problem is that, sometimes p.addr is in the middle of a certain BB, while in whitelist we only have start addresses of BBs.
#Currently for these cases, we will let it continue to execute because it will align to the BB starts later.
with open('testexplorenodes','a') as f:
f.write(str(hex(p.addr))+'\n')
return False if p.addr not in self._all_bbs else (not p.addr in self._whitelist)
#This is basically the 'hook_complete' used in 'explorer' technique, simply deciding whether num_find has been reached.
def _vt_terminator(self,smg):
return len(smg.stashes['found']) >= self._num_find
def _prep_veritesting_options(self,find=None,avoid=None,num_find=10):
if find is None:
find = self._find_func
if avoid is None:
avoid = self._avoid_func
#We need to construct an 'explorer' as an 'exploration_technique' used in the internal SimManager of Veritesting,
#which is basically the same one as used in normal DSE SimManager (by invoking 'explore()' method)
#NOTE that the Veritesting mode will use a separate SimManager, so we have to make TWO 'explorer'.
exp_tech = angr.exploration_techniques.Explorer(find=find,avoid=avoid,num_find=num_find)
veritesting_options = {}
#NOTE: 'loop_unrolling_limit' is compared and considered as 'passed' with '>=' instead of '>', that means if we use '1', no loops will be even entered.
#However we want exactly ONE loop execution, so we should should use '2' here actually.
veritesting_options['loop_unrolling_limit'] = 2
veritesting_options['tech'] = exp_tech
#NOTE that original 'explorer' technique will set a 'hook_complete' in SimManager, which will be passed from 'run()' to 'step()'
#as a 'until_func', however, Veritesting will not invoke 'run()', instead, it calls 'step()' directly, so this hook is basically
#invalidated. To deal with this, we provide a 'terminator' to Veritesting, which will terminate Veritesting when len(stashes[found]) > num_find
veritesting_options['terminator'] = self._vt_terminator
return veritesting_options
#Do the symbolic execution on the given CFG, from start to target, with Veritesting and Whitelist mechanisms.
#Params:
#proj: the angr project.
#states: if it's None, creates a default initial state@start, if start is None, then @cfg_bounds[0].
#cfg: cfg_accurate.
#cfg_bounds: a 2-element list, specifying the area of the target function (to be executed) in the cfg.
#start: Where to start the symbolic execution? Must be within the cfg_bounds.
#targets: Where to end the symbolic execution? Must be within the cfg_bounds. Can specify multiple targets in a list.
#Ret:
#The resulting SimManager.
def try_sym_exec(self,proj,cfg,cfg_bounds,targets,states=None,start=None,new_tracer=False,tracer=None,new_recorder=False,recorder=None,sym_tab=None,sigs=None,cfg2=None,cfg_bounds2=None,targets2=None, start2=None,func_cfg=None,num_find=10):
#print "start1: ", hex(start)
#print "start2: ", hex(start2)
if cfg is None or cfg_bounds is None or len(cfg_bounds) < 2:
print 'No CFG information available for sym exec.'
return None
#This is the start point of sym exec.
st = start if start is not None else cfg_bounds[0]
if start2 is not None:
st=start2
#Fill initial state.
#print 'hex(start)', hex(start)
#print 'str(hex(start))', str(hex(start))
if states is None:
if start2 is not None:
init_state = self._get_initial_state(proj,st,start)
#init_state = self._get_initial_state(proj,start)
else:
init_state = self._get_initial_state(proj,st)
states = [init_state]
#Whether we need to create a new Sym_Tracer to trace the symbolic execution
if new_tracer:
self.tracer = Sym_Tracer(symbol_table=sym_tab,dbg_out=self.dbg_out)
#for example:<class 'sym_tracer.Sym_Tracer'>: {'addr_collision': False, 'dbg_out': True, 'symbol_table': <sym_table.Sym_Table object at 0x7fffeba54890>, '_addr_conc_buf': [], '_sym_map': {}}
#Clear any remaining breakpoints
self.tracer.stop_trace(states)
self.tracer.trace(states)
else:
self.tracer = tracer
#Whether we need to create a new Sig_Recorder
if new_recorder:
if sigs is None:
print 'You must provide sigs if you want to use new recorder'
return
if self.tracer is None:
print 'You must provide tracer or specify new_tracer flag if you want to use new recorder'
return
            self.recorder = Sig_Recorder(sigs,self.tracer,dbg_out=self.dbg_out)
#Clear any remaining breakpoints
self.recorder.stop_record(states)
#Record structural information (nodes and their relationships) and semantic information of 'root'
#instructions with per-instruction breakpoint, the structural information has already been partly recorded in the initial signature.
self.recorder.record(states)
else:
self.recorder = recorder
        #Set the whitelist of basic blocks: we only want to include the BBs that lie along the paths from st to the targets.
self._prep_whitelist(cfg,cfg_bounds,targets,start,proj=proj,sym_tab=sym_tab,cfg2=cfg2,cfg_bounds2=cfg_bounds2,ends2=targets2,start2=start2,func_cfg=func_cfg)
self._num_find = num_find
#Set the VeriTesting options
veritesting_options = self._prep_veritesting_options(num_find=self._num_find)
#Construct the simulation execution manager
smg = proj.factory.simgr(thing=states, veritesting=True, veritesting_options=veritesting_options)
#TODO: Do we still need to use loop limiter for the main DSE SimManager since Veritesting has already got a built-in loop limiter?
#limiter = angr.exploration_techniques.looplimiter.LoopLimiter(count=0, discard_stash='spinning')
#smg.use_technique(limiter)
t0 = time.time()
smg.explore(find=self._find_func, avoid=self._avoid_func, num_find=self._num_find)
print ['%s:%d ' % (name,len(stash)) for name, stash in smg.stashes.items()]
print 'Time elapsed: ' + str(time.time() - t0)
return smg
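#Illustrative usage sketch (added for clarity; not part of the original module). It assumes 'se' is an
#instance of the class above with '_find_func'/'_avoid_func' already configured; the binary path and the
#addresses below are placeholders for your own target.
#
#   proj = angr.Project('/path/to/binary', auto_load_libs=False)
#   cfg = proj.analyses.CFGAccurate(keep_state=True)
#   smg = se.try_sym_exec(proj, cfg, cfg_bounds=[0x400a00, 0x400c80], targets=[0x400c40],
#                         new_tracer=True, num_find=5)
#   if smg is not None:
#       print len(smg.stashes['found'])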
| 2.40625
| 2
|
apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pymysql/constants/CR.py
|
tharindu1st/apim-migration-resources
| 1,573
|
12615
|
<filename>apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pymysql/constants/CR.py
# flake8: noqa
# errmsg.h
CR_ERROR_FIRST = 2000
CR_UNKNOWN_ERROR = 2000
CR_SOCKET_CREATE_ERROR = 2001
CR_CONNECTION_ERROR = 2002
CR_CONN_HOST_ERROR = 2003
CR_IPSOCK_ERROR = 2004
CR_UNKNOWN_HOST = 2005
CR_SERVER_GONE_ERROR = 2006
CR_VERSION_ERROR = 2007
CR_OUT_OF_MEMORY = 2008
CR_WRONG_HOST_INFO = 2009
CR_LOCALHOST_CONNECTION = 2010
CR_TCP_CONNECTION = 2011
CR_SERVER_HANDSHAKE_ERR = 2012
CR_SERVER_LOST = 2013
CR_COMMANDS_OUT_OF_SYNC = 2014
CR_NAMEDPIPE_CONNECTION = 2015
CR_NAMEDPIPEWAIT_ERROR = 2016
CR_NAMEDPIPEOPEN_ERROR = 2017
CR_NAMEDPIPESETSTATE_ERROR = 2018
CR_CANT_READ_CHARSET = 2019
CR_NET_PACKET_TOO_LARGE = 2020
CR_EMBEDDED_CONNECTION = 2021
CR_PROBE_SLAVE_STATUS = 2022
CR_PROBE_SLAVE_HOSTS = 2023
CR_PROBE_SLAVE_CONNECT = 2024
CR_PROBE_MASTER_CONNECT = 2025
CR_SSL_CONNECTION_ERROR = 2026
CR_MALFORMED_PACKET = 2027
CR_WRONG_LICENSE = 2028
CR_NULL_POINTER = 2029
CR_NO_PREPARE_STMT = 2030
CR_PARAMS_NOT_BOUND = 2031
CR_DATA_TRUNCATED = 2032
CR_NO_PARAMETERS_EXISTS = 2033
CR_INVALID_PARAMETER_NO = 2034
CR_INVALID_BUFFER_USE = 2035
CR_UNSUPPORTED_PARAM_TYPE = 2036
CR_SHARED_MEMORY_CONNECTION = 2037
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042
CR_SHARED_MEMORY_MAP_ERROR = 2043
CR_SHARED_MEMORY_EVENT_ERROR = 2044
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046
CR_CONN_UNKNOW_PROTOCOL = 2047
CR_INVALID_CONN_HANDLE = 2048
CR_SECURE_AUTH = 2049
CR_FETCH_CANCELED = 2050
CR_NO_DATA = 2051
CR_NO_STMT_METADATA = 2052
CR_NO_RESULT_SET = 2053
CR_NOT_IMPLEMENTED = 2054
CR_SERVER_LOST_EXTENDED = 2055
CR_STMT_CLOSED = 2056
CR_NEW_STMT_METADATA = 2057
CR_ALREADY_CONNECTED = 2058
CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
CR_DUPLICATE_CONNECTION_ATTR = 2060
CR_AUTH_PLUGIN_ERR = 2061
CR_ERROR_LAST = 2061
| 1.21875
| 1
|
check-challenge.py
|
gjaiswal108/Check-if-Challenge-problem-added-in-codechef
| 0
|
12616
|
<filename>check-challenge.py
import requests,smtplib,time
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
while True:
    # Re-fetch the contest page on every iteration so a newly added problem is actually detected.
    r = requests.get('https://www.codechef.com/JUNE19B/')
    if '(Challenge)' in r.text:
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login("sender_gmail_id", "password")
msg= MIMEMultipart("alternative")
msg["Subject"]="Challenge Problem added"
msg["From"]="sender_gmail_id"
msg["To"]="receiver_gmail_id"
text="I guess challenge problem is added in long challenge,check it on codechef."
html="<h4>I guess challenge problem is added in long challenge,check it on codechef.</h4><br/><a href='https://www.codechef.com/'>Click here to visit. </a>"
msg.attach(MIMEText(html, "html"))
s.sendmail("sender_gmail_id","receiver_gmail_id",msg.as_string())
s.quit()
print('sent')
break
print('Sleeping...')
time.sleep(3600)
print('Trying again...')
| 2.90625
| 3
|
applications/tensorflow/click_through_rate/din/test/test_attention_fcn.py
|
kew96/GraphcoreExamples
| 0
|
12617
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests covering attention used by the DIN model.
"""
import tensorflow as tf
import unittest
import pytest
import numpy as np
import sys
from pathlib import Path
# Add common module to path
common_path = Path(Path(__file__).absolute().parent.parent.parent)
sys.path.append(str(common_path))
from common.utils import din_attention
from din.din_model import DIN
seed = 3
tf.set_random_seed(seed)
@pytest.mark.category1
@pytest.mark.ipus(1)
class TestDINFCN(unittest.TestCase):
"""Testing att layer"""
@classmethod
def setUpClass(cls):
cls.model_dtype = tf.float32
cls.ATTENTION_SIZE = 1
def test_att_results(self):
# test attention layer output
query_value = np.ones([4, 2], np.float32)
query_value = query_value * 0.8
query_inp = tf.placeholder(shape=[4, 2], dtype='float32')
facts_value = np.ones([4, 8, 2], np.float32)
facts_value = facts_value * 0.5
facts_inp = tf.placeholder(shape=[4, 8, 2], dtype='float32')
mask_value = np.ones([4, 8], np.float32)
mask_value = mask_value * 0.2
mask_inp = tf.placeholder(shape=[4, 8], dtype='float32')
out = din_attention(query_inp, facts_inp, self.ATTENTION_SIZE, mask_inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(out, feed_dict={query_inp: query_value, facts_inp: facts_value, mask_inp: mask_value})
        y0 = np.float32(0.5)
        y1 = np.float32(0.5)
        self.assertAlmostEqual(output[0, 0, 0], y0, delta = 0.01)
        self.assertAlmostEqual(output[0, 0, 1], y1, delta = 0.01)
def test_fcn_results(self):
# test fcn results
inputs_value = np.ones([2, 6, 2], np.float32)
inp = tf.placeholder(shape=[2, 6, 2], dtype='float32')
y_hat = DIN.build_fcn_net(self, inp)
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
y = sess.run(y_hat, feed_dict={inp: inputs_value})
y0 = np.float32(0.5225718)
y1 = np.float32(0.47742826)
self.assertAlmostEqual(y[0, 0, 0], y0, delta = 0.01)
self.assertAlmostEqual(y[0, 0, 1], y1, delta = 0.01)
| 1.796875
| 2
|
shiva/constants.py
|
tooxie/shiva-server
| 70
|
12618
|
# -*- coding: utf-8 -*-
class HTTP:
BAD_REQUEST = 400
UNAUTHORIZED = 401
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
CONFLICT = 409
UNSUPPORTED_MEDIA_TYPE = 415
| 1.382813
| 1
|
tests/test_cmdline.py
|
lin-zone/scrapyu
| 1
|
12619
|
<filename>tests/test_cmdline.py
import sys
import subprocess
from tempfile import mkdtemp, TemporaryFile
from path import Path
from tests import TEST_DIR
args = (sys.executable, '-m', 'scrapyu.cmdline')
def test_genspider_list():
new_args = args + ('genspider', '-l')
res = subprocess.check_output(new_args)
assert res.split() == [b'Available', b'templates:', b'single', b'single_splash']
def test_single_template():
single_test_template = Path(TEST_DIR) / 'test_templates' / 'single.py'
cwd = mkdtemp()
new_args = args + ('genspider', 'single', 'www.scrapytest.org', '-t', 'single')
with TemporaryFile() as out:
subprocess.call(new_args, stdout=out, stderr=out, cwd=cwd)
t = Path(cwd) / 'single.py'
assert t.exists() is True
assert t.read_text() == single_test_template.read_text()
def test_single_splash_template():
single_splash_test_template = Path(TEST_DIR) / 'test_templates' / 'single_splash.py'
cwd = mkdtemp()
new_args = args + ('genspider', 'single-splash', 'www.scrapytest.org', '-t', 'single_splash')
with TemporaryFile() as out:
subprocess.call(new_args, stdout=out, stderr=out, cwd=cwd)
t = Path(cwd) / 'single_splash.py'
assert t.exists() is True
assert t.read_text() == single_splash_test_template.read_text()
| 2.4375
| 2
|
src/olympia/amo/cron.py
|
dante381/addons-server
| 0
|
12620
|
<gh_stars>0
from datetime import datetime, timedelta
from django.core.files.storage import default_storage as storage
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon
from olympia.addons.tasks import delete_addons
from olympia.amo.utils import chunked
from olympia.files.models import FileUpload
from olympia.scanners.models import ScannerResult
from olympia.amo.models import FakeEmail
from . import tasks
from .sitemap import (
get_sitemap_path,
get_sitemaps,
get_sitemap_section_pages,
render_index_xml,
)
log = olympia.core.logger.getLogger('z.cron')
def gc(test_result=True):
"""Site-wide garbage collections."""
def days_ago(days):
return datetime.today() - timedelta(days=days)
log.info('Collecting data to delete')
logs = (
ActivityLog.objects.filter(created__lt=days_ago(90))
.exclude(action__in=amo.LOG_KEEP)
.values_list('id', flat=True)
)
for chunk in chunked(logs, 100):
tasks.delete_logs.delay(chunk)
two_weeks_ago = days_ago(15)
# Hard-delete stale add-ons with no versions. No email should be sent.
versionless_addons = Addon.unfiltered.filter(
versions__pk=None, created__lte=two_weeks_ago
).values_list('pk', flat=True)
for chunk in chunked(versionless_addons, 100):
delete_addons.delay(chunk, with_deleted=True)
# Delete stale FileUploads.
stale_uploads = FileUpload.objects.filter(created__lte=two_weeks_ago).order_by('id')
for file_upload in stale_uploads:
log.info(
'[FileUpload:{uuid}] Removing file: {path}'.format(
uuid=file_upload.uuid, path=file_upload.path
)
)
if file_upload.path:
try:
storage.delete(file_upload.path)
except OSError:
pass
file_upload.delete()
# Delete stale ScannerResults.
ScannerResult.objects.filter(upload=None, version=None).delete()
# Delete fake emails older than 90 days
FakeEmail.objects.filter(created__lte=days_ago(90)).delete()
def write_sitemaps(section=None, app_name=None):
index_filename = get_sitemap_path(None, None)
sitemaps = get_sitemaps()
if (not section or section == 'index') and not app_name:
with storage.open(index_filename, 'w') as index_file:
log.info('Writing sitemap index')
index_file.write(render_index_xml(sitemaps))
for _section, _app_name, _page in get_sitemap_section_pages(sitemaps):
if (section and section != _section) or (app_name and app_name != _app_name):
continue
if _page % 1000 == 1:
# log an info message every 1000 pages in a _section, _app_name
log.info(f'Writing sitemap file for {_section}, {_app_name}, {_page}')
filename = get_sitemap_path(_section, _app_name, _page)
with storage.open(filename, 'w') as sitemap_file:
sitemap_object = sitemaps.get((_section, amo.APPS.get(_app_name)))
if not sitemap_object:
continue
content = sitemap_object.render(app_name=_app_name, page=_page)
sitemap_file.write(content)
| 1.9375
| 2
|
day8/test_day8.py
|
bwbeach/advent-of-code-2020
| 0
|
12621
|
<filename>day8/test_day8.py
from day8.day8 import fix_code, parse_code, run
SAMPLE_CODE_LOOP = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6
"""
SAMPLE_CODE_HALT = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
nop -4
acc +6
"""
def test_parse():
assert parse_code("nop +0\nacc +1\nacc -6") == [("nop", 0), ("acc", 1), ("acc", -6)]
def test_run_loop():
code = parse_code(SAMPLE_CODE_LOOP)
assert run(code) == ("loop", 5)
def test_run_halt():
code = parse_code(SAMPLE_CODE_HALT)
assert run(code) == ("halt", 8)
def test_fix_code():
assert fix_code(parse_code(SAMPLE_CODE_LOOP)) == parse_code(SAMPLE_CODE_HALT)
| 2.96875
| 3
|
biosys/apps/main/tests/api/test_misc.py
|
florianm/biosys
| 2
|
12622
|
<reponame>florianm/biosys
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from main.models import Project
from main.tests import factories
from main.tests.api import helpers
class TestWhoAmI(helpers.BaseUserTestCase):
def setUp(self):
super(TestWhoAmI, self).setUp()
self.url = reverse('api:whoami')
def test_get(self):
client = self.anonymous_client
self.assertEqual(
client.get(self.url).status_code,
status.HTTP_200_OK
)
user = factories.UserFactory()
user.set_password('password')
user.save()
client = APIClient()
self.assertTrue(client.login(username=user.username, password='password'))
resp = client.get(self.url)
self.assertEqual(
resp.status_code,
status.HTTP_200_OK
)
# test that the response contains username, first and last name and email at least and the id
data = resp.json()
self.assertEqual(user.username, data['username'])
self.assertEqual(user.first_name, data['first_name'])
self.assertEqual(user.last_name, data['last_name'])
self.assertEqual(user.email, data['email'])
self.assertEqual(user.id, data['id'])
# test that the password is not in the returned fields
self.assertFalse('password' in data)
def test_not_allowed_methods(self):
client = self.readonly_client
self.assertEqual(
client.post(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
self.assertEqual(
client.put(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
self.assertEqual(
client.patch(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
class TestStatistics(TestCase):
def setUp(self):
self.url = reverse('api:statistics')
def test_get(self):
anonymous = APIClient()
client = anonymous
self.assertIn(
client.get(self.url).status_code,
[status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
)
user = factories.UserFactory.create()
user.set_password('password')
user.save()
client = APIClient()
self.assertTrue(client.login(username=user.username, password='password'))
resp = client.get(self.url)
self.assertEqual(
resp.status_code,
status.HTTP_200_OK
)
# expected response with no data
expected = {
'projects': {'total': 0},
'datasets': {
'total': 0,
'generic': {'total': 0},
'observation': {'total': 0},
'speciesObservation': {'total': 0},
},
'records': {
'total': 0,
'generic': {'total': 0},
'observation': {'total': 0},
'speciesObservation': {'total': 0},
},
'sites': {'total': 0},
}
self.assertEqual(expected, resp.json())
# create one project
program = factories.ProgramFactory.create()
project = factories.ProjectFactory.create(program=program)
expected['projects']['total'] = 1
resp = client.get(self.url)
self.assertEqual(
resp.status_code,
status.HTTP_200_OK
)
self.assertEqual(expected, resp.json())
# create some sites
count = 3
factories.SiteFactory.create_batch(
count,
project=project
)
expected['sites']['total'] = count
resp = client.get(self.url)
self.assertEqual(
resp.status_code,
status.HTTP_200_OK
)
self.assertEqual(expected, resp.json())
def test_not_allowed_methods(self):
user = factories.UserFactory.create()
user.set_password('password')
user.save()
client = APIClient()
self.assertTrue(client.login(username=user.username, password='password'))
self.assertEqual(
client.post(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
self.assertEqual(
client.put(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
self.assertEqual(
client.patch(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
class TestSpecies(TestCase):
# set the species list to be the testing one
species_facade_class = helpers.LightSpeciesFacade
def setUp(self):
from main.api.views import SpeciesMixin
SpeciesMixin.species_facade_class = self.species_facade_class
self.url = reverse('api:species')
def test_get(self):
anonymous = APIClient()
client = anonymous
self.assertEqual(
client.get(self.url).status_code,
status.HTTP_200_OK
)
user = factories.UserFactory.create()
user.set_password('password')
user.save()
client = APIClient()
self.assertTrue(client.login(username=user.username, password='password'))
resp = client.get(self.url)
self.assertEqual(
resp.status_code,
status.HTTP_200_OK
)
def test_not_allowed_methods(self):
user = factories.UserFactory.create()
user.set_password('password')
user.save()
client = APIClient()
self.assertTrue(client.login(username=user.username, password='password'))
self.assertEqual(
client.post(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
self.assertEqual(
client.put(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
self.assertEqual(
client.patch(self.url, {}).status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
| 2.3125
| 2
|
src/netwrok/analytics.py
|
simonwittber/netwrok-server
| 16
|
12623
|
<gh_stars>10-100
import asyncio
import aiopg
from . import nwdb
from . import core
@core.handler
def register(client, path, event):
"""
    Register an event occurring at path. Created time is automatically added.
Useful for generic analytics type stuff.
"""
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
insert into analytics(member_id, path, event)
select %s, %s, %s
""", [client.session.get("member_id", None), path, event])
| 2.390625
| 2
|
monte_carlo/helpers/muaanalytical.py
|
nathhje/bachelorproject
| 0
|
12624
|
# -*- coding: utf-8 -*-
"""
Determines the reflectance based on r and mua.
"""
import math
import helpers.analyticalvalues as av
def reflectance(mua, r):
"""
mua: the absorption coefficient used.
r: the radial distance used.
"""
values = av.analyticalValues(r, mua)
# the value of the reflectance is determined
return (values.z0 * (values.ueff + values.rho1 ** -1) * math.exp( -values.ueff * values.rho1)
/ (values.rho1 ** 2) + (values.z0 + 2 * values.zb) * (values.ueff + values.rho2 ** -1)
* math.exp( -values.ueff * values.rho2) / (values.rho2 ** 2)) / 4 / math.pi
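# Illustrative usage sketch (added for clarity; not part of the original module). The numbers below are
# placeholder values; the units and valid ranges depend on helpers.analyticalvalues.
#
#   R = reflectance(mua=0.1, r=1.0)
#   print(R)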
| 3.21875
| 3
|
main/test_data.py
|
anna01111/demo_web_ui_test_suite
| 0
|
12625
|
<filename>main/test_data.py
from faker import Faker
"""
More info: https://microservices-demo.github.io/docs/user-accounts.html
"""
# The demo app is shipped with the following account:
username = 'user'
password = 'password'
# Fake data that is used for new registrations:
faker = Faker()
autogenerated_username = faker.user_name()
autogenerated_first_name = faker.first_name()
autogenerated_last_name = faker.last_name()
autogenerated_email = faker.email()
autogenerated_password = faker.password()
| 2.0625
| 2
|
pfr/run.py
|
AnnaMag/pdf-flask-react
| 2
|
12626
|
<reponame>AnnaMag/pdf-flask-react<filename>pfr/run.py
from io import StringIO
from io import BytesIO
import urllib
from urllib import request
import utils
from pdf_processing import scrape_gazette_names, get_info_outline
from data_parsing import save_to_dict
if __name__ == '__main__':
# not saving anything locally, just the names listed on the webpage to access the files later
url = 'http://www.gpwonline.co.za/Gazettes/Pages/Published-National-Regulation-Gazettes.aspx'
doc_names = scrape_gazette_names(url)
db_name = 'gov_docs'
db_collection = 'nat_reg'
collection = utils.set_collection(db_name, db_collection)
for url in doc_names[0][3:5]:
print(url)
fp = BytesIO(urllib.request.urlopen(url).read())
info, device, pages_skipped = get_info_outline(fp)
print(info)
        #pages_skipped should be the pages selected for extraction - for now it is only used to monitor problems
gaz_dict = save_to_dict(device.interesting_text, device.aux_text, \
pages_skipped, info, device.page_number, url)
print(gaz_dict)
utils.write_db(collection, gaz_dict)
| 2.796875
| 3
|
day9/day9.py
|
jaredledvina/adventofcode2020
| 1
|
12627
|
<filename>day9/day9.py
#!/usr/bin/env python3
import itertools
def read_input():
with open('input.txt') as f:
puzzle_input = f.read().splitlines()
puzzle_input = [int(num) for num in puzzle_input]
return puzzle_input
def part1(puzzle_input):
preamble = puzzle_input[:25]
remaining = puzzle_input[25:]
for item in remaining:
found_match = False
for product in itertools.product(preamble, preamble):
if product[0] + product[1] == item:
found_match = True
preamble.append(item)
preamble.pop(0)
break
if not found_match:
return item
def part2(puzzle_input):
invalid = part1(puzzle_input)
for position in range(len(puzzle_input)):
combination_position = 0
for combination in itertools.accumulate(puzzle_input[position:]):
if combination == invalid:
return min(puzzle_input[position:combination_position+position]) + max(puzzle_input[position:combination_position+position])
combination_position += 1
def main():
puzzle_input = read_input()
print(part1(puzzle_input))
print(part2(puzzle_input))
if __name__ == '__main__':
main()
| 3.6875
| 4
|
streamlitfront/tests/common.py
|
i2mint/streamlitfront
| 0
|
12628
|
from contextlib import contextmanager
from functools import partial
from inspect import Parameter
from random import choice, randint, uniform
import string
from typing import Any
from i2 import Sig
from numbers import Number
from sys import platform
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from strand import run_process
from streamlitfront.run_app import run_app
from time import sleep
import dill
import pickle
STREAMLIT_APP_URL = 'http://localhost:8501'
@contextmanager
def dispatch_funcs_with_selenium(funcs, headless=False):
"""
Dispatches the functions in a streamlit application and build a selenium object
representing the root of the DOM for the application.
"""
serialize_funcs = False
try:
pickle.dumps(funcs)
except:
serialize_funcs = True
_funcs = dill.dumps(funcs) if serialize_funcs else funcs
with run_process(func=run_app, func_kwargs={'funcs': _funcs}, is_ready=3) as proc:
options = ChromeOptions()
# options.add_argument('--no-sandbox')
options.add_argument('--window-size=1920,1080')
if headless:
options.add_argument('--headless')
# options.add_argument('--disable-gpu')
# options.add_argument('--allow-running-insecure-content')
dom = Chrome(service=Service(ChromeDriverManager().install()), options=options)
dom.get(STREAMLIT_APP_URL)
try:
yield dom
finally:
dom.close()
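# Illustrative usage sketch (added for clarity; not part of the original module). 'add_numbers' is a
# placeholder function; any callable that pickle or dill can serialize should work here.
#
#   def add_numbers(x: int, y: int) -> int:
#       return x + y
#
#   with dispatch_funcs_with_selenium([add_numbers], headless=True) as dom:
#       select_func(0, dom)          # pick the function in the app's radio group
#       send_input(1, 0, dom)        # fill in the first argument
#       send_input(2, 1, dom)        # fill in the second argument
#       assert compute_output(add_numbers, dom) == 3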
def give_a_chance_to_render_element(func):
"""
Gives a chance to the application to render the element by trying up to three times
with 1 second of interval to find it before raising an error.
"""
# @wrap(func)
def wrapper(*args, **kwargs):
def _try_to_find_element(intent_nb):
try:
return func(*args, **kwargs)
except NoSuchElementException:
if intent_nb < 3:
sleep(1)
return _try_to_find_element(intent_nb + 1)
raise
return _try_to_find_element(1)
return wrapper
@give_a_chance_to_render_element
def find_element_by_css_selector(css_selector, root):
return root.find_element(By.CSS_SELECTOR, css_selector)
def select_func(idx, root):
radio_button = find_element_by_css_selector(
f".block-container .stRadio div[role='radiogroup'] label:nth-child({idx + 1})",
root,
)
radio_button.click()
sleep(0.5)
def send_input(input_, idx, root):
def get_input_type():
if isinstance(input_, Number):
return 'number'
if isinstance(input_, str):
return 'text'
input_type = get_input_type()
input_el = find_element_by_css_selector(
f".main .element-container:nth-child({idx + 2}) input[type='{input_type}']",
root,
)
input_el.click()
select_all_first_key = Keys.COMMAND if platform == 'darwin' else Keys.CONTROL
input_el.send_keys(select_all_first_key, 'a')
input_el.send_keys(str(input_))
def compute_output(func, root):
def get_output(previous_output=None, intent_nb=1):
output_el = find_element_by_css_selector(output_css_selector, root)
if output_el.find_elements(By.TAG_NAME, 'code'):
output_el = find_element_by_css_selector('code', output_el)
output = output_el.text
return_annot = Sig(func).return_annotation
if return_annot not in (Parameter.empty, Any):
output = return_annot(output)
if previous_output is not None and output == previous_output and intent_nb < 3:
sleep(1)
return get_output(previous_output, intent_nb + 1)
return output
def get_previous_output():
if root.find_elements(By.CSS_SELECTOR, output_css_selector):
return get_output()
nb_args = len(Sig(func))
output_css_selector = f'.element-container:nth-child({nb_args + 3}) .stMarkdown p'
previous_output = get_previous_output()
submit_button = find_element_by_css_selector(
f'.element-container:nth-child({nb_args + 2}) button', root
)
submit_button.click()
return get_output(previous_output)
| 2.46875
| 2
|
Python_files/analyse.py
|
Deniz-shelby/goodreads_webscrap
| 0
|
12629
|
<gh_stars>0
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.preprocessing import MinMaxScaler
import warnings
import scipy.stats as st
import statsmodels as sm
def analyse(df_input,df_all_input):
df = df_input
fig, ax = plt.subplots(figsize=(17,8))
plt.scatter(df['num_pages'],df['num_ratings'],
label = 'books',
color = 'lightpink',
edgecolor = 'darkviolet')
plt.xlabel('num_pages', fontsize=20,labelpad=20)
plt.ylabel('num_ratings', fontsize=20,labelpad=20)
plt.title('2D Scatterplot', fontsize=38,y=1.15)
plt.xlim(0,1900)
plt.xticks(np.arange(0,1900,100),fontsize=14, rotation=45)
#plt.ylim(0,max(df['num_ratings']))
plt.yticks(np.arange(0,max(df['num_ratings']),1000000),fontsize=14)
plt.grid(True,linestyle='dashed')
plt.show()
# 3
x=df['num_pages']
y=df['num_ratings']
# Pearson
pearson = st.pearsonr(x, y)
print(f'Pearson: Correlation= {pearson[0]} , p-value= {pearson[1]}')
# Spear
spear = st.spearmanr(x, y)
print(f'Spear: Correlation= {spear[0]} , p-value= {spear[1]}')
# Kendal
kendal = st.kendalltau(x,y)
print(f'Kendal: Correlation= {kendal [0]} , p-value= {kendal [1]}')
# python
python_corr = df['num_pages'].corr(df['num_ratings'])
print(f'Correlation= {python_corr}')
#### avg_rating
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.avg_rating,
bins = np.arange(3.5,4.65,0.1), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(3.5,4.65,0.1),fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("Averge Rating", fontsize=20,labelpad=20)
plt.ylabel("Books", fontsize=20,labelpad=20)
plt.title('Distribution of avg_rating', fontsize=28,y=1.15)
plt.grid(True,linestyle='dashed')
plt.show()
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.avg_rating,
bins = np.arange(3.5,4.65,0.025), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(3.5,4.65,0.1),fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("Averge Rating", fontsize=20,labelpad=20)
plt.ylabel("Books", fontsize=20,labelpad=20)
plt.title('Distribution of avg_rating', fontsize=28,y=1.15)
plt.grid(True,linestyle='dashed')
plt.show()
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.avg_rating,
bins = np.arange(3.5,4.65,0.01), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(3.5,4.65,0.1),fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("Averge Rating", fontsize=20,labelpad=20)
plt.ylabel("Books", fontsize=20,labelpad=20)
plt.title('Distribution of avg_rating', fontsize=28,y=1.15)
plt.grid(True,linestyle='dashed')
plt.show()
### 4
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.minmax_norm_ratings,
bins = np.arange(0,10,0.5), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(0,10,1),fontsize=14, rotation=45)
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("minmax_norm Rating", fontsize=20,labelpad=20)
plt.ylabel("Books", fontsize=20,labelpad=20)
plt.title('Distribution of minmax_norm_ratings', fontsize=28,y=1.15)
plt.grid(True,linestyle='dashed')
plt.xlim(0,10)
plt.show()
### 4
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.minmax_norm_ratings,
bins = np.arange(0,10,0.1), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(0,10,1),fontsize=14, rotation=45)
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("minmax_norm Rating", fontsize=20,labelpad=20)
plt.ylabel("Books", fontsize=20,labelpad=20)
plt.title('Distribution of minmax_norm_ratings', fontsize=28,y=1.15)
plt.grid(True,linestyle='dashed')
plt.xlim(0,10)
plt.show()
### 5
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.mean_norm_ratings,
bins = np.arange(0,10,0.5), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(2,9,1),fontsize=14, rotation=45)
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("mean_norm Rating", fontsize=20,labelpad=20)
plt.ylabel("books", fontsize=20,labelpad=20)
plt.title('Distribution of mean_norm_ratings', fontsize=28,y=1.15)
plt.grid(True,linestyle='dashed')
plt.xlim(2,9)
plt.show()
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.mean_norm_ratings,
bins = np.arange(2,9,0.1), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(0,10,1),fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("mean_norm Rating", fontsize=20,labelpad=20)
plt.ylabel("books", fontsize=20,labelpad=20)
plt.title('Distribution of mean_norm_ratings', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
plt.xlim(2,9)
plt.show()
# 6
fig, ax = plt.subplots(figsize=(14,8))
bins =np.arange(0,10,1)
plt.hist([df['minmax_norm_ratings'],df['mean_norm_ratings']],
bins,
             label=['minmax_norm_ratings','mean_norm_ratings'],
color=['cornflowerblue','lightpink'],
edgecolor = "white")
plt.xticks(np.arange(0,10,0.5),fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("norm_rating", fontsize=20,labelpad=20)
plt.ylabel("books", fontsize=20,labelpad=20)
plt.title('Distribution of mean_norm_ratings', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
plt.xlim(0,10)
plt.show()
fig, ax = plt.subplots(figsize=(17,8))
bins =np.arange(0,10,0.5)
plt.hist([df['minmax_norm_ratings'],df['mean_norm_ratings']],
bins,
             label=['minmax_norm_ratings','mean_norm_ratings'],
color=['cornflowerblue','lightpink'],
edgecolor = "white")
plt.xticks(np.arange(0,10,0.5),fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("norm_rating", fontsize=20,labelpad=20)
plt.ylabel("Books", fontsize=20,labelpad=20)
plt.title('Distribution of mean_norm_ratings', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
plt.xlim(0,10)
plt.show()
    rcParams['figure.figsize'] = (18, 10)
    plt.style.use('ggplot')
# Create models from data
def best_fit_distribution(data, bins=200, ax=None):
"""Model data by finding best fit distribution to data"""
# Get histogram of original data
y, x = np.histogram(data, bins=bins, density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
# Distributions to check
DISTRIBUTIONS = [
st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine,
st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk,
#st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon,
st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r,
st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss,
st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable,
st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf,
st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal,
st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda,
st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy
]
# Best holders
best_distribution = st.norm
best_params = (0.0, 1.0)
best_sse = np.inf
# Estimate distribution parameters from data
for distribution in DISTRIBUTIONS:
# Try to fit the distribution
try:
# Ignore warnings from data that can't be fit
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
# fit dist to data
params = distribution.fit(data)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
# if axis pass in add to plot
try:
if ax:
pd.Series(pdf, x).plot(ax=ax)
except Exception:
pass
# identify if this distribution is better
if best_sse > sse > 0:
best_distribution = distribution
best_params = params
best_sse = sse
except Exception:
pass
return (best_distribution.name, best_params)
def make_pdf(dist, params, size=10000):
"""Generate distributions's Probability Distribution Function """
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Get sane start and end points of distribution
start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)
# Build PDF and turn into pandas Series
x = np.linspace(start, end, size)
y = dist.pdf(x, loc=loc, scale=scale, *arg)
pdf = pd.Series(y, x)
return pdf
# Plot for comparison takes time
plt.figure(figsize=(15,10))
#ax = data.plot(kind='hist', bins=50, normed=True, alpha=0.5, color=plt.rcParams['axes.color_cycle'][1])
ax = df.minmax_norm_ratings.hist(
bins=20,
alpha=0.5,
density=True,
color='cornflowerblue',
edgecolor = 'white')
# Save plot limits
dataYLim = ax.get_ylim()
# Find best fit distribution
best_fit_name, best_fit_params = best_fit_distribution(df.minmax_norm_ratings, 200, ax)
best_dist = getattr(st, best_fit_name)
# Update plots
ax.set_ylim(dataYLim)
ax.set_title(u'Minmax norm rating')
    ax.set_xlabel(u'minmax_norm_ratings')
ax.set_ylabel('Frequency')
# runs fast
    # Build the probability density function of the best-fitting distribution and plot it against the data
    pdf = make_pdf(best_dist, best_fit_params)
    plt.figure(figsize=(14,8))
    ax = pdf.plot(lw=2, label='PDF', legend=True)
df.minmax_norm_ratings.plot(kind='hist',
bins=50,
density=True,
alpha=0.5,
label='Data',
color='cornflowerblue',
legend=True,
ax=ax)
param_names = (best_dist.shapes + ', loc, scale').split(', ') if best_dist.shapes else ['loc', 'scale']
param_str = ', '.join(['{}={:0.2f}'.format(k,v) for k,v in zip(param_names, best_fit_params)])
dist_str = '{}({})'.format(best_fit_name, param_str)
ax.set_title(u'minmax_norm with best fit distribution \n' + dist_str)
ax.set_xlabel(u'norm_ratings')
ax.set_ylabel('Frequency')
########## 8
###
fig, ax = plt.subplots(figsize=(17,8))
plt.hist(df.awards_count,
bins = np.arange(0,30,1), ## change for a better bin scale
color='cornflowerblue',
edgecolor = "white")
plt.xticks(np.arange(1,30,1),fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlabel("mean_norm awards_count", fontsize=20,labelpad=20)
plt.ylabel("frequency", fontsize=20,labelpad=20)
plt.title('awards_count', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
plt.xlim(1,30)
plt.show()
fig, ax = plt.subplots(figsize=(17,8))
    aggregate = df.groupby('original_publish_year')['awards_count'].agg(['max','mean'])
    plt.hist(aggregate,
    bins = np.arange(0,30,1), ## change for a better bin scale
    color=['cornflowerblue','lightpink'],
edgecolor = "white")
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xticks(np.arange(1,30,1),fontsize=14, rotation=45)
plt.xlabel("mean_norm awards_count", fontsize=20,labelpad=20)
plt.ylabel("awards", fontsize=20,labelpad=20)
plt.title('Aggregation plot for awards', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
plt.xlim(1,30,1)
plt.show()
fig, ax = plt.subplots(figsize=(10,8))
plt.boxplot(df['awards_count'])
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xticks()
plt.ylabel("awards", fontsize=20,labelpad=20)
plt.title('Awards distribution', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
ax.set_xticks([])
plt.show()
## 9
year_minmax=df.groupby("original_publish_year")['minmax_norm_ratings'].mean().round(decimals=2)
fig, ax = plt.subplots(figsize=(17,8))
plt.plot(year_minmax,color='cornflowerblue')
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xticks(np.arange(1900,2001,10),fontsize=14, rotation=45)
plt.xlabel("year", fontsize=20,labelpad=20)
plt.ylabel("aminmax_norm_ratings", fontsize=20,labelpad=20)
plt.title('Average Ratings by Year', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
plt.xlim(1900,2000)
plt.show()
##10
fig, ax = plt.subplots(figsize=(17,8))
plt.scatter(df['original_publish_year'],df['minmax_norm_ratings'],
label = 'books',
color = 'lightpink',
edgecolor = 'darkviolet')
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xticks(np.arange(1900,2001,10),fontsize=14, rotation=45)
plt.xlabel("year", fontsize=20,labelpad=20)
plt.ylabel("aminmax_norm_ratings", fontsize=20,labelpad=20)
plt.title('Average Ratings by Year', fontsize=28,y=1.2)
plt.grid(True,linestyle='dashed')
plt.xlim(1900,2000)
plt.show()
df_all = df_all_input
    count_awards = len(df)  # all books with awards
count_all = len(df_all) # get all
#Series all
series_count_all = df_all['series'].value_counts()
count_have_series_all = series_count_all[True]
count_no_series_all = series_count_all[False]
prob_series_all=count_have_series_all/count_all
prob_series_perc_all=round((count_have_series_all/count_all)*100,2)
    print(f'Probability of having a series among all books: {prob_series_perc_all} %')
#Series in award
series_count = df['series'].value_counts()
count_have_series = series_count[True]
count_no_series = series_count[False]
prob_series=count_have_series/count_awards
prob_series_perc=round((count_have_series/count_awards)*100,2)
    print(f'Probability of having a series is: {prob_series_perc} %')
#Awards
    prob_awards=count_awards/1100
    prob_awards_perc=round((count_awards/1100)*100,2)
    print(f'Probability of having an award is: {prob_awards_perc} %')
##
prob=round(prob_awards_perc*prob_series_perc/prob_series_perc_all,2)
print(f'probability that a book that is part of a series has won an award is: {prob} %')
| 2.78125
| 3
|
3 assignment/number_of_digits_unitest.py
|
nastae/programavimas_python
| 0
|
12630
|
<gh_stars>0
import unittest
def number_of_digits(s):
return sum(c.isdigit() for c in s)
# Write unittests for the function above
class Test(unittest.TestCase):
def test_only_digits(self):
s = "123456789"
self.assertEqual(number_of_digits(s), 9)
def test_only_letters(self):
s = "abcdef"
self.assertEqual(number_of_digits(s), 0)
def test_digits_between_letters(self):
s = "asd123asd123asd"
self.assertEqual(number_of_digits(s), 6)
def test_letters_between_digits(self):
s = "123asd123asd123"
self.assertEqual(number_of_digits(s), 9)
def test_neither_letter_or_digit(self):
s = ",./;';'[]`"
self.assertEqual(number_of_digits(s), 0)
if __name__ == '__main__':
unittest.main()
| 3.796875
| 4
|
scraper/edx.py
|
thanasis457/Mooc-platform
| 4
|
12631
|
import requests, json, bs4, urllib.parse, math
from . import Course, Platform
class Edx(Platform):
name = 'edX'
def _urls(self):
res = requests.get(make_url())
count = json.loads(res.text)['objects']['count']
num_pages = math.ceil(count / 20)
urls = [make_url(page=page) for page in range(1, num_pages + 1)]
return urls
def _parse(self, url):
res = requests.get(url)
courses = []
results = res.json()['objects']['results']
for result in results:
title = result['title']
if result['full_description']:
description = html_to_text(result['full_description'])
else:
description = result['short_description']
snippet = ''
if result['short_description'] and result['short_description'] != '.':
snippet = result['short_description']
url = result['marketing_url']
tags = [subject_uuids.get(uuid) for uuid in result['subject_uuids']]
partners = [result.get('org')]
course = Course(title, partners, self.name,
description, tags, url, snippet=snippet)
courses.append(course)
return courses
subject_uuids = {'d8244ef2-45fb-4be3-a9d7-a6749cee3b19': 'Architecture',
'2cc66121-0c07-407b-96c4-99305359a36f': 'Art & Culture',
'9d5b5edb-254a-4d54-b430-776f1f00eaf0': 'Biology & Life Sciences',
'409d43f7-ff36-4834-9c28-252132347d87': 'Business & Management',
'c5ec1f86-4e59-4273-8e22-ceec2b8d10a2': 'Chemistry',
'605bb663-a342-4cf3-b5a5-fee2f33f1642': 'Communication',
'e52e2134-a4e4-4fcb-805f-cbef40812580': 'Computer Science',
'a168a80a-4b6c-4d92-9f1d-4c235206feaf': 'Data Analysis & Statistics',
'34173fb0-fe3d-4715-b4e0-02a9426a873c': 'Design',
'bab458d9-19b3-476e-864f-8abd1d1aab44': 'Economics & Finance',
'8ac7a3da-a60b-4565-b361-384baaa49279': 'Education & Teacher Training',
'337dfb23-571e-49d7-9c8e-385120dea6f3': 'Electronics',
'07406bfc-76c4-46cc-a5bf-2deace7995a6': 'Energy & Earth Sciences',
'0d7bb9ed-4492-419a-bb44-415adafd9406': 'Engineering',
'8aaac548-1930-4614-aeb4-a089dae7ae26': 'Environmental Studies',
'8a552a20-963e-475c-9b0d-4c5efe22d015': 'Ethics',
'caa4db79-f325-41ca-8e09-d5bb6e148240': 'Food & Nutrition',
'51a13a1c-7fc8-42a6-9e96-6636d10056e2': 'Health & Safety',
                 'c8579e1c-99f2-4a95-988c-3542909f055e': 'History',
'00e5d5e0-ce45-4114-84a1-50a5be706da5': 'Humanities',
'32768203-e738-4627-8b04-78b0ed2b44cb': 'Language',
'4925b67d-01c4-4287-a8d1-a3e0066113b8': 'Law',
'74b6ed2a-3ba0-49be-adc9-53f7256a12e1': 'Literature',
'a669e004-cbc0-4b68-8882-234c12e1cce4': 'Math',
'a5db73b2-05b4-4284-beef-c7876ec1499b': 'Medicine',
'f520dcc1-f5b7-42fe-a757-8acfb1e9e79d': 'Music',
'830f46dc-624e-46f4-9df0-e2bc6b346956': 'Philosophy & Ethics',
'88eb7ca7-2296-457d-8aac-e5f7503a9333': 'Physics',
'f830cfeb-bb7e-46ed-859d-e2a9f136499f': 'Science',
'eefb009b-0a02-49e9-b1b1-249982b6ce86': 'Social Sciences'}
def make_url(page=1):
params = {'selected_facets[]': 'transcript_languages_exact:English',
'partner': 'edx',
'content_type[]': 'courserun',
'page': page,
'page_size': 20}
return 'https://www.edx.org/api/v1/catalog/search?' + urllib.parse.urlencode(params)
def html_to_text(html):
soup = bs4.BeautifulSoup(html, 'lxml')
return soup.text
| 2.84375
| 3
|
main.py
|
LaudateCorpus1/TotalConnect2.0_API-Arm-Disarm
| 1
|
12632
|
#!/usr/local/bin/python2.7
#FREEBSD 2 Minutes ARP Expires - /bin/echo "net.link.ether.inet.max_age 300" >> /etc/sysctl.conf
#Crontab -e "* * * * * /usr/local/bin/python2.7 /root/Security.py"
import subprocess
import ConfigParser
import string, os, sys, httplib
import xml.etree.ElementTree as ET
from datetime import datetime, time
now = datetime.now()
now_time = now.time()
#---- BOL FOR CONFIGURATION INI ----#
# Documentation: https://wiki.python.org/moin/ConfigParserExamples #
Config = ConfigParser.ConfigParser()
Config.read("Security.ini")
cfgfile = open("Security.ini")
def BoolConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.getboolean(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
state = BoolConfigSectionMap("Status")['armed']
#---- EOL FOR CONFIGURATION INI ----#
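# Illustrative Security.ini layout (added for clarity; inferred from the keys read above and not part of
# the original script - adjust the values to your own TotalConnect account):
#
#   [Authentication]
#   username = your_tc2_username
#   password = your_tc2_password
#
#   [Status]
#   armed = False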
device1 = '00:00:00:00:00:00'
device2 = '00:00:00:00:00:00'
device3 = '00:00:00:00:00:00'
#---- BOL for LOG Output ---- #
Log = open('SecurityAuditlog.txt', 'w')
print >> Log, "---------",now_time,"---------"
#---- BOL API Section ----#
def TC2_SOAPSessionID():
global sessionHash
server_addr = "rs.alarmnet.com"
service_action = "/TC21API/TC2.asmx"
username = ConfigSectionMap("Authentication")['username']
password = ConfigSectionMap("Authentication")['password']
body = """
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><soapenv:Header/><soapenv:Body><tns:AuthenticateUserLoginEx xmlns:tns="https://services.alarmnet.com/TC2/"><tns:userName>%s</tns:userName>"""
body1 = """<tns:password>%s</tns:password><tns:ApplicationID>14588</tns:ApplicationID><tns:ApplicationVersion>3.14.2</tns:ApplicationVersion><tns:LocaleCode></tns:LocaleCode></tns:AuthenticateUserLoginEx></soapenv:Body></soapenv:Envelope>"""
request = httplib.HTTPSConnection(server_addr)
request.putrequest("POST", service_action)
request.putheader("Accept", "application/soap+xml, application/dime, multipart/related, text/*")
request.putheader("Content-Type", "text/xml; charset=utf-8")
request.putheader("Cache-Control", "no-cache")
request.putheader("Pragma", "no-cache")
request.putheader("SOAPAction","https://services.alarmnet.com/TC2/AuthenticateUserLoginEx")
request.putheader("Content-Length", str(len(body % username + body1 % password)))
request.endheaders()
request.send(body % username + body1 % password)
response = request.getresponse().read()
tree = ET.fromstring(response)
sessionHash = tree.find('.//{https://services.alarmnet.com/TC2/}SessionID').text
return
def TC2_DisarmSecuritySystem():
TC2_SOAPSessionID()
server_addr = "rs.alarmnet.com"
service_action = "/TC21API/TC2.asmx"
body = ("""<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:s="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<SOAP-ENV:Body>
<tns:DisarmSecuritySystem xmlns:tns="https://services.alarmnet.com/TC2/">
<tns:SessionID>%s</tns:SessionID>
<tns:LocationID>0</tns:LocationID>
<tns:DeviceID>0</tns:DeviceID>
<tns:UserCode>-1</tns:UserCode>
</tns:DisarmSecuritySystem>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>""")
request = httplib.HTTPSConnection(server_addr)
request.putrequest("POST", service_action)
request.putheader("Accept", "application/soap+xml, application/dime, multipart/related, text/*")
request.putheader("Content-Type", "text/xml; charset=utf-8")
request.putheader("Cache-Control", "no-cache")
request.putheader("Pragma", "no-cache")
request.putheader("SOAPAction","https://services.alarmnet.com/TC2/DisarmSecuritySystem")
request.putheader("Content-Length", str(len(body % sessionHash)))
request.endheaders()
request.send(body % sessionHash)
response = request.getresponse().read()
tree = ET.fromstring(response)
print >> Log, "API:", tree.find('.//{https://services.alarmnet.com/TC2/}ResultData').text
return
def TC2_ArmSecuritySystem(armInt):
TC2_SOAPSessionID()
server_addr = "rs.alarmnet.com"
service_action = "/TC21API/TC2.asmx"
body = ("""<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:s="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<SOAP-ENV:Body>
<tns:ArmSecuritySystem xmlns:tns="https://services.alarmnet.com/TC2/">
<tns:SessionID>%s</tns:SessionID>
<tns:LocationID>0</tns:LocationID>
<tns:DeviceID>0</tns:DeviceID>""")
body1 = ("""<tns:ArmType>%s</tns:ArmType>
<tns:UserCode>-1</tns:UserCode>
</tns:ArmSecuritySystem>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>""")
request = httplib.HTTPSConnection(server_addr)
request.putrequest("POST", service_action)
request.putheader("Accept", "application/soap+xml, application/dime, multipart/related, text/*")
request.putheader("Content-Type", "text/xml; charset=utf-8")
request.putheader("Cache-Control", "no-cache")
request.putheader("Pragma", "no-cache")
request.putheader("SOAPAction","https://services.alarmnet.com/TC2/ArmSecuritySystem")
request.putheader("Content-Length", str(len(body % sessionHash + body1 % armInt)))
request.endheaders()
request.send(body % sessionHash + body1 % armInt)
response = request.getresponse().read()
tree = ET.fromstring(response)
print >> Log, "API:", tree.find('.//{https://services.alarmnet.com/TC2/}ResultData').text
return
#---- EOL API Section ----#
def countPeople():
global peopleTotal
peopleTotal=0
cmd = subprocess.Popen('/usr/sbin/arp -a -i re0_vlan4', shell=True, stdout=subprocess.PIPE)
for line in cmd.stdout:
if device1 in line:
peopleTotal += 1
print >> Log, "User1 is present",peopleTotal
if device2 in line:
peopleTotal += 1
print >> Log, "User2 is present",peopleTotal
if device3 in line:
peopleTotal += 1
print >> Log, "User3 is present",peopleTotal
# cfgfile = open("Security.ini",'w')
# Config.set('Status','armed', True)
# Config.write(cfgfile)
# cfgfile.close()
return
# ---- BOL Program Initiation and function mapping ----#
def runcheck():
countPeople()
print state, peopleTotal
#Check ENV with if Statement to see if the "Armed" boolean is true or false
if now_time >= time(23,59) or now_time <= time(5,00):
if state == False and peopleTotal >0:
cfgfile = open("Security.ini",'w')
Config.set('Status','armed', True)
Config.write(cfgfile)
cfgfile.close()
TC2_ArmSecuritySystem(1)
print >> Log, "arming - It's now between 11:59AM and 5:30AM"
else:
if state is True and peopleTotal >0:
print >> Log, "disarming - more then 0"
TC2_DisarmSecuritySystem()
cfgfile = open("Security.ini",'w')
Config.set('Status','armed', False)
Config.write(cfgfile)
cfgfile.close()
print "Disarming", state
else:
if state is False and peopleTotal <=0:
print >> Log, "arming away - less then 1"
TC2_ArmSecuritySystem(0)
cfgfile = open("Security.ini",'w')
Config.set('Status','armed', True)
Config.write(cfgfile)
cfgfile.close()
print "Arming Away", state
return
runcheck()
# ---- EOL Program Initiation and function mapping ----#
#---- Logging ---- #
print >> Log, "- Armed",state,"-",peopleTotal,"DEVICES PRESENT","-"
Log.close()
#---- EOL for LOG Output ---- #
| 2.25
| 2
|
prm/relations/migrations/0002_activity.py
|
justaname94/innovathon2019
| 0
|
12633
|
# Generated by Django 2.2.5 on 2019-09-09 21:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('relations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Datetime on which the object was created.', verbose_name='created at ')),
('modified', models.DateTimeField(auto_now=True, help_text='Datetime on which the object was last modified.', verbose_name='modified at ')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('is_active', models.BooleanField(default=True, help_text='Are you currently actively doing it?', verbose_name='Is active')),
('last_time', models.DateField(blank=True, null=True, verbose_name='Last time done')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
]
| 1.882813
| 2
|
elliptic_moab/Mesh/MeshQL.py
|
padmec-reservoir/elliptic_moab
| 0
|
12634
|
<reponame>padmec-reservoir/elliptic_moab
from typing import Type
from elliptic.Kernel.Context import ContextDelegate
from .Selector import SelectorImplementation
from .Manager import ManagerImplementation
from .Computer import ComputerImplementation
class MeshQLImplementation(ComputerImplementation, ManagerImplementation, SelectorImplementation):
def base_delegate(self) -> Type[ContextDelegate]:
class BaseDelegate(ContextDelegate):
def get_template_file(self):
return 'base.pyx.etp'
def template_kwargs(self):
return {'declare_entityhandles': self.context.context['declare_entityhandle'],
'declare_ranges': self.context.context['declare_range'],
'declare_indexes': self.context.context['declare_index'],
'declare_variables': self.context.context['declare_variable'],
'declare_tags': set(self.context.context['declare_tags'])}
def context_enter(self):
pass
def context_exit(self):
pass
return BaseDelegate
| 2.140625
| 2
|
tests/test_quil.py
|
stjordanis/quantumflow
| 99
|
12635
|
<filename>tests/test_quil.py
# Copyright 2016-2018, Rigetti Computing
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import pytest
import quantumflow as qf
QUIL_FILES = [
'hello_world.quil',
'empty.quil',
'classical_logic.quil',
'control_flow.quil',
'measure.quil',
'qaoa.quil',
'bell.quil',
# 'include.quil',
]
RUNNABLE_QUIL_FILES = QUIL_FILES[:-1]
def test_parse_quilfile():
print()
for quilfile in QUIL_FILES:
filename = 'tests/quil/'+quilfile
print("<<<"+filename+">>>")
with open(filename, 'r') as f:
quil = f.read()
qf.forest.quil_to_program(quil)
def test_run_quilfile():
print()
for quilfile in RUNNABLE_QUIL_FILES:
filename = 'tests/quil/'+quilfile
print("<<<"+filename+">>>")
with open(filename, 'r') as f:
quil = f.read()
prog = qf.forest.quil_to_program(quil)
prog.run()
def test_unparsable():
with pytest.raises(RuntimeError):
filename = 'tests/quil/unparsable.quil'
with open(filename, 'r') as f:
quil = f.read()
qf.forest.quil_to_program(quil)
| 2.375
| 2
|
gitool/util.py
|
eikendev/gitool
| 1
|
12636
|
<filename>gitool/util.py<gh_stars>1-10
import itertools
import logging
from git import Repo, InvalidGitRepositoryError
from .repository import Repository
logger = logging.getLogger("gitool")
def _list_repositories(path):
subdirectories = [p for p in path.iterdir() if p.is_dir()]
names = [p.name for p in subdirectories]
if '.git' not in names:
roots = [_list_repositories(p) for p in subdirectories]
roots = list(itertools.chain.from_iterable(roots))
else:
msg = "Discovered repository at '{}'."
logger.debug(msg.format(path))
roots = [path]
return roots
def get_repositories(path):
paths = _list_repositories(path)
repositories = list()
for p in paths:
try:
repo = Repo(str(p))
except InvalidGitRepositoryError:
msg = "'{}' is not a git repository."
logger.warning(msg.format(p))
continue
relative = p.relative_to(path)
repository = Repository(relative, repo)
repositories.append(repository)
repositories.sort()
return repositories
def list_properties(properties) -> str:
if len(properties) > 1:
return ', '.join(properties[:-1]) + ' and ' + properties[-1]
else:
return properties[0]
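# Illustrative examples (added for clarity; not part of the original module):
#
#   list_properties(['branch', 'remote', 'stash'])  # -> 'branch, remote and stash'
#   list_properties(['branch'])                     # -> 'branch'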
| 2.765625
| 3
|
taiga/projects/epics/serializers.py
|
threefoldtech/Threefold-Circles
| 0
|
12637
|
<reponame>threefoldtech/Threefold-Circles
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api import serializers
from taiga.base.fields import Field, MethodField
from taiga.base.neighbors import NeighborsSerializerMixin
from taiga.mdrender.service import render as mdrender
from taiga.projects.attachments.serializers import BasicAttachmentsInfoSerializerMixin
from taiga.projects.mixins.serializers import OwnerExtraInfoSerializerMixin
from taiga.projects.mixins.serializers import ProjectExtraInfoSerializerMixin
from taiga.projects.mixins.serializers import AssignedToExtraInfoSerializerMixin
from taiga.projects.mixins.serializers import StatusExtraInfoSerializerMixin
from taiga.projects.notifications.mixins import WatchedResourceSerializer
from taiga.projects.tagging.serializers import TaggedInProjectResourceSerializer
from taiga.projects.votes.mixins.serializers import VoteResourceSerializerMixin
class EpicListSerializer(VoteResourceSerializerMixin, WatchedResourceSerializer,
OwnerExtraInfoSerializerMixin, AssignedToExtraInfoSerializerMixin,
StatusExtraInfoSerializerMixin, ProjectExtraInfoSerializerMixin,
BasicAttachmentsInfoSerializerMixin,
TaggedInProjectResourceSerializer, serializers.LightSerializer):
id = Field()
ref = Field()
project = Field(attr="project_id")
created_date = Field()
modified_date = Field()
subject = Field()
color = Field()
epics_order = Field()
client_requirement = Field()
team_requirement = Field()
version = Field()
watchers = Field()
is_blocked = Field()
blocked_note = Field()
is_closed = MethodField()
user_stories_counts = MethodField()
def get_is_closed(self, obj):
return obj.status is not None and obj.status.is_closed
def get_user_stories_counts(self, obj):
assert hasattr(obj, "user_stories_counts"), "instance must have a user_stories_counts attribute"
return obj.user_stories_counts
class EpicSerializer(EpicListSerializer):
comment = MethodField()
blocked_note_html = MethodField()
description = Field()
description_html = MethodField()
def get_comment(self, obj):
return ""
def get_blocked_note_html(self, obj):
return mdrender(obj.project, obj.blocked_note)
def get_description_html(self, obj):
return mdrender(obj.project, obj.description)
class EpicNeighborsSerializer(NeighborsSerializerMixin, EpicSerializer):
pass
class EpicRelatedUserStorySerializer(serializers.LightSerializer):
epic = Field(attr="epic_id")
user_story = Field(attr="user_story_id")
order = Field()
| 1.210938
| 1
|
src/TMDbApi/TMTranslationUnit.py
|
shasha79/nectm
| 3
|
12638
|
<gh_stars>1-10
#
# Copyright (c) 2020 <NAME>.
#
# This file is part of NEC TM
# (see https://github.com/shasha79/nectm).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import uuid
class TMTranslationUnit:
attributes = ['source_text', 'target_text',
'source_id', 'target_id',
'source_language', 'target_language',
'source_metadata', 'target_metadata', 'metadata',
'source_pos', 'target_pos',
'tuid', 'dirty_score', 'username',
'industry', 'type', 'file_name', 'domain', 'organization',
'tm_creation_date', 'tm_change_date',
'insert_date', 'update_date', 'check_date', 'check_version']
def __init__(self, sdict={}):
self.reset(sdict)
def reset(self, sdict):
# Initialize segment fields
for attr in self.attributes:
val = None if not attr in sdict else sdict[attr]
setattr(self, attr, val)
# allocate ids
self._allocate_id('source')
self._allocate_id('target')
def _allocate_id(self, type):
text = getattr(self, type + '_text')
if text:
setattr(self, type + '_id', uuid.uuid5(uuid.NAMESPACE_URL, text))
def to_dict(self):
return dict([(a, getattr(self, a)) for a in self.attributes])
def to_dict_short(self):
return dict([(a, getattr(self, a)) for a in ['source_text', 'target_text', 'source_metadata', 'target_metadata'] if getattr(self, a)])
| 1.78125
| 2
|
tests/test_268.py
|
sungho-joo/leetcode2github
| 0
|
12639
|
#!/usr/bin/env python
import pytest
"""
Test 268. Missing Number
"""
@pytest.fixture(scope="session")
def init_variables_268():
from src.leetcode_268_missing_number import Solution
solution = Solution()
def _init_variables_268():
return solution
yield _init_variables_268
class TestClass268:
def test_solution_0(self, init_variables_268):
assert init_variables_268().missingNumber([3, 0, 1]) == 2
def test_solution_1(self, init_variables_268):
assert init_variables_268().missingNumber([0, 1]) == 2
def test_solution_2(self, init_variables_268):
assert init_variables_268().missingNumber([9, 6, 4, 2, 3, 5, 7, 0, 1]) == 8
def test_solution_3(self, init_variables_268):
assert init_variables_268().missingNumber([0]) == 1
| 2.859375
| 3
|
tests/db/test_connector.py
|
DaWeSearch/backend
| 1
|
12640
|
import unittest
import os
import json
from functions.db.connector import *
from functions.db.models import *
from functions.authentication import *
sample_search = {
"search_groups": [
{
"search_terms": ["blockchain", "distributed ledger"],
"match": "OR"
},
{
"search_terms": ["energy", "infrastructure", "smart meter"],
"match": "OR"
}
],
"match": "AND"
}
db_dict = {"db_name": "hallo", "api_key": "test"}
class TestConnector(unittest.TestCase):
def setUp(self):
name = "test_review"
self.review = add_review(name)
self.sample_query = new_query(self.review, sample_search)
with open('test_results.json', 'r') as file:
self.results = json.load(file)
save_results(self.results['records'], self.review, self.sample_query)
def test_add_review(self):
name = "test_review"
new_review = add_review(name)
review = get_review_by_id(new_review._id)
review.delete()
self.assertEqual(review._id, new_review._id)
def test_save_results(self):
query = new_query(self.review, sample_search)
jsonpath = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "..", "test_results.json"))
with open(jsonpath, 'r') as file:
results = json.load(file)
save_results(results['records'], self.review, query)
results_from_db = get_persisted_results(query).get('results')
self.assertEqual(len(results_from_db), len(results['records']))
def test_pagination(self):
page1 = get_persisted_results(self.sample_query, 1, 10).get('results')
self.assertTrue(len(page1) == 10)
page2 = get_persisted_results(self.sample_query, 2, 10).get('results')
self.assertTrue(len(page2) == 10)
self.assertNotEqual(page1, page2)
def test_get_list_of_dois_for_review(self):
dois = get_dois_for_review(self.review)
for record in self.results.get('records'):
self.assertTrue(record.get('doi') in dois)
def test_update_score(self):
user = User(name="test user")
doi = self.results.get('records')[0].get('doi')
result = get_result_by_doi(self.review, doi)
self.assertEqual(len(result.scores), 0)
evaluation = {
"user": "testmann",
"score": 2,
"comment": "test_comment"
}
update_score(self.review, result, evaluation)
self.assertEqual(result.scores[0].score, 2)
evaluation = {
"user": "testmann",
"score": 5,
"comment": "joiefjlke"
}
update_score(self.review, result, evaluation)
self.assertEqual(result.scores[0].score, 5)
self.assertEqual(len(result.scores), 1)
user.delete()
def test_delete_results_for_review(self):
num_results = len(get_dois_for_review(self.review))
self.assertGreater(num_results, 0)
delete_results_for_review(self.review)
num_results = len(get_dois_for_review(self.review))
        self.assertEqual(num_results, 0)
def tearDown(self):
delete_results_for_review(self.review)
self.review.delete()
class TestUserDB(unittest.TestCase):
# TODO rewrite test cases
def setUp(self):
username = "philosapiens"
name = "Philippe"
surname = "Kalinowski"
email = "<EMAIL>"
password = "<PASSWORD>"
# databases = DatabaseInfo()
# databases.name = "SPRINGER_API"
# databases.api_key = "5150230aac7a227ve33693f99b5697aa"
# self.user = add_user(username, name, surname, email, password)
def test_add_user(self):
username = "philosapfiens"
name = "Philippe"
surname = "Kalinowski"
email = "<EMAIL>"
password = "<PASSWORD>"
db_name = "SPRINGER_API"
api_key = "5150230aac7a227ve33693f99b5697aa"
# databases312 = DatabaseInfo.from_document(sample_databases)
# print(databases312)
new_user = add_user(username, name, surname, email, password)
# update_databases(new_user, db_dict)
# user = get_user_by_id(new_user.name)
def test_get_user_by_username(self):
user = get_user_by_username("philosapiens")
print(user.email)
def test_update_user(self):
user = get_user_by_username("philosapiens")
print(user.email)
update_user(user, user.name, "btesfd", "<EMAIL>", user.password)
user = get_user_by_username("philosapiens")
print(user.email)
def test_get_all_users(self):
print(str(get_users()))
def test_delete_users(self):
user = get_user_by_username("philosapiens")
delete_user(user)
class TestAuth(unittest.TestCase):
def setUp(self):
username = "philosapiens"
name = "Philippe"
surname = "Kalinowski"
email = "<EMAIL>"
password = "<PASSWORD>"
def test_login(self):
username = "philosapiens"
password = "<PASSWORD>"
user = get_user_by_username(username)
password_correct = check_if_password_is_correct(user, password)
print(password_correct)
token = get_jwt_for_user(user)
print(type(token))
add_jwt_to_session(user, token)
is_token_valid = check_for_token(token)
print(is_token_valid)
is_token_in_session = check_if_jwt_is_in_session(token)
print(is_token_in_session)
# remove_jwt_from_session(user)
if __name__ == '__main__':
unittest.main()
| 2.65625
| 3
|
tests/test_capstone.py
|
GrammaTech/gtirb-capstone
| 6
|
12641
|
<gh_stars>1-10
# Copyright (C) 2020 GrammaTech, Inc.
#
# This code is licensed under the MIT license. See the LICENSE file in
# the project root for license terms.
#
# This project is sponsored by the Office of Naval Research, One Liberty
# Center, 875 N. Randolph Street, Arlington, VA 22203 under contract #
# N68335-17-C-0700. The content of the information does not necessarily
# reflect the position or policy of the Government and no official
# endorsement should be inferred.
#
import pytest
import gtirb
import gtirb_capstone
@pytest.mark.commit
def test_insert_bytes():
ir = gtirb.IR()
m = gtirb.Module(
name="test",
isa=gtirb.Module.ISA.X64,
byte_order=gtirb.Module.ByteOrder.Little,
)
m.ir = ir
s = gtirb.Section(name=".text")
s.module = m
bi = gtirb.ByteInterval(
contents=b"\x00\x01\x02\x03\x04\x05\x06\x07", address=0x1000
)
bi.section = s
b = gtirb.CodeBlock(offset=2, size=2)
b.byte_interval = bi
b2 = gtirb.DataBlock(offset=6, size=2)
b2.byte_interval = bi
bi.symbolic_expressions[6] = gtirb.SymAddrConst(0, None)
ctx = gtirb_capstone.RewritingContext(ir)
ctx.modify_block_insert(m, b, b"\x08\x09", 1)
assert bi.address == 0x1000
assert bi.size == 10
assert bi.contents == b"\x00\x01\x02\x08\x09\x03\x04\x05\x06\x07"
assert b.offset == 2
assert b.size == 4
assert b2.offset == 8
assert b2.size == 2
assert 6 not in bi.symbolic_expressions
assert 8 in bi.symbolic_expressions
| 1.9375
| 2
|
python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py
|
L-Net-1992/Paddle
| 0
|
12642
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
import os
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
from test_update_loss_scaling_op_npu import TestUpdateLossScalingOpBad
paddle.enable_static()
SEED = 2021
class TestUpdateLossScalingOpMinLossScalingBad(TestUpdateLossScalingOpBad):
def setUp(self):
self.set_npu()
self.op_type = "update_loss_scaling"
self.place = paddle.NPUPlace(0)
self.init()
fluid.core.globals()['FLAGS_min_loss_scaling'] = 1639
found_inf = np.array([True], dtype=np.bool_)
x = np.random.random((1024, 1024)).astype(self.dtype)
i = np.random.randint(0, 1024, 1)
j = np.random.randint(0, 1024, 1)
x[i[0]][j[0]] = np.inf
self.inputs = {
'X': [('x0', x)],
'FoundInfinite': found_inf,
'PrevLossScaling': self.prev_loss_scaling,
'InGoodSteps': self.num_good_steps,
'InBadSteps': self.num_bad_steps
}
self.outputs = {
'Out': [('out0', np.zeros_like(x))],
'LossScaling': np.array([1639.0]).astype(self.dtype),
'OutGoodSteps': self.zero_steps,
'OutBadSteps': self.zero_steps
}
def init(self):
self.incr_ratio = 2.0
self.decr_ratio = 0.8
self.dtype = np.float32
self.prev_loss_scaling = np.array([2048]).astype(self.dtype)
self.num_good_steps = np.array([999], dtype=np.int32)
self.num_bad_steps = np.array([1], dtype=np.int32)
self.zero_steps = np.array([0], dtype=np.int32)
self.attrs = {
'incr_every_n_steps': 1000,
'decr_every_n_nan_or_inf': 2,
'incr_ratio': self.incr_ratio,
'decr_ratio': self.decr_ratio,
}
if __name__ == '__main__':
unittest.main()
| 2.03125
| 2
|
Examples/WorkingWithMimeMessages/SetEmailHeaders.py
|
Muzammil-khan/Aspose.Email-Python-Dotnet
| 5
|
12643
|
import aspose.email as ae
import datetime
def run():
# The path to the File directory.
dataDir = "Data/"
#ExStart: SetEmailHeaders
# Create an instance of MailMessage class
eml = ae.MailMessage()
# Specify ReplyTo, From, To field, Cc and Bcc Addresses
eml.reply_to_list.Add("<EMAIL>")
eml.from_address = "<EMAIL>"
eml.to.append(ae.MailAddress("<EMAIL>", "Recipient 1"))
eml.to.append(ae.MailAddress("<EMAIL>", "Recipient 2"))
eml.cc.append(ae.MailAddress("<EMAIL>", "Recipient 3"))
eml.bcc.append(ae.MailAddress("<EMAIL>", "Recipient 4"))
# Specify Date, Message subject, XMailer, Secret Header, Save message to disc
eml.subject = "test mail"
eml.date = datetime.datetime(2006, 3, 6, 12, 00)
eml.xmailer = "Aspose.Email"
eml.headers.Add("secret-header", "mystery")
eml.save(dataDir + "SetEmailHeaders_out.msg", ae.SaveOptions.default_msg)
#ExEnd: SetEmailHeaders
if __name__ == '__main__':
run()
| 2.96875
| 3
|
intrinsic/classify.py
|
seenu-andi-rajendran/plagcomps
| 2
|
12644
|
# classify.py
# Alternative methods to clustering
import sys, os, time
from random import shuffle
import cPickle
from collections import Counter
sys.path.append('../pybrain/') # add the pybrain module to the path... TODO: actually install it.
from plagcomps.shared.util import IntrinsicUtility
from ..dbconstants import username
from ..dbconstants import password
from ..dbconstants import dbname
'''
from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection, TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.structure.modules import BiasUnit
'''
import scipy
import sklearn
import sklearn.metrics
import matplotlib
import matplotlib.pyplot as pyplot
from pylab import ion, ioff, figure, draw, contourf, clf, show, hold, plot
from scipy import diag, arange, meshgrid, where
from numpy.random import multivariate_normal
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class NeuralNetworkConfidencesClassifier:
nn_filepath = os.path.join(os.path.dirname(__file__), "neural_networks/nn.xml")
dataset_filepath = os.path.join(os.path.dirname(__file__), "neural_networks/dataset.pkl")
def create_nn(self, features, num_hidden_layer_nodes):
net = buildNetwork(len(features), num_hidden_layer_nodes, 1)
return net
def create_trainer(self, network, dataset):
trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.01, verbose=True)
return trainer
def roc(self, confidences, actuals):
fpr, tpr, thresholds = sklearn.metrics.roc_curve(actuals, confidences, pos_label=1)
roc_auc = sklearn.metrics.auc(fpr, tpr)
print 'ROC area under curve:', roc_auc
# The following code is from http://scikit-learn.org/stable/auto_examples/plot_roc.html
pyplot.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
pyplot.plot([0, 1], [0, 1], 'k--')
pyplot.xlim([0.0, 1.0])
pyplot.ylim([0.0, 1.0])
pyplot.xlabel('False Positive Rate')
pyplot.ylabel('True Positive Rate')
pyplot.title('Receiver operating characteristic')
pyplot.legend(loc="lower right")
#path = "figures/roc"+str(time.time())+".pdf"
        path = os.path.join(os.path.dirname(__file__), "neural_networks/roc"+str(time.time())+".pdf")
pyplot.savefig(path)
return path, roc_auc
def construct_confidence_vectors_dataset(self, reduced_docs, features, session):
from cluster import cluster
conf_dataset = SupervisedDataSet(len(features), 1)
confidence_vectors = []
num_trues = 0
for feature in features:
vi = 0
for doc in reduced_docs:
feature_vectors = doc.get_feature_vectors([feature], session)
confidences = cluster("outlier", 2, feature_vectors, center_at_mean=True, num_to_ignore=1, impurity=.2)
for i, confidence in enumerate(confidences, 0):
if len(confidence_vectors) <= vi:
confidence_vectors.append([[], 0])
if doc.span_is_plagiarized(doc._spans[i]):
t = 1
num_trues += 1
else:
t = 0
confidence_vectors[vi][0].append(confidence)
confidence_vectors[vi][1] = t
vi += 1
num_plagiarised = num_trues / len(features)
print num_plagiarised
shuffle(confidence_vectors)
for vec in confidence_vectors:
if vec[1] == 0:
num_plagiarised -= 1
if not (vec[1] == 0 and num_plagiarised <= 0):
conf_dataset.addSample(vec[0], vec[1])
f = open(self.dataset_filepath, 'wb')
cPickle.dump(conf_dataset, f)
print 'dumped dataset file'
return conf_dataset
def read_dataset(self):
f = open(self.dataset_filepath, 'rb')
return cPickle.load(f)
def construct_and_train_nn(self, features, num_files, epochs, filepath, session):
from plagcomps.evaluation.intrinsic import _get_reduced_docs
IU = IntrinsicUtility()
all_test_files = IU.get_n_training_files(n=num_files)
reduced_docs = _get_reduced_docs("paragraph", all_test_files, session)
print 'constructing datasets...'
# dataset = self.construct_confidence_vectors_dataset(reduced_docs, features, session)
dataset = self.read_dataset()
training_dataset, testing_dataset = dataset.splitWithProportion(0.75)
print 'dataset lengths:', len(dataset), len(training_dataset), len(testing_dataset)
print
print 'creating neural network...'
net = self.create_nn(features, num_hidden_layer_nodes)
print 'creating trainer...'
trainer = self.create_trainer(net, training_dataset)
print 'training neural network for', epochs, 'epochs...'
trainer.trainEpochs(epochs)
print 'writing neural network to ' + str(filepath) + '...'
NetworkWriter.writeToFile(net, filepath)
print 'testing neural network...'
confidences = []
actuals = []
for point in testing_dataset:
confidences.append(net.activate(point[0])[0])
actuals.append(point[1][0])
print 'confidences|actuals ', zip(confidences, actuals)
print 'generating ROC curve...'
matplotlib.use('pdf')
path, auc = self.roc(confidences, actuals)
print 'area under curve =', auc
def nn_confidences(self, feature_vectors):
'''
Read the saved nn and run it.
'''
net = NetworkReader.readFrom(self.nn_filepath)
confidences = []
for feature_vector in feature_vectors:
confidences.append(net.activate(feature_vector)[0])
return confidences
# an Engine, which the Session will use for connection resources
url = "postgresql://%s:%s@%s" % (username, password, dbname)
engine = sqlalchemy.create_engine(url)
# create tables if they don't already exist
Base.metadata.create_all(engine)
# create a configured "Session" class
Session = sessionmaker(bind=engine)
if __name__ == '__main__':
session = Session()
features = ['average_sentence_length',
'average_syllables_per_word',
'avg_external_word_freq_class',
'avg_internal_word_freq_class',
'flesch_kincaid_grade',
'flesch_reading_ease',
'num_chars',
'punctuation_percentage',
'stopword_percentage',
'syntactic_complexity',
'syntactic_complexity_average']
num_hidden_layer_nodes = 20
num_files = 30
epochs = 400
filepath = os.path.join(os.path.dirname(__file__), "neural_networks/nn.xml")
NN = NeuralNetworkConfidencesClassifier()
NN.construct_and_train_nn(features, num_files, epochs, filepath, session)
| 2.140625
| 2
|
matplotlib-3.4.3/matplotlib-3.4.3/examples/images_contours_and_fields/image_transparency_blend.py
|
JohnLauFoo/clc_packages_Yu
| 1
|
12645
|
<reponame>JohnLauFoo/clc_packages_Yu
"""
==========================================
Blend transparency with color in 2D images
==========================================
Blend transparency with color to highlight parts of data with imshow.
A common use for `matplotlib.pyplot.imshow` is to plot a 2D statistical
map. The function makes it easy to visualize a 2D matrix as an image and add
transparency to the output. For example, one can plot a statistic (such as a
t-statistic) and color the transparency of each pixel according to its p-value.
This example demonstrates how you can achieve this effect.
First we will generate some data, in this case, we'll create two 2D "blobs"
in a 2D grid. One blob will be positive, and the other negative.
"""
# sphinx_gallery_thumbnail_number = 3
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
def normal_pdf(x, mean, var):
return np.exp(-(x - mean)**2 / (2*var))
# Generate the space in which the blobs will live
xmin, xmax, ymin, ymax = (0, 100, 0, 100)
n_bins = 100
xx = np.linspace(xmin, xmax, n_bins)
yy = np.linspace(ymin, ymax, n_bins)
# Generate the blobs. The range of the values is roughly -1 to 1
means_high = [20, 50]
means_low = [50, 60]
var = [150, 200]
gauss_x_high = normal_pdf(xx, means_high[0], var[0])
gauss_y_high = normal_pdf(yy, means_high[1], var[0])
gauss_x_low = normal_pdf(xx, means_low[0], var[1])
gauss_y_low = normal_pdf(yy, means_low[1], var[1])
weights = (np.outer(gauss_y_high, gauss_x_high)
- np.outer(gauss_y_low, gauss_x_low))
# We'll also create a grey background into which the pixels will fade
greys = np.full((*weights.shape, 3), 70, dtype=np.uint8)
# First we'll plot these blobs using ``imshow`` without transparency.
vmax = np.abs(weights).max()
imshow_kwargs = {
'vmax': vmax,
'vmin': -vmax,
'cmap': 'RdYlBu',
'extent': (xmin, xmax, ymin, ymax),
}
fig, ax = plt.subplots()
ax.imshow(greys)
ax.imshow(weights, **imshow_kwargs)
ax.set_axis_off()
###############################################################################
# Blending in transparency
# ========================
#
# The simplest way to include transparency when plotting data with
# `matplotlib.pyplot.imshow` is to pass an array matching the shape of
# the data to the ``alpha`` argument. For example, we'll create a gradient
# moving from left to right below.
# Create an alpha channel of linearly increasing values moving to the right.
alphas = np.ones(weights.shape)
alphas[:, 30:] = np.linspace(1, 0, 70)
# Create the figure and image
# Note that the absolute values may be slightly different
fig, ax = plt.subplots()
ax.imshow(greys)
ax.imshow(weights, alpha=alphas, **imshow_kwargs)
ax.set_axis_off()
###############################################################################
# Using transparency to highlight values with high amplitude
# ==========================================================
#
# Finally, we'll recreate the same plot, but this time we'll use transparency
# to highlight the extreme values in the data. This is often used to highlight
# data points with smaller p-values. We'll also add in contour lines to
# highlight the image values.
# Create an alpha channel based on weight values
# Any value whose absolute value is > .3 will have zero transparency (i.e. be fully opaque)
alphas = Normalize(0, .3, clip=True)(np.abs(weights))
alphas = np.clip(alphas, .4, 1) # alpha value clipped at the bottom at .4
# Create the figure and image
# Note that the absolute values may be slightly different
fig, ax = plt.subplots()
ax.imshow(greys)
ax.imshow(weights, alpha=alphas, **imshow_kwargs)
# Add contour lines to further highlight different levels.
ax.contour(weights[::-1], levels=[-.1, .1], colors='k', linestyles='-')
ax.set_axis_off()
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.imshow` / `matplotlib.pyplot.imshow`
# - `matplotlib.axes.Axes.contour` / `matplotlib.pyplot.contour`
# - `matplotlib.colors.Normalize`
# - `matplotlib.axes.Axes.set_axis_off`
| 3.5625
| 4
|
tests/test_admin.py
|
FernandoCelmer/django-global-permissions
| 30
|
12646
|
<reponame>FernandoCelmer/django-global-permissions
from django.test import TestCase
from django.core.urlresolvers import reverse
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
class GlobalPermissionsAdminTest(TestCase):
def setUp(self):
User.objects.create_superuser(username='ham', password='<PASSWORD>', email='<EMAIL>')
self.client.login(username='ham', password='<PASSWORD>')
def test_admin_simply_works(self):
resp = self.client.get(reverse('admin:global_permissions_globalpermission_changelist'))
self.assertEqual(200, resp.status_code)
| 2.3125
| 2
|
Models/utils.py
|
weslai/ecg_classification
| 1
|
12647
|
<reponame>weslai/ecg_classification
import matplotlib.pyplot as plt
import itertools
import numpy as np
from sklearn.metrics import confusion_matrix
## used to evaluate the model
def evaluate_model(history, X_test, y_test, model):
scores = model.evaluate((X_test), y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] *100))
print(history)
fig1, ax_acc = plt.subplots()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Model - Accuracy')
plt.legend(['Training', 'Validation'], loc='lower right')
plt.show()
fig2, ax_loss = plt.subplots()
plt.xlabel('Epoch')
plt.ylabel('Loss')
    plt.title('Model - Loss')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.show()
## put the return to the plot_confusion_matrix
def confustion_matrix(true_label, val_prediction):
cnf_matrix = confusion_matrix(true_label.argmax(axis=1), val_prediction.argmax(axis=1))
return cnf_matrix
## confusion matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
np.set_printoptions(precision=2)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
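# A minimal usage sketch for the two helpers above, using synthetic labels;
# the class names below are placeholders rather than the labels used
# elsewhere in this project.
if __name__ == '__main__':
    np.random.seed(0)
    true_idx = np.repeat(np.arange(5), 20)            # 100 samples covering all 5 classes
    pred_idx = np.random.randint(0, 5, size=100)      # random "predictions"
    true_label = np.eye(5)[true_idx]                  # one-hot encoding expected by confustion_matrix
    val_prediction = np.eye(5)[pred_idx]
    cm = confustion_matrix(true_label, val_prediction)
    plot_confusion_matrix(cm, classes=['c0', 'c1', 'c2', 'c3', 'c4'], normalize=True)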
| 3.34375
| 3
|
evapotranspiration/penman_monteith_daily.py
|
JRoehrig/evapotranspiration
| 2
|
12648
|
import math
import numpy as np
import pandas as pd
class PenmanMonteithDaily(object):
r"""The class *PenmanMonteithDaily* calculates daily potential evapotranspiration according to the Penman-Monteith
method as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ (Allen et al.,
1998). Reference evapotranspiration for a hypothetical grass reference crop (:math:`h=12` *cm*;
:math:`albedo=0.23`, and :math:`LAI=2.88`) is calculated by default. Wind and humidity observations at 2 meters
height as well as soil heat flux density :math:`G=0.0` *MJ/m²day* are also assumed by default.
Default values can be changed in the keyword arguments (`**kwargs`) described below.
The class *PenmanMonteithDaily* solves equation 3 in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_:
.. math::
ET = \frac{\Delta (R_n - G) + \rho_a c_p \frac{e_s - e_a}{r_a}}
{\lambda \left[ \Delta + \gamma \left( 1 + \frac{r_s}{r_a} \right) \right]}
\tag{eq. 3, p. 19}
:param elevation: elevation above sea level (*z*) *[m]*. Used in :meth:`clear_sky_shortwave_radiation` and
:meth:`atmospheric_pressure`
:type elevation: float
:param latitude: latitude (:math:`\varphi`) *[decimal degrees]*. Used in :meth:`sunset_hour_angle` and
:meth:`extraterrestrial_radiation`
:type latitude: float
:Keyword Arguments:
* **albedo** (*float*) - albedo or canopy reflection coefficient (:math:`\alpha`) *[-]*.
Range: :math:`0.0 \leq \alpha \leq 1.0`. Default :math:`albedo=0.23` for the hypothetical grass
reference crop. Used in :meth:`net_shortwave_radiation`
* **h** (*float*) - crop height (*h*) *[m]*. Default :math:`h=0.12` for the hypothetical grass reference
crop. Required to calculate the zero plane displacement height (:math:`d`) *[m]* and the roughness length
governing momentum (:math:`z_{om}`) *[m]*, both necessary for the aerodynamic resistance (:math:`r_a`) *[s/m]*.
See :meth:`aerodynamic_resistance_factor`
* **lai** (*float*) - leaf area index (:math:`LAI`) *[-]*. Default :math:`lai=2.88` for the hypothetical
grass reference crop. See *BOX 5* in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ and
:meth:`bulk_surface_resistance`
* **rl** (*float*) - bulk stomatal resistance of well-illuminated leaf (:math:`r_l`) *[s/m]*. Default
:math:`rl=100.0` for any crop. See :meth:`bulk_surface_resistance`
* **zm** (*float*) - height of wind measurements (:math:`z_m`) *[m]*. Default :math:`zm=2.0`. Required to
calculate aerodynamic resistance (:math:`r_a`) *[s/m]*. See :meth:`aerodynamic_resistance_factor`
* **zh** (*float*) - height of humidity measurements (:math:`z_h`) *[m]*. Default :math:`zh=2.0`. Required to
calculate aerodynamic resistance (:math:`r_a`) *[s/m]*. See :meth:`aerodynamic_resistance_factor`
* **g** (*float*) - soil heat flux density (:math:`G`) *[MJ/m²day]*. Default :math:`g=0.0`. This
corresponds to :math:`G` in eq. 3, p. 19 above. It can be also given with daily parameters in :meth:`et0`
.. note::
Only :attr:`elevation` and :attr:`latitude` are mandatory parameters of :meth:`PenmanMonteithDaily()`.
:attr:`albedo`, :attr:`h`, and :attr:`lai` are only necessary when calculating evapotranspiration for crops
other than reference grass.
:ivar doy: day of year *[-]*
:ivar z: elevation in meters above sea level (*z*) *[m]*
:ivar p: atmospheric pressure (*P*) *[kPa]*
:ivar u2: wind speed at height :math:`z` (:math:`u_2`) *[m/s]*
:ivar ld: latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*. See :meth:`latent_heat_of_vaporization()`
:ivar s: slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]*.
See :meth:`slope_of_saturation_vapour_pressure_curve()`
:ivar psych: psychrometric constant (:math:`\gamma`) *[kPa/°C]*. See :meth:`psychrometric_constant()`
:ivar mn: daylight hours (:math:`N`) *[hours]*. See :meth:`daylight_hours()`
:ivar es: saturation vapour pressure (:math:`e_s`) *[kPa]*. See :meth:`saturation_vapour_pressure()`
:ivar ea: actual vapour pressure (:math:`e_a`) *[kPa]*. See :meth:`actual_vapour_pressure()`
:ivar ra: daily extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation()`
:ivar rs: daily shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation()`
:ivar rs0: clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*.
See :meth:`clear_sky_shortwave_radiation()`
:ivar rns: net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]*. See :meth:`net_shortwave_radiation()`
:ivar rnl: net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]*. See :meth:`net_longwave_radiation()`
:ivar rn: net radiation (:math:`R_{n}`) *[MJ/m²day]*. :math:`R_{n} = R_{ns} - R_{nl}`
:ivar etr: radiation component of reference evapotranspiration *[mm/day]*
:ivar etw: wind component of reference evapotranspiration *[mm/day]*
:ivar et: reference evapotranspiration *[mm/day]*
Object Constants:
* **e** - ratio molecular weight of water vapour/dry air (:math:`\varepsilon`) *[-]*.
:math:`e = 0.622`
* **r** - specific gas constant *[kJ/kg.K]*. :math:`r = 0.287`
* **k** - von Karman constant (:math:`k`) *[-]*, see
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ eq. 4.
:math:`k=0.41`
Object crop specific factors:
* **d_factor** - factor of the zero plane displacement height (:math:`d`) *[-]*. :math:`d\_factor = 2.0 / 3.0`
* **zom_factor** - factor of the roughness length governing momentum transfer (:math:`z_{om}`) *[-]*.
:math:`zom\_factor = 0.123`
* **zoh_factor** - factor of the roughness length governing transfer of heat and vapour (:math:`z_{oh}`) *[-]*.
:math:`zoh\_factor = 0.1`
* **lai_active_factor** - factor of the active (sunlit) leaf area index (:math:`LAI_{active}`) *[-]* (it
considers that generally only the upper half of dense clipped grass is actively contributing to the surface
heat and vapour transfer). :math:`lai\_active\_factor = 0.5`
Calculation with :meth:`et0`::
- pm = PenmanMonteithDaily(elevation, latitude, ...)
- et0 = pm.et0(...)
Calculation with :meth:`et0_frame` given a *pandas.DataFrame()* as input parameter::
- pm = PenmanMonteithDaily(elevation, latitude, ...)
- df = pm.et0_frame(df, ...)
"""
def __init__(self, elevation, latitude, **kwargs):
self.albedo = kwargs.get('albedo', 0.23) # albedo
self.h = kwargs.get('h', 0.12) # crop height h [m]
self.zm = kwargs.get('zm', 2.0) # height of wind measurements [m]
        self.zh = kwargs.get('zh', 2.0)    # height of humidity measurements [m]
        self.lai = kwargs.get('lai', 2.88)    # leaf area index [-]
        self.rl = kwargs.get('rl', 100.0)    # bulk stomatal resistance of the well-illuminated leaf [s/m]
self.g_default = kwargs.get('g', 0.0) # soil heat flux density [MJ/m²day]
self.doy = None
self.u2 = None
self.ld = None
self.s = None
self.pc = None
self.mn = None
self.es = None
self.ea = None
self.ra = None
self.rs = None
self.rs0 = None
self.rns = None
self.rnl = None
self.rn = None
self.etr = None
self.etw = None
self.et = None
self.e = 0.622
self.r = 0.287
self.k = 0.41
self.d_factor = 2.0 / 3.0
self.zom_factor = 0.123
self.zoh_factor = 0.1
self.lai_active_factor = 0.5
if latitude:
days = np.array(range(367))
latitude = float(np.radians(latitude))
dr_366 = self.inverse_relative_distance_earth_sun(days)
sd_366 = np.array([self.solar_declination(day) for day in range(367)])
ws_366 = np.array([self.sunset_hour_angle(latitude, s) for s in sd_366])
self.daylight_hours_366 = np.array([PenmanMonteithDaily.daylight_hours(w) for w in ws_366])
self.ra_366 = np.array([self.extraterrestrial_radiation(
dr_366[i], ws_366[i], latitude, sd_366[i]) for i in range(len(dr_366))])
self.rs0_366 = np.array([self.clear_sky_shortwave_radiation(
ra, elevation=elevation) for ra in self.ra_366])
else:
self.daylight_hours_366 = None
self.ra_366 = None
self.rs0_366 = None
self.z = elevation
self.p = PenmanMonteithDaily.atmospheric_pressure(self.z)
ra_factor = self.aerodynamic_resistance_factor()
self.f1 = 86400 * self.e / (1.01 * self.r * ra_factor)
"""f1 = (specific heat at constant pressure) * (mean air density at constant pressure) /
(1.01 * :attr:`r` * :meth:`aerodynamic_resistance_factor`).
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_ Box 6
"""
self.f2 = self.bulk_surface_resistance() / ra_factor
r""":math:`f_1 = \frac{rs}{f_{ra}}` with :math:`f_{ra}` = :meth:`aerodynamic_resistance_factor`"""
def reset(self):
r"""Reset the following output attributes before calculating :math:`ETo`: :math:`doy`, :math:`u2`,
:math:`ld`, :math:`s`, :math:`pc`, :math:`mn`, :math:`es`, :math:`ea`, :math:`ra`,
:math:`rs`, :math:`rs0`, :math:`rns`, :math:`rnl`, :math:`rn`, :math:`etr`, :math:`etw`, and :math:`et`
"""
self.doy = None
self.u2 = None
self.ld = None
self.s = None
self.pc = None
self.mn = None
self.es = None
self.ea = None
self.ra = None
self.rs = None
self.rs0 = None
self.rns = None
self.rnl = None
self.rn = None
self.etr = None
self.etw = None
self.et = None
@staticmethod
def atmospheric_pressure(z):
r""" Return the atmospheric pressure (:math:`P`) *[kPa]* as a function of the elevation above sea level as
defined in `FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 7, p. 31):
.. math::
P = 101.3\left(\frac{293-0.0065z}{293}\right)^{5.26}
The atmospheric pressure (:math:`P`) is the pressure exerted by the weight of the earth's atmosphere.
Evaporation at high altitudes is promoted due to low atmospheric pressure as expressed in the psychrometric
constant. The effect is, however, small and in the calculation procedures, the average value for a location
is sufficient. A simplification of the ideal gas law, assuming :math:`20` *°C* for a standard atmosphere,
can be employed to calculate :math:`P`
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_).
:param z: elevation above sea level *[m]*
:type z: float or np.array
:return: (*float or np.array*) atmospheric pressure (:math:`P`) *[kPa]*
"""
return 101.3 * ((293.0 - 0.0065 * z) / 293.0) ** 5.26
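    # Sanity check: atmospheric_pressure(1800.0) evaluates to roughly 81.8 kPa,
    # in line with the worked example in FAO 56 (cf. Example 2).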
@staticmethod
def latent_heat_of_vaporization(temperature=20):
r"""Return the latent heat of vaporization (:math:`\lambda`) *[MJ/kg]* as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(Annex 3, eq. 3-1, p. 223):
.. math::
\lambda = 2.501-(2.361 * 10^{-3})T
:param temperature: air temperature (:math:`T`) *[°C]*. Default :math:`temperature=20`
:type temperature: float or np.array
:return: (*float or np.array*) latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*.
Default :math:`\lambda=2.45378`
"""
return 2.501 - 2.361e-3 * temperature
@staticmethod
def psychrometric_constant(p, **kwargs):
r"""Return the psychrometric constant (:math:`\gamma`) *[kPa/°C]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
eq. 8, p. 32:
.. math::
\gamma = \frac{c_p P}{\varepsilon \lambda}
or, using default values:
.. math::
\gamma = a_{psy} \cdot P
:param p: atmospheric pressure (:math:`P`) *[kPa]*
:type p: float or np.array
:Keyword Arguments:
            * **lamda** (*float*) - latent heat of vaporization (:math:`\lambda`) *[MJ/kg]*. Default :math:`lamda=2.45`.
              See :meth:`latent_heat_of_vaporization`
* **cp** (*float*) - specific heat at constant pressure (:math:`c_p`) *[MJ/kg]*. Default
:math:`cp=1.013e^{-3}`
* **epsilon** (*float*) - ratio molecular weight of water vapour/dry air (:math:`\epsilon`) *[-]*.
Default :math:`epsilon=0.622`
* **a_psy** (*float*) - coefficient depending on the type of the ventilation of the bulb *[1/°C]*. Examples:
* :math:`a_{psy} = 0.000665` (default)
* :math:`a_{psy} = 0.000662` for ventilated (Asmann type) psychrometers, with an air movement of some 5
*m/s*
* :math:`a_{psy} = 0.000800` for natural ventilated psychrometers (about 1 *m/s*)
* :math:`a_{psy} = 0.001200` for non-ventilated psychrometers installed indoors
The method uses :math:`a_{psy}` if given, otherwise eq. 8 (see above) with given or default values. Default
values correspond to :math:`a_{psy} = 0.000665` as argument.
:return: (*float or np.array*) psychrometric constant (:math:`\gamma`) *[kPa/°C]*
"""
if 'a_psy' in kwargs:
return kwargs.get('a_psy', 0.000665) * p
else:
return (kwargs.get('cp', 1.013e-3) * p) / (kwargs.get('epsilon', 0.622) * kwargs.get('lamda', 2.45))
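    # Sanity check: with P = 81.8 kPa and the default a_psy = 0.000665,
    # psychrometric_constant(81.8, a_psy=0.000665) is roughly 0.054 kPa/°C (cf. FAO 56, Example 2).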
@staticmethod
def saturation_vapour_pressure(*temperature):
r"""Return the saturation vapour pressure (:math:`e_s`) *[kPa]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 11, p. 36):
.. math::
e^{°}(T) = 0.6108 exp \left[\frac{17.27 T}{T + 237.3}\right]
:param temperature: air temperature (:math:`T`) *[°C]*
:type temperature: float or np.array
:return: (*float or np.array*) saturation vapour pressure (:math:`e_s`) *[kPa]*
"""
t = np.array([0.6108 * np.exp((17.27 * t) / (t + 237.3)) for t in temperature])
t = np.mean(t, axis=0)
return t
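    # Sanity check: saturation_vapour_pressure(24.5) is roughly 3.075 kPa, and
    # saturation_vapour_pressure(15.0, 24.5) returns the mean of e°(15.0) and e°(24.5),
    # roughly (1.705 + 3.075) / 2 = 2.39 kPa (cf. FAO 56, Example 3).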
@staticmethod
def slope_of_saturation_vapour_pressure_curve(*temperature):
r"""Return the slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]* according to
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 13, p. 37):
.. math::
\Delta = 4098\left[\frac{0.6108exp\left(\frac{17.27 T}{T + 237.3}\right)}{(T + 237.3)^{2}}\right]
:param temperature: air temperature (:math:`T`) *[°C]*
:type temperature: float or np.array
:return: (*float or np.array*) slope of saturation vapour pressure curve (:math:`\Delta`) *[kPa/°C]*
"""
sl = np.array([(4098.0 * PenmanMonteithDaily.saturation_vapour_pressure(t)) / ((t + 237.3) ** 2)
for t in temperature])
return np.mean(sl, axis=0)
@staticmethod
def actual_vapour_pressure(**kwargs):
"""Return the actual vapour pressure (:math:`e_a`) *[kPa]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(p. 37 , 38 , and 39):
:Keyword Arguments:
* **rh_min** (*float*) - 0.0 to 100.0 *[%]*
* **rh_max** (*float*) - 0.0 to 100.0 *[%]*
* **es_min** (*float*) - saturation vapour pressure for :math:`t\_min` *[kPa]*
* **es_max** (*float*) - saturation vapour pressure for :math:`t\_max` *[kPa]*
* **t_min** (*float*) - minimum air temperature *[°C]*
* **t_max** (*float*) - maximum air temperature *[°C]*
* **t_dew** (*float*) - dew point temperature *[°C]*
* **t_wet** (*float*) - wet bulb temperature *[°C]*
* **t_dry** (*float*) - dry bulb temperature *[°C]*
* **apsy** (*float*) - coefficient depending on the type of ventilation of the wet bulb *[-]*
:return: (*float or np.array*) actual vapour pressure (:math:`e_a`) *[kPa]*
"""
try:
rh_min = kwargs['rh_min'] / 100.0
rh_max = kwargs['rh_max'] / 100.0
if 'es_min' in kwargs and 'es_max' in kwargs:
es_min = kwargs['es_min']
es_max = kwargs['es_max']
else:
es_min = PenmanMonteithDaily.saturation_vapour_pressure(kwargs['t_min'])
es_max = PenmanMonteithDaily.saturation_vapour_pressure(kwargs['t_max'])
return (rh_max * es_min + rh_min * es_max) / 2.0
except KeyError:
t_dew = kwargs.get('t_dew', None)
return 0.6108 * math.exp((17.27 * t_dew) / (t_dew + 237.3))
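    # Sanity check: actual_vapour_pressure(rh_min=54, rh_max=82, t_min=18.0, t_max=25.0)
    # gives roughly 1.70 kPa (cf. FAO 56, Example 5), while actual_vapour_pressure(t_dew=17.0)
    # gives roughly 1.94 kPa from the dew point alone.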
def aerodynamic_resistance_factor(self):
r"""Return the aerodynamic resistance (:math:`r_a`) *[s/m]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 4, p. 20):
.. math::
r_a = \frac{ \ln \left( \frac{z_m - d}{z_{om}} \right) \ln \left( \frac{z_h - d}{z_{oh}} \right) }
{ k^2 u_z }
where (see :meth:`PenmanMonteithDaily()`):
:math:`u_z` --- the wind speed *[m/s]* at height :math:`z` (see :meth:`et0()`)
:math:`k` --- von Karman's constant *[-]*
:math:`zm` --- height of wind measurements *[m]*
:math:`zh` --- height of air humidity measurements *[m]*
The aerodynamic resistance factor :math:`f_{r_a}` is constant for a given crop:
.. math::
f_{r_a} = \frac{ \ln \left( \frac{z_m - d}{z_{om}} \right) \ln \left( \frac{z_h - d}{z_{oh}} \right) }
{ k^2}
with the zero plane displacement height (:math:`d`):
.. math::
d = f_d \cdot h
and roughness length governing momentum transfer (:math:`z_{om}`):
.. math::
z_{om} = f_{zom} \cdot h
where:
:math:`f_d` --- defined in :attr:`d_factor`
:math:`f_{zom}` --- defined in in :attr:`zom_factor`
:return: (*float*) aerodynamic resistance factor :math:`f_{r_a}`
"""
# zero plane displacement height, d [m]
d = self.d_factor * self.h
# roughness length governing momentum transfer [m]
zom = self.zom_factor * self.h
# roughness length governing transfer of heat and vapour [m]
zoh = self.zoh_factor * zom
return math.log((self.zm - d) / zom) * math.log((self.zh - d) / zoh) / (self.k ** 2)
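    # Sanity check: with the default reference crop (h = 0.12 m, zm = zh = 2.0 m) this
    # factor is roughly 208, i.e. r_a = 208 / u2 s/m as used for the FAO 56 grass
    # reference surface (cf. Box 4).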
def bulk_surface_resistance(self):
r"""Return (bulk) surface resistance (:math:`r_s`) *[s/m]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 5, p. 21):
.. math::
r_s = \frac{ r_l } { LAI_{active} }
where:
:math:`r_l` --- the bulk stomatal resistance of the well-illuminated leaf *[s/m]*
:math:`LAI_{active}` --- the active (sunlit) leaf area index *[m² (leaf area) / m² (soil surface)]*
A general equation for :math:`LAI_{active}` is:
.. math::
LAI_{active} = 0.5 LAI
with:
.. math::
LAI = 24 h
where :math:`h` is an optional input parameter in :class:`PenmanMonteithDaily`.
:return: (*float*) (bulk) surface resistance :math:`r_s` *[s/m]*
"""
#
# active (sunlit) leaf area index [m^2 (leaf area) / m^2 (soil surface)]
lai_active = self.lai_active_factor * self.lai
rs = self.rl / lai_active
return rs
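    # Sanity check: with the defaults rl = 100 s/m and lai = 2.88, LAI_active = 1.44 and
    # r_s is roughly 70 s/m, the surface resistance of the FAO 56 grass reference crop (cf. Box 5).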
@staticmethod
def to_u2(uz, z):
r""" Return the calculated wind speed at 2 meters above ground surface (:math:`u_2`) *[m/s]*:
.. math::
u_2 = \frac{ 4.87 u_z}{ \ln{(67.8 z - 5.42)}}
:param uz: measured wind speed at :math:`z` meters above ground surface *[m/s]*
:type uz: float or np.array
:param z: height of measurement above ground surface *[m]*
:type z: float
:return: (*float or np.array*) wind speed at 2 meters above ground surface *[m/s]*
"""
return uz * 4.87 / np.log(67.8 * z - 5.42)
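    # Sanity check: to_u2(3.2, 10.0) is roughly 2.4 m/s, i.e. a wind speed of 3.2 m/s
    # measured at 10 m corresponds to about 2.4 m/s at 2 m (cf. FAO 56, Example 14).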
@staticmethod
def extraterrestrial_radiation(dr, ws, lat, sd):
r"""Return the extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 21, p. 46):
.. math::
R_a = \frac{24(60)}{\pi} G_{sc} d_r [ \omega_s \sin(\varphi) \sin(\delta) + \cos(\varphi) \cos(\delta)
\sin(\omega_s)]
:param dr: inverse relative distance Earth-Sun (:math:`d_r`) *[-]*.
See :meth:`inverse_relative_distance_earth_sun`
:type dr: float
:param ws: sunset hour angle (:math:`\omega_s`) *[rad]*. See :meth:`sunset_hour_angle`
:type ws: float
:param lat: latitude (:math:`\varphi`) *[rad]*
:type lat: float
:param sd: solar declination (:math:`\delta`) *[rad]*. See :meth:`solar_declination`
:type sd: float
:return: *(float or np.array)* daily extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*
"""
# solar_constant = 0.0820 # MJ.m-2.min-1
# (24.0 * 60.0 / pi) * solar_constant = 37.586031360582005
return 37.586031360582005 * dr * (ws * np.sin(lat) * np.sin(sd) + np.cos(lat) * np.cos(sd) * np.sin(ws))
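    # Sanity check: for 20°S (lat ≈ -0.35 rad) on 3 September (J = 246), dr ≈ 0.985,
    # sd ≈ 0.120 rad and ws ≈ 1.527 rad, giving Ra ≈ 32.2 MJ/m²day (cf. FAO 56, Example 8).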
@staticmethod
def inverse_relative_distance_earth_sun(day):
r"""Return the inverse relative distance Earth-Sun (:math:`d_r`) *[-]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 23, p. 46):
.. math::
d_r = 1 + 0.033 \cos{ \left( \frac{2 \pi}{365} J \right)}
:param day: day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`
:type day: int or np.array
:return: *(float or np.array)* inverse relative distance Earth-Sun (:math:`d_r`) *[-]*
"""
# 2.0 * pi / 365 = 0.01721420632103996
return 1 + 0.033 * np.cos(0.01721420632103996 * day)
@staticmethod
def solar_declination(day):
r"""Return the solar declination (:math:`\delta`) *[rad]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 24, p. 46):
.. math::
\delta = 0.409 \sin{ \left( \frac{2 \pi}{365} J - 1.39\right)}
:param day: day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`
:type day: int
:return: (*float or np.array*) solar declination (:math:`\delta`) *[rad]*
"""
# 2.0 * pi / 365 = 0.01721420632103996
return 0.409 * np.sin(0.01721420632103996 * day - 1.39)
@staticmethod
def sunset_hour_angle(lat, sd):
r"""Return the sunset hour angle (:math:`\omega_s`) *[rad]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 25, p. 46):
.. math::
\omega_s = \arccos{ \left[-tan(\varphi)tan(\delta)\right]}
:param lat: latitude (:math:`\varphi`) *[rad]*
:type lat: float or np.array
:param sd: solar declination (:math:`\delta`) *[rad]*. See :meth:`solar_declination`
:type sd: float or np.array
:return: (*float or np.array*) sunset hour angle (:math:`\omega_s`) *[rad]*
"""
return np.arccos(-np.tan(sd) * np.tan(lat))
@staticmethod
def daylight_hours(ws):
r"""Return the daylight hours (:math:`N`) *[hour]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 34, p. 49):
.. math::
N = \frac{24}{\pi} \omega_s
:param ws: sunset hour angle (:math:`\omega_s`) *[rad]*. See :meth:`sunset_hour_angle`
:type ws: float or np.numpy
:return: (*float or np.numpy*) daylight hours (:math:`N`) *[hour]*
"""
# 24.0 / pi = 7.639437268410976
return 7.639437268410976 * ws
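    # Sanity check: for ws ≈ 1.527 rad (20°S on 3 September), daylight_hours gives
    # N ≈ 7.64 * 1.527 ≈ 11.7 hours (cf. FAO 56, Example 9).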
@staticmethod
def clear_sky_shortwave_radiation(ra, elevation=0.0, a_s=0.25, b_s=0.50):
r"""Return the clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*. It is required for computing
:meth:`net_longwave_radiation`.
For near sea level or when calibrated values for :math:`a_s` and :math:`b_s` are available
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_, eq. 36,
p. 51):
.. math::
R_{so} = (a_s + b_s ) R_a
When calibrated values for :math:`a_s` and :math:`b_s` are not available
(`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_,
eq. 37, p. 51):
.. math::
R_{so} = (0.75 + 2 * 10^{−5} z) R_a
where :math:`z` is the station elevation above sea level *[m]*.
:param ra: extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation`
:type ra: float or np.numpy
        :param elevation: elevation above sea level (:math:`z`) *[m]*. See :attr:`elevation`
:type elevation: float or np.numpy
:param a_s: regression constant (:math:`a_s`) *[-]*. Default :math:`a_s=0.25`. It expresses the fraction of
extraterrestrial radiation reaching the earth on overcast days (:math:`n = 0`)
:type a_s: float or np.numpy
:param b_s: regression constant (:math:`b_s`) *[-]*. Default :math:`b_s=0.50`. The expression
:math:`a_s+b_s` indicates the fraction of extraterrestrial radiation reaching the earth on clear days
(:math:`n = N`)
:type b_s: float or np.numpy
:return: (*float or np.numpy*) daily clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*
"""
rs0 = ((a_s + b_s) + 2e-5 * elevation) * ra
return rs0
@staticmethod
def shortwave_radiation(ra, n, mn, a_s=0.25, b_s=0.50):
r"""Return the daily shortwave radiation (:math:`R_s`) *[MJ/m²day]* according to the Angstrom formula as
described in `FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 35, p. 50):
.. math::
R_s = \left( a_s + b_s \frac{n}{N} \right) R_a
Depending on atmospheric conditions (humidity, dust) and solar declination (latitude and month), the Angstrom
values :math:`a_s` and :math:`b_s` will vary. Where no actual solar radiation data are available and no
calibration has been carried out for improved :math:`a_s` and :math:`b_s` parameters, the values
:math:`a_s = 0.25` and :math:`b_s = 0.50` are recommended.
:param ra: extraterrestrial radiation (:math:`R_a`) *[MJ/m²day]*. See :meth:`extraterrestrial_radiation`
:type ra: float or np.array
:param n: actual duration of sunshine or cloudless hours (:math:`n`) *[hour]*
:type n: float or np.array
:param mn: maximum possible duration of sunshine or daylight hours (:math:`N`) *[hour]*
See :meth:`daylight_hours`
:type mn: float, np.array
:param a_s: regression constant (:math:`as`) *[-]*. Default :math:`a_s=0.25`. It expresses the fraction
of extraterrestrial radiation reaching the earth on overcast days (:math:`n = 0`)
:type a_s: float or np.numpy
:param b_s: regression constant (:math:`bs`) *[-]*. Default :math:`b_s=0.50`. The expression
:math:`a_s+b_s` indicates the fraction of extraterrestrial radiation reaching the earth on clear days
(:math:`n = N`)
:type b_s: float or np.numpy
:return: (*float, np.array*) daily total shortwave radiation (:math:`R_s`) *[MJ/m²day]* reaching the earth
.. note::
If shortwave radiation (i.e., solar radiation) measurements are available, :meth:`shortwave_radiation`
            function is not needed. Measurements of shortwave radiation may be used directly as input data in
:meth:`et0`.
"""
rns = (a_s + b_s * n / mn) * ra
return rns
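    # Sanity check: on a cloudless day (n = N) with Ra = 32.2 MJ/m²day and the default
    # Angstrom constants, Rs = (0.25 + 0.50) * 32.2 ≈ 24.2 MJ/m²day.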
@staticmethod
def net_shortwave_radiation(rs, albedo):
r"""The net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]* resulting from the balance between incoming
and reflected solar radiation as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 38, p. 51):
.. math::
R_{ns} = (1 − \alpha) R_s
:param rs: daily shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation`
:type rs: float or np.array
:param albedo: albedo or reflection coefficient (:math:`\alpha` *[-]*). Range:
:math:`0.0 \leq \alpha \leq 1.0` (:math:`\alpha=0.23` for the hypothetical grass reference crop).
See :class:`PenmanMonteithDaily` and :meth:`et0`
:type albedo: float or np.array
:return: (*float or np.array*) daily net shortwave radiation (:math:`R_{ns}`) *[MJ/m²day]* reaching the earth
"""
return (1.0 - albedo) * rs
@staticmethod
def net_longwave_radiation(t_min, t_max, rs, rs0, ea=None):
r"""Return the net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]* as defined in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_
(eq. 39, p. 52):
.. math::
R_{nl} = \sigma\left[\frac{T_{max,K}^4 + T_{min,K}^4}{2}\right](0.34-0.14\sqrt{e_a})\left(1.35
\frac{R_s}{R_{so}}-0.35\right)
        :param t_min: minimum daily air temperature (:math:`T_{min}`) *[°C]*
:type t_min: float or np.array
        :param t_max: maximum daily air temperature (:math:`T_{max}`) *[°C]*
:type t_max: float or np.array
:param rs: shortwave radiation (:math:`R_s`) *[MJ/m²day]*. See :meth:`shortwave_radiation`
:type rs: float or np.array
:param rs0: clear-sky shortwave radiation (:math:`R_{so}`) *[MJ/m²day]*. See
:meth:`clear_sky_shortwave_radiation`
:type rs0: float or np.array
:param ea: actual vapour pressure (:math:`e_a`) *[kPa]*
:type ea: float or np.array
:return: (*float or np.array*) daily net outgoing longwave radiation (:math:`R_{nl}`) *[MJ/m²day]*
.. note::
The :math:`R_s/R_{so}` term in the equation above must be limited so that :math:`R_s/R_{so} \leq 1.0`.
"""
t_min = t_min + 273.15
t_max = t_max + 273.15
        if ea is not None:
            rln = 4.903e-9 * (t_min ** 4 + t_max ** 4) * 0.5 * (0.34 - 0.14 * np.sqrt(ea)) * (1.35 * rs / rs0 - 0.35)
        else:
            # The net emissivity approximation used when ea is unavailable expects air temperature in °C,
            # so the mean temperature is taken before the conversion to Kelvin above
            t_mean = (t_min + t_max) / 2.0 - 273.15
            rln = 4.903e-9 * (t_min ** 4 + t_max ** 4) * 0.5 * \
                  (-0.02 + 0.261 * np.exp(-7.77e-4 * t_mean ** 2)) * (1.35 * rs / rs0 - 0.35)
return rln
def et0(self, **kwargs):
r"""Returns potential evapotranspiration (:math:`ETo`) *[mm/day]* as described in
`FAO 56 <http://www.fao.org/tempref/SD/Reserved/Agromet/PET/FAO_Irrigation_Drainage_Paper_56.pdf>`_. Reference
        (grass) potential evapotranspiration is returned for the default constructor values. If values in `**kwargs` are
arrays, their lengths must be the same.
:Keyword Arguments:
* **date** (*str, datetime.date, datetime.datetime, pandas.TimeStamp, or np.array*)
* **doy** (*int or np.array*) - day of the year (:math:`J`) *[-]*. Range: :math:`1 \leq J \leq 366`.
It is not used if date is given
* **u2** (*float or np.array*) - wind speed at 2 meters above ground surface *[m/s]*
* **uz** (*float or np.array*) - measured wind speed at :math:`z` meters above ground surface *[m/s]*
* **z** (*float or np.array*) - height of measurement above ground surface *[m]*
* **t_mean** (*float or np.array*) - daily mean air temperature *[°C]*
* **t_min** (*float or np.array*) - daily minimum air temperature *[°C]*
* **t_max** (*float or np.array*) - daily maximum air temperature *[°C]*
* **rh_mean** (*float or np.array*) - daily mean relative humidity *[%]*
* **rh_min** (*float or np.array*) - daily minimum relative humidity *[%]*
* **rh_max** (*float or np.array*) - daily maximum relative humidity *[%]*
* **rs** (*float or np.array*) - solar or shortwave radiation *[MJ/m²day]*
* **n** (*float or np.array*) - daily actual duration of sunshine or cloudless hours *[hour]*
* **g** (*float or np.array*) - soil heat flux density *[MJ/m²day]*. If not given, *g* defined in
:meth:`PenmanMonteithDaily` will be used
* **a_s** (*float or np.array*) - see :meth:`shortwave_radiation`. Default :math:`a_s = 0.25`
* **b_s** (*float or np.array*) - see :meth:`shortwave_radiation`. Default :math:`b_s = 0.50`
* **negative_rnl** (*bool*) - allow negative net longwave radiation. Default :math:`negative\_rnl=True`
* **negative_et0** (*bool*) - allow negative reference evapotranspiration. Default :math:`negative\_et0=True`
:return: (*float or np.array*) potential evapotranspiration (:math:`ETo`) *[mm/day]*
Cases:
* If date and doy are given, :math:`doy` is disregarded
* if :math:`uz` is given, :math:`z` must also be given
* if :math:`u2` and (:math:`uz`, :math:`z`) are given, both :math:`uz` and :math:`z` are disregarded
* if :math:`rs` and :math:`n` are given, :math:`n` will be disregarded
* The best options for air temperature are, in this order: 1) t_min, t_max, and t_mean, 2) t_min, t_max, and
3) t_mean
* The best options for relative air humidity are, in this order: 1) rh_max and rh_min, 2) rh_max, and 3)
rh_mean
Example 1::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> et0 = pm.et0(doy=187, u2=2.078, t_min=12.3, t_max=21.5, rh_min=63, rh_max=84, n=9.25)
>>> print(et0)
3.872968723753793
Example 2::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> et0 = pm.et0(date='2001-07-06', u2=2.078, t_min=12.3, t_max=21.5, rh_min=63, rh_max=84, n=9.25)
>>> print(et0)
3.872968723753793
Example 3::
>>> from evapotranspiration.penman_monteith_daily import PenmanMonteithDaily
>>> pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
>>> date=np.array(['2001-07-06', '2001-07-06'])
>>> u2=np.array([2.078, 2.078])
>>> t_min=np.array([12.3, 12.3])
>>> t_max=np.array([21.5, 21.5])
>>> rh_min=np.array([63, 63])
>>> rh_max=np.array([84, 84])
>>> n=np.array([9.25, 9.25])
>>> et0 = pm.et0(date=date, u2=u2, t_min=t_min, t_max=t_max, rh_min=rh_min, rh_max=rh_max, n=n)
>>> print(et0)
[3.87296872 3.87296872]
"""
self.reset()
try:
self.u2 = kwargs.get('u2', None)
if self.u2 is None:
self.u2 = self.to_u2(kwargs['uz'], kwargs['z'])
except KeyError:
raise KeyError('Penman-Monteith: Either u2 or both uz and z must be given')
t_min = kwargs.get('t_min', None)
if t_min is None:
t_min = kwargs['t_mean']
t_max = kwargs.get('t_max', None)
if t_max is None:
t_max = kwargs['t_mean']
t_mean = kwargs.get('t_mean', None)
rh_min = kwargs.get('rh_min', None)
rh_max = kwargs.get('rh_max', None)
if rh_max is not None:
if rh_min is None:
rh_min = rh_max
else:
rh_min = rh_max = kwargs['rh_mean']
self.doy = kwargs.get('doy', None)
if self.doy is None:
self.doy = pd.to_datetime(kwargs['date']).dayofyear
self.rs = kwargs.get('rs', None)
n = kwargs.get('n', None)
g = kwargs.get('g', None)
if g is None:
g = self.g_default
a_s = kwargs.get('a_s', 0.25)
b_s = kwargs.get('b_s', 0.50)
if t_mean is None:
t_mean = (t_min + t_max) / 2.0
self.ld = PenmanMonteithDaily.latent_heat_of_vaporization(t_mean)
# In FAO 56, where delta occurs in the numerator and denominator, the slope
# of the vapour pressure curve is calculated using mean air temperature (Equation 9)
self.s = PenmanMonteithDaily.slope_of_saturation_vapour_pressure_curve(t_mean)
self.pc = PenmanMonteithDaily.psychrometric_constant(self.p, lamda=self.ld)
self.es = PenmanMonteithDaily.saturation_vapour_pressure(t_min, t_max)
self.ea = PenmanMonteithDaily.actual_vapour_pressure(rh_min=rh_min, rh_max=rh_max, t_min=t_min, t_max=t_max)
try:
self.ra = np.array([self.ra_366[i] for i in self.doy])
self.rs0 = np.array([self.rs0_366[i] for i in self.doy])
if self.rs is None:
self.mn = np.array([self.daylight_hours_366[i] for i in self.doy])
self.rs = self.shortwave_radiation(self.ra, n, self.mn, a_s, b_s)
# FAO56 eq. 39. The Rs/Rso term in equation 39 must be limited so that Rs/Rso ≤ 1.0.
self.rs = np.where(self.rs > self.rs0, self.rs0, self.rs)
except TypeError:
self.ra = self.ra_366[self.doy]
self.rs0 = self.rs0_366[self.doy]
if self.rs is None:
self.mn = self.daylight_hours_366[self.doy]
self.rs = self.shortwave_radiation(self.ra, n, self.mn, a_s, b_s)
# FAO56 eq. 39. The Rs/Rso term in equation 39 must be limited so that Rs/Rso ≤ 1.0.
self.rs = self.rs0 if self.rs > self.rs0 else self.rs
self.rns = self.net_shortwave_radiation(self.rs, self.albedo)
self.rnl = self.net_longwave_radiation(t_min, t_max, self.rs, self.rs0, self.ea)
# Clip negative net longwave radiation unless it is explicitly allowed (negative_rnl=True by default)
if not kwargs.get('negative_rnl', True):
self.rnl = np.where(self.rnl < 0.0, 0.0, self.rnl)
self.rn = self.rns - self.rnl
# denominator of FAO 56 eq. 3
etd = self.ld * (self.s + self.pc * (1 + self.f2 * self.u2))
# ETo energy component of FAO 56 eq. 3
self.etr = self.s * (self.rn - g) / etd
# ETo wind component of FAO 56 eq. 3
self.etw = (self.ld * self.pc * self.u2 * self.f1 * (self.es - self.ea) / (t_mean + 273.0)) / etd
# Reference evapotranspiration
self.et = self.etr + self.etw
# Clip negative reference evapotranspiration unless it is explicitly allowed (negative_et0=True by default)
if not kwargs.get('negative_et0', True):
self.et = np.where(self.et < 0.0, 0.0, self.et)
try:
self.et = float(self.et)
except TypeError:
pass
return self.et
def et0_frame(self, df, **kwargs):
"""Return the input DataFrame extended by :meth:`et0` and further calculation parameters.
:param df: pandas DataFrame with columns corresponding to the inputs described in :meth:`et0`
:type df: pandas.DataFrame
:Keyword Arguments:
* **show_all** (*bool*) - show all results if :math:`True`, otherwise set `parameter=True` to show individual
parameters. For example :math:`doy=True`, :math:`ld=True`, etc. See :meth:`PenmanMonteithDaily`
:return: (*pandas.DataFrame*) DataFrame
"""
doy_str = kwargs.get('doy', 'doy')
date_str = kwargs.get('date', 'date')
u2_str = kwargs.get('u2', 'u2')
uz_str = kwargs.get('uz', 'uz')
z_str = kwargs.get('z', 'z')
t_mean_str = kwargs.get('t_mean', 't_mean')
t_min_str = kwargs.get('t_min', 't_min')
t_max_str = kwargs.get('t_max', 't_max')
rh_mean_str = kwargs.get('rh_mean', 'rh_mean')
rh_min_str = kwargs.get('rh_min', 'rh_min')
rh_max_str = kwargs.get('rh_max', 'rh_max')
rs_str = kwargs.get('rs', 'rs')
n_str = kwargs.get('n', 'n')
g_str = kwargs.get('g', 'g')
columns = df.columns
doy = df[doy_str].values if doy_str in columns else None
date = df[date_str].values if date_str in columns else None
u2 = df[u2_str].values if u2_str in columns else None
uz = df[uz_str].values if uz_str in columns else None
z = df[z_str].values if z_str in columns else None
t_mean = df[t_mean_str].values if t_mean_str in columns else None
t_min = df[t_min_str].values if t_min_str in columns else None
t_max = df[t_max_str].values if t_max_str in columns else None
rh_mean = df[rh_mean_str].values if rh_mean_str in columns else None
rh_min = df[rh_min_str].values if rh_min_str in columns else None
rh_max = df[rh_max_str].values if rh_max_str in columns else None
rs = df[rs_str].values if rs_str in columns else None
n = df[n_str].values if n_str in columns else None
g = df[g_str].values if g_str in columns else None
self.et0(doy=doy, date=date, u2=u2, uz=uz, z=z, t_mean=t_mean, t_min=t_min, t_max=t_max,
rh_mean=rh_mean, rh_min=rh_min, rh_max=rh_max, rs=rs, n=n, g=g)
show_all = kwargs.get('show_all', True)
if show_all:
if doy is None:
df['DoY'] = self.doy
df['Lambda'] = self.ld
df['Psy'] = self.pc
df['Delta'] = self.s
df['es'] = self.es
df['ea'] = self.ea
df['Rs'] = self.rs
df['Rns'] = self.rns
df['Rnl'] = self.rnl
df['ET0r'] = self.etr
df['ET0w'] = self.etw
df['ET0'] = self.et
else:
if kwargs.get('Lambda', False):
df['Lambda'] = self.ld
if kwargs.get('Psy', False):
df['Psy'] = self.pc
if kwargs.get('Delta', False):
df['Delta'] = self.s
if kwargs.get('es', False):
df['es'] = self.es
if kwargs.get('ea', False):
df['ea'] = self.ea
if kwargs.get('Rs', False):
df['Rs'] = self.rs
if kwargs.get('Rns', False):
df['Rns'] = self.rns
if kwargs.get('Rnl', False):
df['Rnl'] = self.rnl
if kwargs.get('ET0r', False):
df['ET0r'] = self.etr
if kwargs.get('ET0w', False):
df['ET0w'] = self.etw
if kwargs.get('ET0', True):
df['ET0'] = self.et
return df
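# Minimal usage sketch for et0_frame, assuming a pandas DataFrame with the default
# column names; the inputs mirror Example 2 of et0, so ET0 should come out near 3.87:
#     import pandas as pd
#     pm = PenmanMonteithDaily(elevation=100, latitude=50.80)
#     df = pd.DataFrame({'date': ['2001-07-06'], 'u2': [2.078], 't_min': [12.3],
#                        't_max': [21.5], 'rh_min': [63], 'rh_max': [84], 'n': [9.25]})
#     df = pm.et0_frame(df)
#     df['ET0']  # -> approximately 3.87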
| 3.09375
| 3
|
src/cnc-app-name/views.py
|
scotchoaf/cnc-skeleton
| 0
|
12649
|
<reponame>scotchoaf/cnc-skeleton
# Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: $YOURNAME and $EMAIL
"""
Palo Alto Networks cnc-skeleton
This software is provided without support, warranty, or guarantee.
Use at your own risk.
"""
from django import forms
from django.contrib import messages
from django.shortcuts import HttpResponseRedirect
# Every app will need to import at least the CNCBaseFormView
from pan_cnc.views import CNCBaseFormView, ProvisionSnippetView
# All class attributes can be defined here or in the .pan-cnc.yaml
# In this case, we have defined class level attributes there. This makes it possible to
# create apps while writing no code at all. Just create a view in the .pan-cnc.yaml based on a
# CNCBaseFormView and configure the attributes as needed.
# If you want additional logic, then you subclass the CNCBaseFormView and add your logic there.
# The two main methods to override are 'generate_dynamic_form' and 'form_valid'.
#
# generate_dynamic_form gets called before the web form is created and displayed to the user
#
# form_valid is called after they submit the form
#
class ExampleAppView(CNCBaseFormView):
def form_valid(self, form):
# we now have the form from the user, let's get some values to perform some logic
# every variable entered by the user is saved in the user session. We can access it using this
# convenience method:
var_name = self.get_value_from_workflow('var_name', 'DEFAULT_IF_NOT_FOUND')
var_name_again = self.get_value_from_workflow('var_name_again', 'DEFAULT_IF_NOT_FOUND')
# silly exercise to just upper case the value entered by the user
var_name_upper = str(var_name).upper()
var_name_again_reverse = str(var_name_again)[::-1]
# now, save the values back to the workflow
self.save_value_to_workflow('var_name', var_name_upper)
self.save_value_to_workflow('var_name_again', var_name_again_reverse)
# and call our super to continue processing
return super().form_valid(form)
# Again override the ProvisionSnippetView as we are only building a workflow here.
# CNCBaseFormView will only display the form and perform a redirect after 'form_valid'
# however, ProvisionSnippetView will actually redirect to another CNC class based on the skillet type
# I.e. this is where the logic of how to interact with APIs, PAN-OS devices, render templates, etc. is all done
# You usually want a child of this class to be the 'last' in a chain if you need extended logic
class ExampleAppPasswordView(ProvisionSnippetView):
def get_snippet(self):
return self.snippet
# this method allows us to customize what is shown to the user beyond what is present in the loaded skillet
# 'variables' section
def generate_dynamic_form(self):
# let's first get the generated form from our base class
dynamic_form = super().generate_dynamic_form()
dynamic_form.fields['password_2'] = forms.CharField(widget=forms.PasswordInput(render_value=True),
initial='')
return dynamic_form
# the user has now completed the form and we have the results
def form_valid(self, form):
# Everything the user has entered will be available here in the 'workflow'
# Note that any 'variable' entries defined in the .meta-cnc snippet will
# be automatically added to the session workflow
workflow = self.get_workflow()
# get the values from the user submitted here
var_name = workflow.get('var_name')
var_name_again = workflow.get('var_name_again')
example_password = workflow.get('example_password')
# to access variables that were not defined in the snippet
# you can grab them directly from the POST on the request object
password_2 = self.request.POST['password_2']
print(f'checking if {example_password} matches {password_2}')
if example_password != password_2:
# Send an error message back to the user
messages.add_message(self.request, messages.ERROR, 'Passwords do not match!')
return HttpResponseRedirect('workflow00')
print('Got some vars here!')
print(f'Found value for var_name: {var_name}')
print(f'Found another value for var_name_again {var_name_again}')
return super().form_valid(form)
| 1.875
| 2
|
tcc_server/emulatorRPi.py
|
MegaNo0body/tcc
| 1
|
12650
|
<gh_stars>1-10
import sys
from time import sleep
from random import randint
from urllib.request import urlopen
from urllib.parse import urlencode
if len(sys.argv) != 2:
print('Usage: ' + sys.argv[0] + ' {idSensor}')
print('Example: ' + sys.argv[0] + ' 8')
else:
sensorId = sys.argv[1]
URL_SERVICO = 'http://127.0.0.1:8081/tcc/sensor/' + sensorId + '/inserir'
VARIACAO_MAXIMA = 5
valores = {
'Chuva': 80.0,
'UmidadeAr': 85.0,
'UmidadeSolo': 80.0,
'TemperaturaAr': 30.0,
'TemperaturaSolo': 25.0
}
variacao = {}
for k in valores:
valores[k] = valores[k] + randint(-3, +3) / 10
variacao[k] = 0.0
accel = {}
while True:
for k in variacao:
# randint requires integer bounds; draw -1, 0 or +1 and scale it down
accel[k] = randint(-1, 1) / 10
r = randint(10, 30)
for i in range(r):
data = {}
for k in variacao:
variacao[k] = variacao[k] + accel[k]
variacao[k] = max(variacao[k], -VARIACAO_MAXIMA)
variacao[k] = min(variacao[k], +VARIACAO_MAXIMA)
data[k] = '%.2f' % (valores[k] + round(variacao[k], 2))
data = urlencode(data)
print(data)
urlopen(URL_SERVICO, data.encode('ascii'))
sleep(0.50)
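# Usage sketch (assuming the TCC server from this repository is listening on
# http://127.0.0.1:8081): run the emulator with the target sensor id, e.g.
#     python emulatorRPi.py 8
# Each posted reading is a bounded random walk (clamped to +/- VARIACAO_MAXIMA)
# around the base values in `valores`, sent to the /tcc/sensor/{id}/inserir endpoint.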
| 2.90625
| 3
|
sayn/logging/file_logger.py
|
robin-173/sayn
| 105
|
12651
|
from pathlib import Path
import logging
from .logger import Logger
from .log_formatter import LogFormatter
class FileLogger(Logger):
fmt = LogFormatter(use_colour=False, output_ts=False)
logger = None
def __init__(self, folder, format=None):
if format is None:
format = "%(asctime)s|%(levelname)s|%(message)s"
formatter = logging.Formatter(format)
log_file = Path(folder, "sayn.log")
if not log_file.parent.exists():
log_file.parent.mkdir(parents=True)
handler = logging.FileHandler(log_file)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self.logger = logger
def print(self, s=None):
if s is not None:
if s["level"] == "info":
func = self.logger.info
elif s["level"] == "error":
func = self.logger.error
elif s["level"] == "warning":
func = self.logger.warning
else:
func = self.logger.debug
s = s["message"]
if isinstance(s, str):
s = [s]
elif not isinstance(s, list):
raise ValueError("error in logging print")
func(f"{s[0]}")
for e in s[1:]:
for line in e.split("\n"):
func(f"{line}")
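# Minimal usage sketch (assuming the target folder exists or can be created):
#     logger = FileLogger("logs")
#     logger.print({"level": "info", "message": "task started"})
#     logger.print({"level": "error", "message": ["task failed", "line 1\nline 2"]})
# Entries are appended to logs/sayn.log using the asctime|levelname|message format.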
| 2.828125
| 3
|
tests/test_docs.py
|
gitter-badger/pygsuite
| 0
|
12652
|
from pygsuite import DefaultFonts, TextStyle, Color
from pygsuite.docs.doc_elements.paragraph import Paragraph
BRIGHT_GREEN_HEX = "#72FF33"
def test_text(test_document):
document = test_document
docbody = document.body
docbody.delete()
docbody.add_text(
"TEST_CUSTOM\n",
style=TextStyle(font_size=18, font_weight=200, color=Color(hex=BRIGHT_GREEN_HEX)),
)
docbody.add_text("TEST_DEFAULT\n", style=DefaultFonts.NORMAL_TEXT)
docbody.add_text("TEST_INDEX\n", style=DefaultFonts.NORMAL_TEXT, position=1)
document.flush()
text = [item for item in document.body if isinstance(item, Paragraph)]
assert text[0].text.strip() == "TEST_INDEX"
assert text[2].text.strip() == "TEST_DEFAULT"
# TODO: return style objects
assert text[1].elements[0].style.font_size == 18
def test_paragraph(test_document):
document = test_document
docbody = document.body
docbody.delete()
docbody.add_text(
"TEST_CUSTOM\n",
style=TextStyle(font_size=18, font_weight=200, color=Color(hex=BRIGHT_GREEN_HEX)),
)
docbody.flush()
docbody.content[1].text = "TEST_CUSTOM_SETTER"
docbody.add_text("INSERT\n", position=0)
docbody.flush()
docbody.paragraphs[1].elements[0].style = TextStyle(
font_size=24, font_weight=500, color=Color(hex=BRIGHT_GREEN_HEX)
)
docbody.flush()
assert docbody.content[2].text.strip() == "TEST_CUSTOM_SETTER"
assert docbody.paragraphs[1].elements[0].style.font_size == 24
| 2.59375
| 3
|
neutra/vae.py
|
dieterichlawson/google-research
| 4
|
12653
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
# pylint: disable=invalid-name,g-bad-import-order,missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from absl import app
from absl import flags
from concurrent import futures
import gin
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Any, Dict, List, Optional, Tuple
from neutra import utils
tfd = tfp.distributions
tfb = tfp.bijectors
FLAGS = flags.FLAGS
TRAIN_BATCH = 250
TEST_BATCH = 1000
AIS_BATCH = 50
def ReduceL2(tensor, dims):
return tf.sqrt(tf.reduce_sum(tf.square(tensor), dims))
@utils.MakeTFTemplate
def Conv2DWN(inputs,
num_filters,
kernel_size=[3, 3],
stride=[1, 1],
pad="SAME",
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.shape[3])
with tf.variable_scope(scope, "conv_2d_wn"):
w = tf.get_variable(
"w", [kernel_size[0], kernel_size[1], num_inputs, num_filters],
initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, stride[0], stride[1], 1], pad)
if biases_initializer is not None:
out += tf.reshape(b, [1, 1, 1, num_filters])
return activation(out)
def GetLinearARMask(num_inputs, num_outputs, zero_diagonal=False):
assert num_inputs % num_outputs == 0 or num_outputs % num_inputs == 0, "%d vs %d" % (num_inputs, num_outputs)
mask = np.ones([num_inputs, num_outputs], dtype=np.float32)
if num_outputs >= num_inputs:
k = num_outputs // num_inputs
for i in range(num_inputs):
mask[i + 1:, i * k:(i + 1) * k] = 0
if zero_diagonal:
mask[i:i + 1, i * k:(i + 1) * k] = 0
else:
k = num_inputs // num_outputs
for i in range(num_outputs):
mask[(i + 1) * k:, i:i + 1] = 0
if zero_diagonal:
mask[i * k:(i + 1) * k:, i:i + 1] = 0
return mask
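# Worked example of the mask above: GetLinearARMask(3, 3) returns
#   [[1, 1, 1],
#    [0, 1, 1],
#    [0, 0, 1]]
# so output j may depend on inputs i <= j; with zero_diagonal=True the main diagonal is
# also zeroed and output j only sees inputs i < j (a strictly autoregressive ordering).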
def GetConvARMask(h, w, num_inputs, num_filters, zero_diagonal=False):
l = (h - 1) // 2
m = (w - 1) // 2
mask = np.ones([h, w, num_inputs, num_filters], dtype=np.float32)
mask[:l, :, :, :] = 0
mask[l, :m, :, :] = 0
mask[l, m, :, :] = GetLinearARMask(num_inputs, num_filters, zero_diagonal)
return mask
@utils.MakeTFTemplate
def Conv2DAR(inputs, num_filters,
kernel_size=[3, 3],
zero_diagonal=False,
weights_initializer=None,
biases_initializer=tf.zeros_initializer(),
scope=None):
num_inputs = int(inputs.get_shape()[3])
mask = GetConvARMask(kernel_size[0], kernel_size[1], num_inputs, num_filters, zero_diagonal)
w = tf.get_variable("w", [kernel_size[0], kernel_size[1], num_inputs, num_filters], initializer=weights_initializer)
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value() * mask, [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w * mask, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, 1, 1, 1], "SAME")
return out + tf.reshape(b, [1, 1, 1, num_filters])
@utils.MakeTFTemplate
def ConvAR(x,
h=None,
real_event_shape=[],
hidden_layers=[],
**kwargs):
#input_shape = (
# np.int32(x.shape.as_list())
# if x.shape.is_fully_defined() else tf.shape(x))
#x = tf.reshape(x, [-1] + real_event_shape)
for i, units in enumerate(hidden_layers):
x = Conv2DAR("conv2d_ar_%d"%i, num_filters=units, zero_diagonal=False, **kwargs)(inputs=x)
if i == 0 and h is not None:
if h.shape[-1] != x.shape[-1]:
x += Conv2DWN("conv2d_h", num_filters=int(x.shape[-1]), kernel_size=[1, 1], stride=[1, 1])(h)
else:
x += h
x = tf.nn.elu(x)
shift = Conv2DAR(
"conv2d_shift",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
log_scale = Conv2DAR(
"conv2d_scale",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
#shift = tf.reshape(shift, input_shape)
#log_scale = tf.reshape(log_scale, input_shape)
return shift, log_scale
@utils.MakeTFTemplate
def DenseWN(inputs,
num_outputs,
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.get_shape()[1])
with tf.variable_scope(scope, "dense_wn"):
w = tf.get_variable(
"w", [num_inputs, num_outputs], initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_outputs], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0])))
g = tf.exp(g)
w = g * tf.nn.l2_normalize(w, [0])
out = tf.matmul(inputs, w)
if biases_initializer is not None:
out += tf.expand_dims(b, 0)
return activation(out)
@utils.MakeTFTemplate
def ResConv2D(inputs,
num_filters,
kernel_size,
stride,
activation=tf.nn.elu,
output_init_factor=1.0):
x = Conv2DWN(
"conv2d_in",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
activation=activation)(
inputs=inputs)
non_linear = Conv2DWN(
"conv2d_nl",
num_filters=num_filters,
kernel_size=kernel_size,
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=x)
skip = Conv2DWN(
"conv2d_skip",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=inputs)
return non_linear + skip
@utils.MakeTFTemplate
def ResDense(inputs, num_dims, activation=None):
x = DenseWN("dense_in", num_outputs=num_dims, activation=activation)(inputs)
non_linear = DenseWN("dense_nl", num_outputs=num_dims)(x)
skip = DenseWN("dense_skip", num_outputs=num_dims)(x)
return non_linear + skip
@gin.configurable("conv_hier_encoder")
@utils.MakeTFTemplate
def ConvHierEncoder(images, depth = 2, num_blocks = 2, z_dims = 32, h_dims=160):
x = Conv2DWN("conv2d_in", num_filters=h_dims, stride=[2, 2], kernel_size=[5, 5])(inputs=images - 0.5)
means = []
raw_scales = []
contexts = []
for i in range(depth):
for j in range(num_blocks):
downsample = i > 0 and j == 0
if downsample:
stride = [2, 2]
else:
stride = [1, 1]
h = tf.nn.elu(x)
h = Conv2DWN("conv2d_in_%d_%d"%(i, j), num_filters=2*z_dims + 2 * h_dims, stride=stride, kernel_size=[3, 3])(inputs=h)
mean, raw_scale, context, h = tf.split(h, [z_dims, z_dims, h_dims, h_dims], -1)
means.append(mean)
raw_scales.append(raw_scale)
contexts.append(context)
h = tf.nn.elu(h)
h = Conv2DWN("conv2d_h_%d_%d"%(i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
if downsample:
x = tf.image.resize_nearest_neighbor(x, [int(x.shape[1]) // 2, int(x.shape[2]) // 2])
x += 0.1 * h
return means, raw_scales, contexts
@gin.configurable("conv_hier_prior_post")
@utils.MakeTFTemplate
def ConvHierPriorPost(images=None,
encoder=None,
z=None,
batch=None,
depth = 2,
num_blocks = 2,
z_dims = 32,
h_dims = 160,
image_width = 32):
is_q = encoder is not None
if is_q:
means, raw_scales, up_contexts = encoder(images)
if batch is None:
if images is not None:
batch = tf.shape(images)[0]
else:
batch = tf.shape(z[0])[0]
h = tf.get_variable("h_top", [h_dims], initializer=tf.zeros_initializer())
h = tf.reshape(h, [1, 1, 1, -1])
top_width = image_width // 2 ** num_blocks
h = tf.tile(h, [batch, top_width, top_width, 1])
x = h
ret_z = []
ret_log_pz = []
for i in reversed(list(range(depth))):
for j in reversed(list(range(num_blocks))):
downsample = i > 0 and j == 0
h = tf.nn.elu(x)
h_p = Conv2DWN(
"conv2d_p_%d_%d" % (i, j),
num_filters=2 * h_dims + 2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
p_mean, p_raw_scale, down_context, h_det = tf.split(
h_p, [z_dims, z_dims, h_dims, h_dims], -1)
p_z = tfd.Independent(
tfd.Normal(loc=p_mean, scale=tf.nn.softplus(p_raw_scale)),
reinterpreted_batch_ndims=3)
if is_q:
h_q = Conv2DWN(
"conv2d_q_%d_%d" % (i, j),
num_filters=2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
q_mean, q_raw_scale = tf.split(h_q, [z_dims, z_dims], -1)
context = down_context + up_contexts.pop()
q_mean += means.pop()
q_raw_scale += raw_scales.pop()
num_flat_dims = np.prod(q_mean.shape.as_list()[1:])
_maf_template = ConvAR(
"iaf_%d_%d" % (i, j),
real_event_shape=q_mean.shape.as_list()[1:],
hidden_layers=[h_dims, h_dims],
h=context,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
#x.set_shape([None, num_flat_dims])
x.set_shape([None] + q_mean.shape.as_list()[1:])
return t(x)
bijectors = []
#bijectors.append(tfb.Reshape(tf.shape(q_mean)[1:], [num_flat_dims]))
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
#bijectors.append(tfb.Reshape([num_flat_dims], tf.shape(q_mean)[1:]))
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
bijectors.append(tfb.AffineScalar(shift=q_mean, scale=tf.nn.softplus(q_raw_scale)))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(q_mean), scale=tf.ones_like(q_raw_scale)),
reinterpreted_batch_ndims=3)
q_z = tfd.TransformedDistribution(mvn, bijector)
if is_q:
dist = q_z
else:
dist = p_z
if z is None:
z_val = dist.sample()
else:
z_val = z[0]
z = z[1:]
ret_z.append(z_val)
ret_log_pz.append(dist.log_prob(z_val))
h = tf.concat([z_val, h_det], -1)
if downsample:
new_shape = [2 * int(x.shape[1]), 2 * int(x.shape[2])]
x = tf.image.resize_nearest_neighbor(x, new_shape)
h = tf.image.resize_nearest_neighbor(h, new_shape)
h = Conv2DWN("deconv2d_%d_%d" % (i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
x = x + 0.1 * h
x = tf.image.resize_nearest_neighbor(x, [2 * int(x.shape[1]), 2 * int(x.shape[2])])
x = Conv2DWN("conv2d_out", num_filters=3, stride=[1, 1], kernel_size=[5, 5])(inputs=x)
return ret_z, ret_log_pz, x
@gin.configurable("conv_encoder")
@utils.MakeTFTemplate
def ConvEncoder(images, num_outputs, hidden_dims = 450,
filter_scale = 1, fully_convolutional = False):
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return ResConv2D("res_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
else:
x = tf.reshape(x, [-1, filter_scale * 32 * 4 * 4])
x = ResDense("dense_h", num_dims=hidden_dims, activation=tf.nn.elu)(x)
return DenseWN(
"dense_out",
num_outputs=num_outputs,
weights_initializer=utils.L2HMCInitializer())(
x)
@gin.configurable("conv_decoder")
@utils.MakeTFTemplate
def ConvDecoder(encoding,
output_shape,
filter_scale = 1,
hidden_dims = 450,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
if fully_convolutional:
tf.logging.info("Encoding shape: %s", encoding.shape)
x = ResConv2D("res_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
else:
x = ResDense("dense_in", num_dims=hidden_dims, activation=tf.nn.elu)(encoding)
x = ResDense("dense_h", num_dims=filter_scale * 32 * 4 * 4, activation=tf.nn.elu)(x)
x = tf.reshape(x, [-1, 4, 4, filter_scale * 32])
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = ResConv2D("res_5", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = ResConv2D("res_3", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = ResConv2D(
"res_1",
num_filters=output_shape[-1],
kernel_size=[3, 3],
stride=[1, 1],
output_init_factor=0.01)(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder2")
@utils.MakeTFTemplate
def ConvEncoder2(images, num_outputs, filter_scale = 1):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_5", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
return ResConv2D("conv_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
@gin.configurable("conv_decoder2")
@utils.MakeTFTemplate
def ConvDecoder2(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = Conv2DWN("conv_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = Conv2DWN("conv_5", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_3", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN(
"conv_1",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder3")
@utils.MakeTFTemplate
def ConvEncoder3(images, num_outputs, hidden_dims = 450,
filter_scale = 1):
# This comes from VLAE paper.
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_8", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
return Conv2DWN("conv_10", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
@gin.configurable("conv_decoder3")
@utils.MakeTFTemplate
def ConvDecoder3(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
x = Conv2DWN("conv_1", num_filters=filter_scale * 96, kernel_size=[1, 1], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_8", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_10", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder4")
@utils.MakeTFTemplate
def ConvEncoder4(images, num_outputs,
filter_scale = 1,
fully_convolutional = False):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return Conv2DWN("conv_out", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
else:
return DenseWN("dense_out", num_outputs=num_outputs)(tf.layers.flatten(x))
@gin.configurable("conv_decoder4")
@utils.MakeTFTemplate
def ConvDecoder4(encoding,
output_shape,
filter_scale = 1,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
if not fully_convolutional:
x = tf.reshape(DenseWN("dense_in", num_outputs=8*8*16)(x), [-1, 8, 8, 16])
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[1, 1],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("dense_encoder")
@utils.MakeTFTemplate
def DenseEncoder(images,
num_outputs,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
x = tf.layers.flatten(images)
# Center the data, assuming it goes from [0, 1] initially.
# x = 2.0 * x - 1.0
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
return tf.layers.dense(x, num_outputs, kernel_initializer=utils.L2HMCInitializer())
@gin.configurable("dense_decoder")
@utils.MakeTFTemplate
def DenseDecoder(encoding,
output_shape,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = tf.layers.flatten(encoding)
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
num_outputs = np.prod(output_shape)
return tf.reshape(
tf.layers.dense(
x, num_outputs, kernel_initializer=utils.L2HMCInitializer(factor=0.01)),
[-1] + output_shape)
def IndependentBernouli3D(logits):
return tfd.Independent(
tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=3)
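# Discretized logistic likelihood over images in [0, 1]: the logistic is rescaled to
# [0, 255], quantized to integer pixel values, and the ScaleHack wrapper below maps
# samples back to [0, 1] and rescales/clips inputs before evaluating log_prob.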
def IndependentDiscreteLogistic3D(locations,
scales):
dist = tfd.TransformedDistribution(
distribution=tfd.Logistic(loc=locations, scale=scales),
bijector=tfb.AffineScalar(scale=255.0))
dist = tfd.QuantizedDistribution(distribution=dist, low=0., high=255.0)
dist = tfd.Independent(dist, reinterpreted_batch_ndims=3)
class ScaleHack(object):
def __init__(self, dist):
self._dist = dist
def sample(self, *args, **kwargs):
return self._dist.sample(*args, **kwargs) / 255.0
def log_prob(self, x, *args, **kwargs):
return self._dist.log_prob(tf.clip_by_value(x * 255.0, 0.0, 255.0), *args, **kwargs)
return ScaleHack(dist)
def IndependentDiscreteLogistic3D2(locations,
scales):
class IndependentDiscreteLogistic(object):
def __init__(self, loc, scale):
self._loc = loc
self._scale = scale
def sample(self, *args, **kwargs):
dist = tfd.Logistic(loc=self._loc, scale=self._scale)
return tf.clip_by_value(dist.sample(*args, **kwargs), 0.0, 1.0)
def log_prob(self, x, *args, **kwargs):
sample = x
mean = self._loc
scales = self._scale
binsize = 1.0 / 256.0
sample = (tf.floor(sample / binsize) * binsize - mean) / scales
return tf.reduce_sum(
tf.log(
tf.sigmoid(sample + binsize / scales) - tf.sigmoid(sample) + 1e-7),
[-1, -2, -3])
return IndependentDiscreteLogistic(locations, scales)
@gin.configurable("dense_recognition")
@utils.MakeTFTemplate
def DenseRecognition(images, encoder, z=None, sigma_activation="exp"
):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
num_dims = int(encoding.shape[-1]) // 2
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, 2]), num=2, axis=-1)
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.Affine(shift=mu, scale_diag=sigma)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
tf.logging.info("bijector z shape: %s", z[0].shape)
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
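# Note on the convention shared by the recognition templates below: the returned
# bijector maps samples of the standard normal base distribution to posterior samples
# (here z = mu + sigma * eps), which is what later lets the HMC kernel be run in the
# approximately whitened base space (the NeuTra trick).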
@gin.configurable("dense_recognition_affine")
@utils.MakeTFTemplate
def DenseRecognitionAffine(images, encoder, z=None,
z_dims=None):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
tril_raw = tfd.fill_triangular(encoding[:, z_dims:])
sigma = tf.nn.softplus(tf.matrix_diag_part(tril_raw))
tril = tf.linalg.set_diag(tril_raw, sigma)
bijector = tfb.Affine(shift=mu, scale_tril=tril)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_affine_lr")
@utils.MakeTFTemplate
def DenseRecognitionAffineLR(images, encoder, z=None,
z_dims=None, rank=1):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
sigma = encoding[:, z_dims:2*z_dims]
perturb = encoding[:, 2*z_dims:]
perturb = tf.reshape(perturb, [-1, z_dims, rank])
sigma = tf.nn.softplus(sigma)
bijector = tfb.Affine(shift=mu, scale_diag=sigma,
scale_perturb_factor=perturb)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_rnvp")
@utils.MakeTFTemplate
def DenseRecognitionRNVP(
images,
encoder,
z=None,
num_bijectors=3,
condition_bijector=False,
layer_sizes=[128, 128],
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_bijector:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_bijector:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_bijectors):
_rnvp_template = utils.DenseShiftLogScale(
"rnvp_%d" % i,
h=h,
hidden_layers=layer_sizes,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def rnvp_template(x, output_units, t=_rnvp_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims - output_units])
return t(x, output_units)
bijectors.append(
tfb.Invert(
tfb.RealNVP(
num_masked=num_dims // 2,
shift_and_log_scale_fn=rnvp_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_iaf")
@utils.MakeTFTemplate
def DenseRecognitionIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_iaf_layers):
#_maf_template = tfb.masked_autoregressive_default_template(
# hidden_layers=iaf_layer_sizes,
# activation=tf.nn.softplus,
# kernel_initializer=utils.L2HMCInitializer(factor=0.01))
_maf_template = utils.DenseAR(
"maf_%d" % i,
hidden_layers=iaf_layer_sizes,
h=h,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
class FlipImageBijector(tfb.Bijector):
def __init__(self, validate_args=False, name=None):
"""Creates the `FlipImageBijector`.
The forward transformation flips the input image both left/right and up/down
(a 180 degree rotation); the inverse applies the same flips in reverse order,
restoring the original image.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object.
"""
super(FlipImageBijector, self).__init__(
forward_min_event_ndims=3,
is_constant_jacobian=True,
validate_args=validate_args,
name=name or "flip_image")
def _forward(self, x):
return tf.image.flip_left_right(tf.image.flip_up_down(x))
def _inverse(self, y):
return tf.image.flip_up_down(tf.image.flip_left_right(y))
def _inverse_log_det_jacobian(self, y):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
return tf.constant(0., dtype=y.dtype.base_dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(0., dtype=x.dtype.base_dtype)
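# Flipping an image left/right and up/down twice restores the original pixels, so the
# forward and inverse maps above are each other's inverses and the transformation is
# volume preserving, hence the constant zero log-determinant.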
@gin.configurable("conv_iaf")
@utils.MakeTFTemplate
def ConvIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvIAF requires a convolutional encoder, got shape %s" % (encoding.shape,))
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
bijectors = []
for i in range(num_iaf_layers):
_maf_template = ConvAR(
"iaf_%d" % i,
real_event_shape=encoding_parts[0].shape.as_list()[1:],
hidden_layers=iaf_layer_sizes,
h=h,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None] + encoding_parts[0].shape.as_list()[1:])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(FlipImageBijector())
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.AffineScalar(shift=mu, scale=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("conv_shift_scale")
@utils.MakeTFTemplate
def ConvShiftScale(
images,
encoder,
z=None,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvShiftScale requires a convolutional encoder, got shape %s" % (encoding.shape,))
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.AffineScalar(shift=mu, scale=sigma)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def SimplePrior(z=None, batch=None,
num_dims=None):
"""Models P(z)"""
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros(num_dims), scale_diag=tf.ones(num_dims))
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def Simple3DPrior(z=None, batch=None,
shape=None):
"""Models P(z)"""
mvn = tfd.Independent(tfd.Normal(loc=tf.zeros(shape), scale=tf.ones(shape)), reinterpreted_batch_ndims=3)
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def DenseMNISTNoise(x=None, z=None, decoder=None, return_means=True):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
bernoulli = IndependentBernouli3D(decoding)
if x is None:
if return_means:
x = bernoulli.mean()
else:
x = tf.to_float(bernoulli.sample())
return x, bernoulli.log_prob(x)
@gin.configurable("cifar10_noise")
@utils.MakeTFTemplate
def DenseCIFAR10TNoise(x=None, z=None, decoder=None, return_means=True, uniform_scale=False, logistic_impl="mine"):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
if uniform_scale:
scale = tf.get_variable("scale", initializer=1.0)
scales = tf.reshape(scale, [1, 1, 1])
else:
scales = tf.get_variable(
"scales", [32, 32, 3], initializer=tf.ones_initializer())
if logistic_impl == "mine":
disc_logistic = IndependentDiscreteLogistic3D(decoding, tf.nn.softplus(scales))
elif logistic_impl == "kingma":
disc_logistic = IndependentDiscreteLogistic3D2(decoding, tf.nn.softplus(scales))
if x is None:
x = tf.to_float(disc_logistic.sample())
return x, disc_logistic.log_prob(x)
@gin.configurable("learning_rate")
def LearningRate(train_size, global_step, schedule = "hoffman", warmup_steps=0):
if schedule == "hoffman":
base = tf.train.piecewise_constant(
global_step, [train_size * 500 // TRAIN_BATCH], [1e-3, 1e-4])
elif schedule == "new":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[1e-3, 1e-4, 1e-5])
elif schedule == "new_gentle":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[0.5e-3, 1e-4, 1e-5])
elif schedule == "fast":
base = tf.train.piecewise_constant(
global_step,
[train_size * 800 // TRAIN_BATCH],
[1e-2, 1e-5])
else:
raise ValueError("Invalid schedule: " + schedule)
if warmup_steps == 0:
return base
else:
return tf.minimum(base * tf.to_float(global_step) / warmup_steps, base)
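# Example (assuming train_size=50000 with TRAIN_BATCH=250): the "hoffman" schedule keeps
# the base rate at 1e-3 for the first 50000 * 500 // 250 = 100000 steps and then drops it
# to 1e-4; a nonzero warmup_steps additionally ramps the rate up linearly from zero.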
VAEOutputs = collections.namedtuple(
"VAEOutputs", "log_p_x_z, elbo, sample_means, recon_means, klqp, total_klqp, post_z, prior_z")
AISOutputs = collections.namedtuple(
"AISOutputs",
"log_p, p_accept, z_fin, recon"
)
def MakeVAE(images, recognition, prior, noise, beta, num_samples,
min_kl):
z, log_q_z = recognition(images)
_, log_p_z = prior(z)
_, log_p_x_z = noise(images, z)
post_z = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
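# (beta-)ELBO: E_q[log p(x | z)] minus beta times the summed per-layer KL estimates,
# each floored at min_kl by the clamp above (a free-bits-style constraint).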
elbo = log_p_x_z - beta * total_klqp
recon_means, _ = noise(None, z)
z, _ = prior(batch=num_samples)
sample_means, _ = noise(None, z)
return VAEOutputs(
log_p_x_z=log_p_x_z,
elbo=elbo,
sample_means=sample_means,
recon_means=recon_means,
klqp=klqp,
total_klqp=total_klqp,
post_z=post_z,
prior_z=z)
DLGMOutputs = collections.namedtuple(
"DLGMOutputs",
"elbo, sample_means, mcmc_log_p, recon_means, p_accept, post_z, post_z_chain, q_z, xentpq"
)
@gin.configurable("dlgm")
class DLGM(object):
def __init__(self,
z_dims=64,
beta=1.0,
beta_steps=0,
step_size=0.2,
num_leapfrog_steps=5,
num_hmc_steps=2,
use_neutra=True,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
q_loss_type="klqp",
min_kl=0.0,
symm_factor=0.5,
save_chain_state=False,
chain_warmup_epochs=5,
use_q_z_for_gen=False,
no_gen_train_steps=0,
dataset=None,
use_bijector_for_ais=False,
prior_type="simple",
adapt_step_size=False,
step_size_gain=1e-3,
use_q_z_for_ais=False,
affine_rank=1,
step_size_warmup=0):
self.train_size = dataset.train_size
self._use_q_z_for_ais = use_q_z_for_ais
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._use_bijector_for_ais = use_bijector_for_ais
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
self._use_neutra = use_neutra
self._num_leapfrog_steps = num_leapfrog_steps
self._num_hmc_steps = num_hmc_steps
self._q_loss_type = q_loss_type
self._symm_factor = symm_factor
self._save_chain_state = save_chain_state
self._chain_warmup_epochs = chain_warmup_epochs
self._use_q_z_for_gen = use_q_z_for_gen
self._no_gen_train_steps = no_gen_train_steps
self._step_size_gain = step_size_gain
self._adapt_step_size = adapt_step_size
self._step_size_warmup = step_size_warmup
self._init_step_size = step_size
if self._adapt_step_size:
self._step_size = tf.get_variable("step_size", initializer=step_size)
else:
self._step_size = tf.constant(step_size)
if self._save_chain_state:
self._chain_state = tf.get_variable(
"train_chain_state", [self.train_size, z_dims], trainable=False)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
#assert dataset.name == "cifar10"
#self._encoder = ConvHierEncoder("encoder")
#self._prior_posterior = ConvHierPriorPost("prior_post")
#self._decoder = lambda z: self._prior_posterior(z=z)[2]
#self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
#self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
pass
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition(
"recog",
encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
self._recog = recog
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def AdjustedStepSize(self):
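    # Linearly ramp the HMC step size from zero over the first
    # `step_size_warmup` global steps, then use the stored (possibly
    # adapted) step size unchanged.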
if self._step_size_warmup > 0:
global_step = tf.train.get_or_create_global_step()
max_step = self._init_step_size * tf.to_float(
global_step) / self._step_size_warmup
return tf.where(global_step > self._step_size_warmup, self._step_size,
tf.minimum(max_step, self._step_size))
else:
return self._step_size
def RecogVars(self):
return self._encoder.variables + self._recog.variables
def GenVars(self):
return (
self._prior.variables + self._decoder.variables + self._noise.variables)
def MakeDLGM(self,
images,
other_z_init=None,
use_other_z_init=None,
num_samples=64):
z, log_q_z, bijector = self._recog(images)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
post_z = z
q_z = z
if use_other_z_init is not None:
z_init = [tf.cond(use_other_z_init, lambda: tf.identity(other_layer_z),
lambda: tf.identity(layer_z)) for other_layer_z, layer_z in zip(z, other_z_init)]
z_init = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(self._min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - self._beta * total_klqp
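    # Refine the posterior sample with HMC targeting the unnormalized posterior
    # log p(z) + log p(x|z); when use_neutra is set the chain is run through the
    # recognition bijector (the NeuTra reparameterization).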
def TargetLogProbFn(*z):
for post_z_e, z_e in zip(post_z, z):
tf.logging.info("Shape here: %s %s", post_z_e.shape, z_e.shape)
z_e.set_shape(post_z_e.shape)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=TargetLogProbFn,
step_size=self.AdjustedStepSize(),
num_leapfrog_steps=self._num_leapfrog_steps)
if self._use_neutra:
kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=kernel, bijector=bijector)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=self._num_hmc_steps, current_state=z, kernel=kernel)
z = [tf.stop_gradient(s[-1, Ellipsis]) for s in states]
post_z = z
_, log_q_z, _ = self._recog(images, z=z)
xentpq = -tf.add_n([tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z])
if self._use_q_z_for_gen:
z = q_z
recon_means, _ = self._noise(None, z)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
mcmc_log_p = tf.reduce_mean(tf.add_n(log_p_z) + log_p_x_z)
if self._use_neutra:
log_accept_ratio = kernel_results.inner_results.log_accept_ratio
else:
log_accept_ratio = kernel_results.log_accept_ratio
p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))
z, _ = self._prior(batch=num_samples)
sample_means, _ = self._noise(None, z)
return DLGMOutputs(
elbo=elbo,
sample_means=sample_means,
mcmc_log_p=mcmc_log_p,
recon_means=recon_means,
p_accept=p_accept,
post_z=post_z,
post_z_chain=states,
q_z=z_init,
xentpq=xentpq)
def GetPosterior(self, images):
outputs = self.MakeDLGM(images)
return outputs.post_z
def TrainOp(self, data_idx, images):
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
if self._save_chain_state:
other_z_init = tf.gather(self._chain_state, data_idx)
use_other_z_init = (
global_step > self._chain_warmup_epochs * self.train_size // TRAIN_BATCH)
else:
other_z_init = None
use_other_z_init = None
outputs = self.MakeDLGM(
images, other_z_init=other_z_init, use_other_z_init=use_other_z_init)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
#gen_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
utils.LogAndSummarizeMetrics({
"learning_rate": learning_rate,
"elbo": outputs.elbo,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
"step_size": self.AdjustedStepSize(),
}, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
if self._save_chain_state:
with tf.control_dependencies([outputs.post_z]):
chain_state_update_op = tf.scatter_update(self._chain_state, data_idx,
outputs.post_z)
else:
chain_state_update_op = tf.no_op()
if self._adapt_step_size:
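      # Simple stochastic adaptation: nudge the step size up when the observed
      # acceptance rate is above ~0.65 (the usual HMC target) and down otherwise,
      # clipped to [1e-3, 0.5] and only applied once step_size_warmup has passed.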
new_step_size = self._step_size + self._step_size_gain * (outputs.p_accept - 0.651)
new_step_size = tf.clip_by_value(new_step_size, 1e-3, 0.5)
step_size_op = self._step_size.assign(
tf.where(global_step > self._step_size_warmup, new_step_size,
self._step_size))
else:
step_size_op = tf.no_op()
with tf.name_scope("recog_train"):
if self._q_loss_type == "klqp":
loss = -outputs.elbo
elif self._q_loss_type == "symm":
loss = (
self._symm_factor * -outputs.elbo +
(1.0 - self._symm_factor) * outputs.xentpq)
elif self._q_loss_type == "klpq":
loss = outputs.xentpq
if self._save_chain_state:
# Not super efficient...
loss = tf.cond(use_other_z_init, lambda: tf.identity(loss),
lambda: tf.identity(-outputs.elbo))
recog_train_op = tf.contrib.training.create_train_op(
loss,
opt,
summarize_gradients=True,
variables_to_train=self.RecogVars(),
transform_grads_fn=utils.ProcessGradients)
with tf.name_scope("gen_train"):
gen_loss = tf.cond(global_step < self._no_gen_train_steps,
lambda: -outputs.elbo, lambda: -outputs.mcmc_log_p)
gen_train_op = tf.contrib.training.create_train_op(
gen_loss,
opt,
None,
summarize_gradients=True,
variables_to_train=self.GenVars(),
transform_grads_fn=utils.ProcessGradients)
return tf.group(recog_train_op, gen_train_op, chain_state_update_op, step_size_op)
def EvalOp(self, data_idx, images):
outputs = self.MakeDLGM(images)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
return utils.LogAndSummarizeMetrics({
"elbo": outputs.elbo,
"xentpq": outputs.xentpq,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
})
def AIS(self, images, num_chains):
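    # Estimate log p(x) via annealed importance sampling: propose from the prior
    # or from q(z|x), optionally run the chain through the recognition bijector,
    # and combine the per-chain weights with a logsumexp average.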
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z, _ = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
if self._use_bijector_for_ais:
_, _, bijector = self._recog(images)
else:
bijector = None
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init, bijector=bijector)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("vae")
class VAE(object):
def __init__(self,
z_dims=64,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
beta=1.0,
beta_steps=0,
min_kl=0,
use_q_z_for_ais=False,
dataset=None,
prior_type="simple",
affine_rank=1):
self.train_size = dataset.train_size
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._beta = beta
self._use_q_z_for_ais = use_q_z_for_ais
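    # Optionally anneal the KL weight linearly from 0 up to beta over the first
    # beta_steps global steps; otherwise keep it constant.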
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
assert dataset.name == "cifar10"
self._encoder = ConvHierEncoder("encoder")
self._prior_posterior = ConvHierPriorPost("prior_post")
self._decoder = lambda z: self._prior_posterior(z=z)[2]
self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition("recog", encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
# Drop the bijector return.
self._recog = lambda *args, **kwargs: recog(*args, **kwargs)[:2]
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def MakeVAE(self, images, beta_override=None, num_samples=64):
if beta_override is not None:
beta = beta_override
else:
beta = self._beta
return MakeVAE(images, self._recog, self._prior, self._noise, beta,
num_samples, self._min_kl)
def TrainOp(self, data_idx, images):
outputs = self.MakeVAE(images)
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
metrics = {
"learning_rate": learning_rate,
"log_p_x_z": outputs.log_p_x_z,
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
"beta": self._beta,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
utils.LogAndSummarizeMetrics(metrics, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
return tf.contrib.training.create_train_op(
-outputs.elbo,
opt,
summarize_gradients=True,
transform_grads_fn=utils.ProcessGradients)
def GetPosterior(self, images):
outputs = self.MakeVAE(images)
return outputs.post_z
def EvalOp(self, data_idx, images):
outputs = self.MakeVAE(images, 1.0)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
metrics = {
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
return utils.LogAndSummarizeMetrics(metrics)
def AIS(self, images, num_chains):
outputs = self.MakeVAE(images)
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("train")
def Train(model, dataset, train_dir, master, epochs=600, polyak_averaging=0.0, warmstart_ckpt=""):
data_idx, images = dataset.TrainBatch(TRAIN_BATCH, epochs)
train_op = model.TrainOp(data_idx, images)
if polyak_averaging > 0.0:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=polyak_averaging)
with tf.control_dependencies([train_op]):
train_op = ema.apply()
utils.LogAndSaveHParams()
tf.Session.reset(master)
if warmstart_ckpt:
tf.init_from_checkpoint(warmstart_ckpt, {"/": "/"})
hooks = [
tf.train.StopAtStepHook(last_step=dataset.train_size * epochs //
TRAIN_BATCH),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), every_n_secs=60)
]
tf.contrib.training.train(
train_op,
logdir=train_dir,
master=master,
hooks=hooks,
save_checkpoint_secs=120,
save_summaries_steps=60)
def Eval(model, dataset, train_dir, eval_dir, master,
use_polyak_averaging=False, max_number_of_evaluations=None):
data_idx, images = dataset.TestBatch(TEST_BATCH)
eval_op = model.EvalOp(data_idx, images)
utils.LogAndSaveHParams()
tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
scaffold = tf.train.Scaffold(saver=saver)
tf.Session.reset(master)
hooks = [
# Just for logging.
tf.contrib.training.StopAfterNEvalsHook(dataset.test_size // TEST_BATCH),
tf.contrib.training.SummaryAtEndHook(eval_dir),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), at_end=True)
]
tf.contrib.training.evaluate_repeatedly(
train_dir,
eval_ops=eval_op,
hooks=hooks,
# LOL...
eval_interval_secs=120,
max_number_of_evaluations=max_number_of_evaluations,
master=master,
scaffold=scaffold)
def AISEvalShard(shard, master, num_workers, num_chains, dataset, use_polyak_averaging, writer, train_dir, model_fn, batch):
tf.logging.info("Thread started")
model = model_fn()
tf.logging.info("Built model")
shard_idx = tf.placeholder(tf.int64, [])
tf.logging.info("built data")
data_iterator = dataset.AISIterator(batch, shard_idx, num_workers)
images, _ = data_iterator.get_next()
tf.logging.info("Built mA")
ais_outputs = model.AIS(images, num_chains)
log_p = ais_outputs.log_p
p_accept = ais_outputs.p_accept
tf.logging.info("Built mB")
if shard == 1:
utils.LogAndSaveHParams()
summary_op = tf.summary.merge_all()
global_step = tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
tf.logging.info("Built mC")
global_step_val = []
tf.logging.info("Starting shard %d, %s", shard, master)
#with tf.MonitoredSession(
# tf.train.ChiefSessionCreator(
# master=master,
# checkpoint_dir=train_dir)) as sess:
while True:
try:
tf.Session.reset(master)
with tf.Session(master) as sess:
all_log_p = np.zeros([0])
saver.restore(sess, tf.train.latest_checkpoint(train_dir))
sess.run(data_iterator.initializer, {shard_idx: shard})
try:
step_num = 0
while True:
fetch = {
"log_p": log_p,
"global_step": global_step,
"p_accept": p_accept
}
if shard == 0:
fetch["summary"] = summary_op
tf.logging.info("Shard %d step %d started.", shard, step_num)
fetch = sess.run(fetch)
tf.logging.info("Shard %d step %d done.", shard, step_num)
tf.logging.info("Shard %d log_p %.2f, p_accept: %.2f", shard,
np.mean(fetch["log_p"]),
np.mean(fetch["p_accept"]))
all_log_p = np.hstack([all_log_p, fetch["log_p"]])
if shard == 0 and step_num == 0:
global_step_val.append(fetch["global_step"])
writer.add_summary(fetch["summary"], global_step_val[0])
step_num += 1
except tf.errors.OutOfRangeError:
tf.logging.info("Shard %d done.", shard)
pass
return all_log_p
except tf.errors.AbortedError:
pass
def AISEval(model_fn, dataset, train_dir, eval_dir, worker_master_pattern,
num_workers, num_chains, use_polyak_averaging=False):
tf.reset_default_graph()
log_p_ph = tf.placeholder(tf.float32, [None])
log_p_summary = tf.summary.scalar("log_p", tf.reduce_mean(log_p_ph))
writer = tf.summary.FileWriter(eval_dir)
with futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
results = []
for shard in range(num_workers):
tf.logging.info("Submitting shard %d", shard)
master = worker_master_pattern.format(shard)
results.append(
executor.submit(AISEvalShard, shard, master, num_workers, num_chains,
dataset, use_polyak_averaging, writer, train_dir,
model_fn, AIS_BATCH))
all_log_p = np.zeros([0])
for result in results:
log_p = result.result()
all_log_p = np.hstack([all_log_p, log_p])
log_p = np.mean(all_log_p)
tf.logging.info("Log P: %.2f", log_p)
with tf.Session() as sess:
writer.add_summary(
sess.run(log_p_summary, {log_p_ph: all_log_p}), 0)
writer.flush()
return log_p
MODEL_TO_CLASS = {"vae": VAE, "dlgm": DLGM}
def main(argv):
del argv # Unused.
utils.BindHParams(FLAGS.hparams)
if FLAGS.data_type == "mnist":
dataset = utils.MNISTDataset(FLAGS.mnist_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "fashion_mnist":
dataset = utils.MNISTDataset(FLAGS.fashion_mnist_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "cifar10":
dataset = utils.CIFAR10Dataset(FLAGS.cifar10_data_dir, FLAGS.test_is_valid)
elif FLAGS.data_type == "fake":
dataset = utils.FakeMNISTDataset()
if FLAGS.mode == "train":
model = MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
Train(model, dataset, FLAGS.train_dir, FLAGS.master,
polyak_averaging=FLAGS.polyak_averaging)
elif FLAGS.mode == "eval":
model = MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
Eval(model, dataset, FLAGS.train_dir, FLAGS.eval_dir,
FLAGS.master,
use_polyak_averaging=FLAGS.polyak_averaging > 0.0)
elif FLAGS.mode == "ais_eval":
replica_log_p = []
if FLAGS.ais_replicas:
replicas = FLAGS.ais_replicas
else:
replicas = list(range(FLAGS.ais_num_replicas))
for i in replicas:
train_dir = FLAGS.train_dir.format(i)
eval_dir = FLAGS.eval_dir.format(i)
model_fn = lambda: MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
log_p = AISEval(model_fn, dataset, train_dir, eval_dir,
FLAGS.ais_worker_pattern, FLAGS.ais_num_workers,
FLAGS.ais_num_chains,
use_polyak_averaging=FLAGS.polyak_averaging > 0.0)
replica_log_p.append(log_p)
log_p = np.mean(replica_log_p)
std_log_p = np.std(replica_log_p)
tf.logging.info("Log P: %.2f +- %.2f", log_p,
std_log_p / np.sqrt(len(replicas)))
tf.logging.info("All log_p: %s", replica_log_p)
elif FLAGS.mode == "ais_eval2":
if FLAGS.ais_replicas:
replicas = FLAGS.ais_replicas
else:
replicas = list(range(FLAGS.ais_num_replicas))
for i in replicas:
tf.reset_default_graph()
train_dir = FLAGS.train_dir.format(i)
eval_dir = FLAGS.eval_dir.format(i)
model_fn = lambda: MODEL_TO_CLASS[FLAGS.model](dataset=dataset)
sentinel_filename = os.path.join(eval_dir, "ais_shard_%d_done" % FLAGS.ais_shard)
if tf.gfile.Exists(sentinel_filename):
continue
batch = FLAGS.ais_batch_size
assert (dataset.test_size // FLAGS.ais_num_workers) % batch == 0
writer = tf.summary.FileWriter(eval_dir)
log_p = AISEvalShard(FLAGS.ais_shard, "", FLAGS.ais_num_workers, FLAGS.ais_num_chains,
dataset, FLAGS.polyak_averaging > 0.0, writer, train_dir, model_fn, batch)
tf.gfile.MakeDirs(eval_dir)
with tf.gfile.Open(os.path.join(eval_dir, "ais_shard_%d" % FLAGS.ais_shard), "w") as f:
np.savetxt(f, log_p)
with tf.gfile.Open(sentinel_filename, "w") as f:
f.write("done")
if __name__ == "__main__":
flags.DEFINE_string("mnist_data_dir", "", "")
flags.DEFINE_string("fashion_mnist_data_dir", "", "")
flags.DEFINE_string("cifar10_data_dir", "", "")
flags.DEFINE_string("data_type", "mnist", "")
flags.DEFINE_enum("mode", "train", ["train", "eval", "ais_eval", "ais_eval2"], "")
flags.DEFINE_enum("model", "vae", list(MODEL_TO_CLASS.keys()), "")
flags.DEFINE_string("train_dir", "/tmp/vae/train", "")
flags.DEFINE_string("eval_dir", "/tmp/vae/eval", "")
flags.DEFINE_string("master", "", "")
flags.DEFINE_string("ais_worker_pattern", "", "")
flags.DEFINE_integer("ais_shard", 0, "")
flags.DEFINE_integer("ais_num_workers", 1, "")
flags.DEFINE_integer("ais_num_chains", 1, "")
flags.DEFINE_integer("ais_num_replicas", 1, "")
flags.DEFINE_list("ais_replicas", "", "Manual listing of replicas")
flags.DEFINE_integer("ais_batch_size", 25, "")
flags.DEFINE_float("polyak_averaging", 0.0, "")
flags.DEFINE_boolean("test_is_valid", False, "")
flags.DEFINE(utils.YAMLDictParser(), "hparams", "", "")
app.run(main)
| 1.84375
| 2
|
flask_oauth2_login/base.py
|
BasicBeluga/flask-oauth2-login
| 42
|
12654
|
from flask import request, session, url_for
from requests_oauthlib import OAuth2Session
class OAuth2Login(object):
def __init__(self, app=None):
if app:
self.init_app(app)
self.app = app
def get_config(self, app, name, default_value=None):
return app.config.get(self.config_prefix + name, default_value)
def init_app(self, app):
self.client_id = self.get_config(app, "CLIENT_ID")
self.client_secret = self.get_config(app, "CLIENT_SECRET")
self.scope = self.get_config(app, "SCOPE", self.default_scope).split(",")
self.redirect_scheme = self.get_config(app, "REDIRECT_SCHEME", "https")
app.add_url_rule(
self.get_config(app, "REDIRECT_PATH", self.default_redirect_path),
self.redirect_endpoint,
self.login,
)
@property
def redirect_uri(self):
return url_for(
self.redirect_endpoint,
_external=True,
_scheme=self.redirect_scheme,
)
def session(self):
return OAuth2Session(
self.client_id,
redirect_uri=self.redirect_uri,
scope=self.scope,
)
def authorization_url(self, **kwargs):
sess = self.session()
auth_url, state = sess.authorization_url(self.auth_url, **kwargs)
session[self.state_session_key] = state
return auth_url
def login(self):
sess = self.session()
# Get token
try:
sess.fetch_token(
self.token_url,
code=request.args["code"],
client_secret=self.client_secret,
)
# TODO: Check state
except Warning:
# Ignore warnings
pass
except Exception as e:
return self.login_failure_func(e)
# Get profile
try:
profile = self.get_profile(sess)
except Exception as e:
return self.login_failure_func(e)
return self.login_success_func(sess.token, profile)
def login_success(self, f):
self.login_success_func = f
return f
def login_failure(self, f):
self.login_failure_func = f
return f
def get_profile(self, sess):
raise NotImplementedError
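# A minimal sketch of a concrete provider subclass (the endpoint URLs and names
# below are hypothetical; real providers define the same attributes):
#
#   class ExampleLogin(OAuth2Login):
#       config_prefix = "EXAMPLE_LOGIN_"
#       state_session_key = "example_login_state"
#       redirect_endpoint = "example_login"
#       default_redirect_path = "/login/example"
#       default_scope = "profile,email"
#       auth_url = "https://example.com/oauth2/auth"
#       token_url = "https://example.com/oauth2/token"
#
#       def get_profile(self, sess):
#           return sess.get("https://example.com/oauth2/userinfo").json()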
| 2.96875
| 3
|
segmentation/utils/transforms.py
|
voldemortX/DST-CBC
| 103
|
12655
|
<gh_stars>100-1000
# Mostly copied and modified from torch/vision/references/segmentation to support unlabeled data
# Copied functions from fmassa/vision-1 to support multi-dimensional masks loaded from numpy ndarray
import numpy as np
from PIL import Image
import random
import torch
import utils.functional as F
# For 2/3 dimensional tensors only
def get_tensor_image_size(img):
if img.dim() == 2:
h, w = img.size()
else:
h = img.size()[1]
w = img.size()[2]
return h, w
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target, *args):
for t in self.transforms:
image, target = t(image, target)
return (image, target, *args)
class Resize(object):
def __init__(self, size_image, size_label):
self.size_image = size_image
self.size_label = size_label
def __call__(self, image, target):
image = image if type(image) == str else F.resize(image, self.size_image, interpolation=Image.LINEAR)
target = target if type(target) == str else F.resize(target, self.size_label, interpolation=Image.NEAREST)
return image, target
# Pad image with zeros, yet pad target with 255 (ignore label) on bottom & right if
# given a bigger desired size (or else nothing is done at all)
class ZeroPad(object):
def __init__(self, size):
self.h, self.w = size
@staticmethod
def zero_pad(image, target, h, w):
oh, ow = get_tensor_image_size(image)
pad_h = h - oh if oh < h else 0
pad_w = w - ow if ow < w else 0
image = F.pad(image, (0, 0, pad_w, pad_h), fill=0)
target = target if type(target) == str else F.pad(target, (0, 0, pad_w, pad_h), fill=255)
return image, target
def __call__(self, image, target):
return self.zero_pad(image, target, self.h, self.w)
class RandomResize(object):
def __init__(self, min_size, max_size=None):
self.min_size = min_size
if max_size is None:
max_size = min_size
self.max_size = max_size
def __call__(self, image, target):
min_h, min_w = self.min_size
max_h, max_w = self.max_size
h = random.randint(min_h, max_h)
w = random.randint(min_w, max_w)
image = F.resize(image, (h, w), interpolation=Image.LINEAR)
target = target if type(target) == str else F.resize(target, (h, w), interpolation=Image.NEAREST)
return image, target
class RandomScale(object):
def __init__(self, min_scale, max_scale=None):
self.min_scale = min_scale
if max_scale is None:
max_scale = min_scale
self.max_scale = max_scale
def __call__(self, image, target):
scale = random.uniform(self.min_scale, self.max_scale)
h, w = get_tensor_image_size(image)
h = int(scale * h)
w = int(scale * w)
image = F.resize(image, (h, w), interpolation=Image.LINEAR)
target = target if type(target) == str else F.resize(target, (h, w), interpolation=Image.NEAREST)
return image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
@staticmethod
def get_params(img, output_size):
h, w = get_tensor_image_size(img)
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, image, target):
# Pad if needed
ih, iw = get_tensor_image_size(image)
if ih < self.size[0] or iw < self.size[1]:
image, target = ZeroPad.zero_pad(image, target,
max(self.size[0], ih),
max(self.size[1], iw))
i, j, h, w = self.get_params(image, self.size)
image = F.crop(image, i, j, h, w)
target = target if type(target) == str else F.crop(target, i, j, h, w)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, flip_prob):
self.flip_prob = flip_prob
def __call__(self, image, target):
t = random.random()
if t < self.flip_prob:
image = F.hflip(image)
target = target if (type(target) == str or t >= self.flip_prob) else F.hflip(target)
return image, target
class ToTensor(object):
def __init__(self, keep_scale=False, reverse_channels=False):
        # keep_scale = True => images are kept in [0, 255] instead of being divided by 255
        # reverse_channels = True => RGB images are converted to BGR (the default
        #                            channel order used by OpenCV & Caffe)
self.keep_scale = keep_scale
self.reverse_channels = reverse_channels
def __call__(self, image, target):
image = image if type(image) == str else self._pil_to_tensor(image)
target = target if type(target) == str else self.label_to_tensor(target)
return image, target
@staticmethod
def label_to_tensor(pic): # 3 dimensional arrays or normal segmentation masks
if isinstance(pic, np.ndarray):
return torch.as_tensor(pic.transpose((2, 0, 1)), dtype=torch.float32)
else:
return torch.as_tensor(np.asarray(pic).copy(), dtype=torch.int64)
def _pil_to_tensor(self, pic):
# Convert a PIL Image to tensor(a direct copy)
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == 'F':
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == '1':
img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
if self.reverse_channels: # Beware this only works with 3 channels(can't use -1 with tensors)
img = img[:, :, [2, 1, 0]]
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
if self.keep_scale:
return img.float()
else:
return img.float().div(255)
else:
return img
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
# Init with a python list as the map(mainly for cityscapes's id -> train_id)
class LabelMap(object):
def __init__(self, label_id_map):
self.label_id_map = torch.tensor(label_id_map)
def __call__(self, image, target):
target = target if type(target) == str else self.label_id_map[target]
return image, target
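# A rough usage sketch (assumed values; images/targets are converted to tensors
# first, since the random transforms above measure sizes via tensor dims):
#
#   transforms = Compose([
#       ToTensor(),
#       RandomScale(0.5, 1.5),
#       RandomCrop((321, 321)),
#       RandomHorizontalFlip(0.5),
#       Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
#   ])
#   image, target = transforms(image, target)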
| 2.3125
| 2
|
server.py
|
drunkHatch/CMPUT404-assignment-webserver
| 0
|
12656
|
# coding: utf-8
import socketserver
import re
import socket
import datetime
import os
import mimetypes as MT
import sys
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
# status codes could be handled
STATUS_CODE_RESPONSE = {
0: " 0 Surprise!",
200: " 200 OK",
301: " 301 Moved Permanently",
404: " 404 Not Found",
405: " 405 Method Not Allowed"
}
# methods could be handled
HTTP_REQUEST_METHODS = {
"GET": 1,
}
# some hard coded text
END_OF_LINE_RESPONSE = "\r\n"
PROTOCOL_RESPONSE = "HTTP/1.1"
DIRECTORY_TO_SERVE = "www"
# open file error here
GOODFILE = 1
ISADIRECTORY = 2
NOFILE = 3
# response generate class
class MyServerResponse:
def __init__(self, status=0, expire_time="-1", content_type="default", \
accept_ranges="none"):
self.response_header = {
"status_response": PROTOCOL_RESPONSE + STATUS_CODE_RESPONSE[status],
"date_response": "Date: " + datetime.datetime.now().\
strftime('%A, %d %b %Y %X %Z'),
"expires": "Expires: " + expire_time,
"content_type": "Content-Type: " + content_type,
"accept_ranges": "Accept-Ranges: " + accept_ranges,
"redirect_address": "Location: http://",
"allow_header": "ALlow: GET"
}
# send header via various status_code
def send_header(self, conn, status_code):
tmp = self.response_header["status_response"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
if status_code == 200:
tmp = self.response_header["expires"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
tmp = self.response_header["content_type"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
elif status_code == 301:
tmp = self.response_header["redirect_address"] + \
END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
elif status_code == 405:
tmp = self.response_header["allow_header"] + END_OF_LINE_RESPONSE
conn.sendall(tmp.encode("utf-8"))
def set_status_response(self, status_code):
self.response_header["status_response"] = \
PROTOCOL_RESPONSE + STATUS_CODE_RESPONSE[status_code]
# request for storing received request attributes
class MyServerRequest:
def __init__(self):
self.method = None
self.url = None
def method_is_valid(self):
if self.method in HTTP_REQUEST_METHODS:
return True
else:
return False
# add more implementation here
def url_is_valid(self):
return True
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
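        # Read the raw request, parse the method and URL, try to open the
        # requested file under the www/ directory, then send the status line,
        # headers and (if available) the file body back to the client.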
rest_protocol_flag = False
standard_rest_cmd = "GET / HTTP/1.1"
# init the socket
self.request.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
full_data = b""
with self.request as conn:
# declaration here
new_request = MyServerRequest()
status_code = 0
open_file = True
file = None
content_type = "void of magic"
file_name = "none"
type_of_file = "default"
open_result = -100
new_response = MyServerResponse()
# recv all data
while True:
data = conn.recv(1024)
if not data: break
full_data += data
if b"\r\n" in data:
break
if b"utf" in full_data:
print(full_data)
pass
str_full_data = full_data.decode("utf-8")
            splited_commands = re.split('[\r\n]+', str_full_data)
whole_request = splited_commands[0].split(' ')
            # if we can find a request line in the received data
            if len(whole_request) > 1:
                new_request.method = whole_request[0]  # request method
                new_request.url = whole_request[1]  # requested URL
# if method we get could not be handled
if not new_request.method_is_valid():
status_code = 405
open_file = False
content_type = "none"
new_response.set_status_response(status_code)
            # if no errors occurred, try to open the requested url
if open_file:
open_result, file, file_name = openRequestedFile(new_request.url)
# try opening requested file, and return corresponding status_code
status_code = checkErrorsOfOpenedFile\
(status_code, open_result, file, file_name)
# SECURITY: check permission of opened file
status_code = checkPermissionOfRequestedFile\
(status_code, open_result, file, file_name)
new_response.set_status_response(status_code)
if status_code == 200 and file_name != None:
type_of_file = MT.guess_type(file_name, False)[0]
elif status_code == 301:
new_response.response_header["redirect_address"] += \
self.server.server_address[0] + ":" + \
str(self.server.server_address[1]) + \
new_request.url + "/"
new_response.set_status_response(status_code)
if open_result == GOODFILE and type_of_file != None:
new_response.response_header["content_type"] = "Content-Type: "
new_response.response_header["content_type"] += type_of_file
new_response.send_header(conn, status_code)
self.request.sendall(b"\r\n")
# then open file/directory and send it
if file:
self.request.sendfile(file)
#self.request.sendall(b"\r\n")
conn.close()
# argument: requested url
# return value: open file result, opened file object, local path
def openRequestedFile(client_request_url):
cru = client_request_url
if cru[-1] == r'/':
cru += "index.html"
complete_path = DIRECTORY_TO_SERVE + cru
try:
result = open(complete_path, 'rb')
content_type = cru.split(".")
return GOODFILE, result, cru
except IsADirectoryError as e:
return ISADIRECTORY, None, None
except FileNotFoundError as n:
return NOFILE, None, None
# check type and error of opened file
def checkErrorsOfOpenedFile(status_code,open_result, file, file_name):
if open_result == GOODFILE:
status_code = 200
type_of_file = MT.guess_type(file_name, False)[0]
elif open_result == ISADIRECTORY:
status_code = 301
elif open_result == NOFILE:
status_code = 404
return status_code
# SECURITY: check the permission of opened file
def checkPermissionOfRequestedFile(status_code,open_result, file, file_name):
if file_name == None:
return status_code
abs_path_of_serving_dir = os.getcwd()
abs_path_of_serving_dir += "/www/"
length_of_serving_dir = len(abs_path_of_serving_dir)
abs_path_of_request = os.path.abspath(file.name)
length_of_requested_object = len(abs_path_of_request)
if length_of_serving_dir > length_of_requested_object:
status_code = 404
elif abs_path_of_serving_dir != abs_path_of_request[:length_of_serving_dir]:
status_code = 404
return status_code
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# https://stackoverflow.com/questions/15260558/python-tcpserver-address-already-in-use-but-i-close-the-server-and-i-use-allow
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
try:
server.serve_forever()
except KeyboardInterrupt: # exit if ctrl+C
sys.exit(0)
| 2.5625
| 3
|
cloudkitty/rating/hash/controllers/root.py
|
wanghuiict/cloudkitty
| 97
|
12657
|
# -*- coding: utf-8 -*-
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from cloudkitty import rating
from cloudkitty.rating.hash.controllers import field as field_api
from cloudkitty.rating.hash.controllers import group as group_api
from cloudkitty.rating.hash.controllers import mapping as mapping_api
from cloudkitty.rating.hash.controllers import service as service_api
from cloudkitty.rating.hash.controllers import threshold as threshold_api
from cloudkitty.rating.hash.datamodels import mapping as mapping_models
class HashMapConfigController(rating.RatingRestControllerBase):
"""Controller exposing all management sub controllers."""
_custom_actions = {
'types': ['GET']
}
services = service_api.HashMapServicesController()
fields = field_api.HashMapFieldsController()
groups = group_api.HashMapGroupsController()
mappings = mapping_api.HashMapMappingsController()
thresholds = threshold_api.HashMapThresholdsController()
@wsme_pecan.wsexpose([wtypes.text])
def get_types(self):
"""Return the list of every mapping type available.
"""
return mapping_models.MAP_TYPE.values
| 1.828125
| 2
|
.test/test/task2/Aufgabe1/python-lib/cuddlefish/docs/webdocs.py
|
sowinski/testsubtree
| 0
|
12658
|
import os, re, errno
import markdown
import cgi
from cuddlefish import packaging
from cuddlefish.docs import apirenderer
from cuddlefish._version import get_versions
INDEX_PAGE = '/doc/static-files/base.html'
BASE_URL_INSERTION_POINT = '<base '
VERSION_INSERTION_POINT = '<div id="version">'
THIRD_PARTY_PACKAGE_SUMMARIES = '<ul id="third-party-package-summaries">'
HIGH_LEVEL_PACKAGE_SUMMARIES = '<ul id="high-level-package-summaries">'
LOW_LEVEL_PACKAGE_SUMMARIES = '<ul id="low-level-package-summaries">'
CONTENT_ID = '<div id="main-content">'
TITLE_ID = '<title>'
DEFAULT_TITLE = 'Add-on SDK Documentation'
def get_documentation(package_name, modules_json, doc_path):
documented_modules = []
for root, dirs, files in os.walk(doc_path):
subdir_path = root.split(os.sep)[len(doc_path.split(os.sep)):]
for filename in files:
if filename.endswith(".md"):
modname = filename[:-len(".md")]
modpath = subdir_path + [modname]
documented_modules.append(modpath)
return documented_modules
def tag_wrap(text, tag, attributes={}):
result = '\n<' + tag
for name in attributes.keys():
result += ' ' + name + '=' + '"' + attributes[name] + '"'
    result += '>' + text + '</' + tag + '>\n'
return result
def is_third_party(package_json):
return (not is_high_level(package_json)) and \
(not(is_low_level(package_json)))
def is_high_level(package_json):
return 'jetpack-high-level' in package_json.get('keywords', [])
def is_low_level(package_json):
return 'jetpack-low-level' in package_json.get('keywords', [])
def insert_after(target, insertion_point_id, text_to_insert):
insertion_point = target.find(insertion_point_id) + len(insertion_point_id)
return target[:insertion_point] + text_to_insert + target[insertion_point:]
class WebDocs(object):
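    """Builds documentation pages by inserting package summaries, module lists
    and rendered page content into the static base.html template."""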
def __init__(self, root, base_url = None):
self.root = root
self.pkg_cfg = packaging.build_pkg_cfg(root)
self.packages_json = packaging.build_pkg_index(self.pkg_cfg)
self.base_page = self._create_base_page(root, base_url)
def create_guide_page(self, path):
path, ext = os.path.splitext(path)
md_path = path + '.md'
md_content = unicode(open(md_path, 'r').read(), 'utf8')
guide_content = markdown.markdown(md_content)
return self._create_page(guide_content)
def create_module_page(self, path):
path, ext = os.path.splitext(path)
md_path = path + '.md'
module_content = apirenderer.md_to_div(md_path)
return self._create_page(module_content)
def create_package_page(self, package_name):
package_content = self._create_package_detail(package_name)
return self._create_page(package_content)
def _create_page(self, page_content):
page = self._insert_title(self.base_page, page_content)
page = insert_after(page, CONTENT_ID, page_content)
return page.encode('utf8')
def _create_module_list(self, package_json):
package_name = package_json['name']
libs = package_json['files'][1]['lib'][1]
doc_path = package_json.get('doc', None)
if not doc_path:
return ''
modules = get_documentation(package_name, libs, doc_path)
modules.sort()
module_items = ''
relative_doc_path = doc_path[len(self.root) + 1:]
relative_doc_path_pieces = relative_doc_path.split(os.sep)
del relative_doc_path_pieces[-1]
relative_doc_URL = "/".join(relative_doc_path_pieces)
for module in modules:
module_link = tag_wrap('/'.join(module), 'a', \
{'href': relative_doc_URL + '/' + '/'.join(module) + '.html'})
module_items += module_link
return module_items
def _create_package_summaries(self, packages_json, include):
packages = ''
for package_name in packages_json.keys():
package_json = packages_json[package_name]
if not include(package_json):
continue
package_path = self.pkg_cfg["packages"][package_name]["root_dir"]
package_directory = package_path[len(self.root) + 1:]
package_directory = "/".join(package_directory.split(os.sep))
package_link = tag_wrap(package_name, 'a', {'href': \
package_directory + "/" \
+ 'index.html'})
text = tag_wrap(package_link, 'h4')
text += self._create_module_list(package_json)
packages += tag_wrap(text, 'li', {'class':'package-summary', \
'style':'display: block;'})
return packages
def _create_base_page(self, root, base_url):
base_page = unicode(open(root + INDEX_PAGE, 'r').read(), 'utf8')
if base_url:
base_tag = 'href="' + base_url + '"'
base_page = insert_after(base_page, BASE_URL_INSERTION_POINT, base_tag)
sdk_version = get_versions()["version"]
base_page = insert_after(base_page, VERSION_INSERTION_POINT, "Version " + sdk_version)
third_party_summaries = \
self._create_package_summaries(self.packages_json, is_third_party)
base_page = insert_after(base_page, \
THIRD_PARTY_PACKAGE_SUMMARIES, third_party_summaries)
high_level_summaries = \
self._create_package_summaries(self.packages_json, is_high_level)
base_page = insert_after(base_page, \
HIGH_LEVEL_PACKAGE_SUMMARIES, high_level_summaries)
low_level_summaries = \
self._create_package_summaries(self.packages_json, is_low_level)
base_page = insert_after(base_page, \
LOW_LEVEL_PACKAGE_SUMMARIES, low_level_summaries)
return base_page
def _create_package_detail_row(self, field_value, \
field_descriptor, field_name):
meta = tag_wrap(tag_wrap(field_descriptor, 'span', \
{'class':'meta-header'}), 'td')
value = tag_wrap(tag_wrap(field_value, 'span', \
{'class':field_name}), 'td')
return tag_wrap(meta + value, 'tr')
def _create_package_detail_table(self, package_json):
table_contents = ''
if package_json.get('author', None):
table_contents += self._create_package_detail_row(\
cgi.escape(package_json['author']), 'Author', 'author')
if package_json.get('version', None):
table_contents += self._create_package_detail_row(\
package_json['version'], 'Version', 'version')
if package_json.get('license', None):
table_contents += self._create_package_detail_row(\
package_json['license'], 'License', 'license')
if package_json.get('dependencies', None):
table_contents += self._create_package_detail_row(\
', '.join(package_json['dependencies']), \
'Dependencies', 'dependencies')
table_contents += self._create_package_detail_row(\
self._create_module_list(package_json), 'Modules', 'modules')
return tag_wrap(tag_wrap(table_contents, 'tbody'), 'table', \
{'class':'meta-table'})
def _create_package_detail(self, package_name):
package_json = self.packages_json.get(package_name, None)
if not package_json:
raise IOError(errno.ENOENT, 'Package not found')
# pieces of the package detail: 1) title, 2) table, 3) description
package_title = tag_wrap(package_name, 'h1')
table = self._create_package_detail_table(package_json)
description = ''
if package_json.get('readme', None):
description += tag_wrap(tag_wrap(\
markdown.markdown(\
package_json['readme']), 'p'), 'div', {'class':'docs'})
return tag_wrap(package_title + table + description, 'div', \
{'class':'package-detail'})
def _insert_title(self, target, content):
match = re.search('<h1>.*</h1>', content)
if match:
title = match.group(0)[len('<h1>'):-len('</h1>')] + ' - ' + \
DEFAULT_TITLE
else:
title = DEFAULT_TITLE
target = insert_after(target, TITLE_ID, title)
return target
| 2.3125
| 2
|
src/c3nav/site/templatetags/route_render.py
|
johnjohndoe/c3nav
| 132
|
12659
|
<reponame>johnjohndoe/c3nav
from django import template
register = template.Library()
@register.filter
def negate(value):
return -value
@register.filter
def subtract(value, arg):
return value - arg
| 1.953125
| 2
|
coax/experience_replay/_prioritized.py
|
sleepy-owl/coax
| 0
|
12660
|
# ------------------------------------------------------------------------------------------------ #
# MIT License #
# #
# Copyright (c) 2020, Microsoft Corporation #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or #
# substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ------------------------------------------------------------------------------------------------ #
import jax
import numpy as onp
import chex
from ..reward_tracing import TransitionBatch
from ..utils import SumTree
from ._base import BaseReplayBuffer
__all__ = (
'PrioritizedReplayBuffer',
)
class PrioritizedReplayBuffer(BaseReplayBuffer):
r"""
A simple ring buffer for experience replay, with prioritized sampling.
This class uses *proportional* sampling, which means that the transitions are sampled with
relative probability :math:`p_i` defined as:
.. math::
p_i\ =\ \frac
{\left(|\mathcal{A}_i| + \epsilon\right)^\alpha}
{\sum_{j=1}^N \left(|\mathcal{A}_j| + \epsilon\right)^\alpha}
Here :math:`\mathcal{A}_i` are advantages provided at insertion time and :math:`N` is the
capacity of the buffer, which may be quite large. The :math:`\mathcal{A}_i` are typically just
TD errors collected from a value-function updater, e.g. :func:`QLearning.td_error
<coax.td_learning.QLearning.td_error>`.
Since the prioritized samples are biased, the :attr:`sample` method also produces non-trivial
importance weights (stored in the :class:`TransitionBatch.W
<coax.reward_tracing.TransitionBatch>` attribute). The logic for constructing these weights for
a sample of batch size :math:`n` is:
.. math::
w_i\ =\ \frac{\left(Np_i\right)^{-\beta}}{\max_{j=1}^n \left(Np_j\right)^{-\beta}}
See section 3.4 of https://arxiv.org/abs/1511.05952 for more details.
Parameters
----------
capacity : positive int
The capacity of the experience replay buffer.
alpha : positive float, optional
The sampling temperature :math:`\alpha>0`.
beta : positive float, optional
The importance-weight exponent :math:`\beta>0`.
epsilon : positive float, optional
The small regulator :math:`\epsilon>0`.
random_seed : int, optional
To get reproducible results.
"""
def __init__(self, capacity, alpha=1.0, beta=1.0, epsilon=1e-4, random_seed=None):
if not (isinstance(capacity, int) and capacity > 0):
raise TypeError(f"capacity must be a positive int, got: {capacity}")
if not (isinstance(alpha, (float, int)) and alpha > 0):
raise TypeError(f"alpha must be a positive float, got: {alpha}")
if not (isinstance(beta, (float, int)) and beta > 0):
raise TypeError(f"beta must be a positive float, got: {beta}")
if not (isinstance(epsilon, (float, int)) and epsilon > 0):
raise TypeError(f"epsilon must be a positive float, got: {epsilon}")
self._capacity = int(capacity)
self._alpha = float(alpha)
self._beta = float(beta)
self._epsilon = float(epsilon)
self._random_seed = random_seed
self._rnd = onp.random.RandomState(random_seed)
self.clear() # sets: self._deque, self._index
@property
def capacity(self):
return self._capacity
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, new_alpha):
if not (isinstance(new_alpha, (float, int)) and new_alpha > 0):
raise TypeError(f"alpha must be a positive float, got: {new_alpha}")
if onp.isclose(new_alpha, self._alpha, rtol=0.01):
return # noop if new value is too close to old value (not worth the computation cost)
new_values = onp.where(
self._sumtree.values <= 0, 0., # only change exponents for positive values
onp.exp(onp.log(onp.maximum(self._sumtree.values, 1e-15)) * (new_alpha / self._alpha)))
self._sumtree.set_values(..., new_values)
self._alpha = float(new_alpha)
@property
def beta(self):
return self._beta
@beta.setter
def beta(self, new_beta):
if not (isinstance(new_beta, (float, int)) and new_beta > 0):
raise TypeError(f"beta must be a positive float, got: {new_beta}")
self._beta = float(new_beta)
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self, new_epsilon):
if not (isinstance(new_epsilon, (float, int)) and new_epsilon > 0):
raise TypeError(f"epsilon must be a positive float, got: {new_epsilon}")
self._epsilon = float(new_epsilon)
def add(self, transition_batch, Adv):
r"""
Add a transition to the experience replay buffer.
Parameters
----------
transition_batch : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object.
Adv : ndarray
A batch of advantages, used to construct the priorities :math:`p_i`.
"""
if not isinstance(transition_batch, TransitionBatch):
raise TypeError(
f"transition_batch must be a TransitionBatch, got: {type(transition_batch)}")
transition_batch.idx = self._index + onp.arange(transition_batch.batch_size)
idx = transition_batch.idx % self.capacity # wrap around
chex.assert_equal_shape([idx, Adv])
self._storage[idx] = list(transition_batch.to_singles())
self._sumtree.set_values(idx, onp.power(onp.abs(Adv) + self.epsilon, self.alpha))
self._index += transition_batch.batch_size
def sample(self, batch_size=32):
r"""
Get a batch of transitions to be used for bootstrapped updates.
Parameters
----------
batch_size : positive int, optional
The desired batch size of the sample.
Returns
-------
transitions : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object.
"""
idx = self._sumtree.sample(n=batch_size)
P = self._sumtree.values[idx] / self._sumtree.root_value # prioritized, biased propensities
W = onp.power(P * len(self), -self.beta) # inverse propensity weights (β≈1)
W /= W.max() # for stability, ensure only down-weighting (see sec. 3.4 of arxiv:1511.05952)
transition_batch = _concatenate_leaves(self._storage[idx])
chex.assert_equal_shape([transition_batch.W, W])
transition_batch.W *= W
return transition_batch
def update(self, idx, Adv):
r"""
Update the priority weights of transitions previously added to the buffer.
Parameters
----------
idx : 1d array of ints
The identifiers of the transitions to be updated.
Adv : ndarray
The corresponding updated advantages.
"""
idx = onp.asarray(idx, dtype='int32')
Adv = onp.asarray(Adv, dtype='float32')
chex.assert_equal_shape([idx, Adv])
chex.assert_rank([idx, Adv], 1)
idx_lookup = idx % self.capacity # wrap around
new_values = onp.where(
_get_transition_batch_idx(self._storage[idx_lookup]) == idx, # only update if ids match
onp.power(onp.abs(Adv) + self.epsilon, self.alpha),
self._sumtree.values[idx_lookup])
self._sumtree.set_values(idx_lookup, new_values)
def clear(self):
r""" Clear the experience replay buffer. """
self._storage = onp.full(shape=(self.capacity,), fill_value=None, dtype='object')
self._sumtree = SumTree(capacity=self.capacity)
self._index = 0
def __len__(self):
return min(self.capacity, self._index)
def __bool__(self):
return bool(len(self))
def __iter__(self):
return iter(self._storage[:len(self)])
def _concatenate_leaves(pytrees):
return jax.tree_multimap(lambda *leaves: onp.concatenate(leaves, axis=0), *pytrees)
@onp.vectorize
def _get_transition_batch_idx(transition):
return transition.idx
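# A minimal usage sketch (hypothetical variable names; `transitions` would come
# from a reward tracer and `td_error` from a value-function updater):
#
#   buffer = PrioritizedReplayBuffer(capacity=100000, alpha=0.6, beta=0.4)
#   buffer.add(transitions, Adv=td_error)
#   batch = buffer.sample(batch_size=32)
#   buffer.update(batch.idx, Adv=new_td_error)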
| 1.148438
| 1
|
src/OTLMOW/OEFModel/Classes/Wilddet.py
|
davidvlaminck/OTLClassPython
| 2
|
12661
|
# coding=utf-8
from OTLMOW.OEFModel.EMObject import EMObject
# Generated with OEFClassCreator. To modify: extend, do not edit
class Wilddet(EMObject):
"""Een wilddetectiesysteem zal de weggebruikers waarschuwen bij de aanwezigheid van eventueel overstekend wild"""
typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#Wilddet'
label = 'Wilddetectiesysteem'
def __init__(self):
super().__init__()
| 1.804688
| 2
|
test/python/testworkflow.py
|
kokizzu/txtai
| 0
|
12662
|
<gh_stars>0
"""
Workflow module tests
"""
import contextlib
import glob
import io
import os
import tempfile
import sys
import unittest
import numpy as np
import torch
from txtai.api import API
from txtai.embeddings import Documents, Embeddings
from txtai.pipeline import Nop, Segmentation, Summary, Translation, Textractor
from txtai.workflow import Workflow, Task, ConsoleTask, ExportTask, FileTask, ImageTask, RetrieveTask, StorageTask, WorkflowTask
# pylint: disable = C0411
from utils import Utils
# pylint: disable=R0904
class TestWorkflow(unittest.TestCase):
"""
Workflow tests.
"""
@classmethod
def setUpClass(cls):
"""
Initialize test data.
"""
# Default YAML workflow configuration
cls.config = """
# Embeddings index
writable: true
embeddings:
scoring: bm25
path: google/bert_uncased_L-2_H-128_A-2
content: true
# Text segmentation
segmentation:
sentences: true
# Workflow definitions
workflow:
index:
tasks:
- action: segmentation
- action: index
search:
tasks:
- search
transform:
tasks:
- transform
"""
def testBaseWorkflow(self):
"""
Tests a basic workflow
"""
translate = Translation()
# Workflow that translate text to Spanish
workflow = Workflow([Task(lambda x: translate(x, "es"))])
results = list(workflow(["The sky is blue", "Forest through the trees"]))
self.assertEqual(len(results), 2)
def testChainWorkflow(self):
"""
Tests a chain of workflows
"""
workflow1 = Workflow([Task(lambda x: [y * 2 for y in x])])
workflow2 = Workflow([Task(lambda x: [y - 1 for y in x])], batch=4)
results = list(workflow2(workflow1([1, 2, 4, 8, 16, 32])))
self.assertEqual(results, [1, 3, 7, 15, 31, 63])
def testComplexWorkflow(self):
"""
Tests a complex workflow
"""
textractor = Textractor(paragraphs=True, minlength=150, join=True)
summary = Summary("t5-small")
embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2"})
documents = Documents()
def index(x):
documents.add(x)
return x
# Extract text and summarize articles
articles = Workflow([FileTask(textractor), Task(lambda x: summary(x, maxlength=15))])
# Complex workflow that extracts text, runs summarization then loads into an embeddings index
tasks = [WorkflowTask(articles, r".\.pdf$"), Task(index, unpack=False)]
data = ["file://" + Utils.PATH + "/article.pdf", "Workflows can process audio files, documents and snippets"]
# Convert file paths to data tuples
data = [(x, element, None) for x, element in enumerate(data)]
# Execute workflow, discard results as they are streamed
workflow = Workflow(tasks)
data = list(workflow(data))
# Build the embeddings index
embeddings.index(documents)
# Cleanup temporary storage
documents.close()
# Run search and validate result
index, _ = embeddings.search("search text", 1)[0]
self.assertEqual(index, 0)
self.assertEqual(data[0][1], "txtai builds an AI-powered index over sections")
def testConcurrentWorkflow(self):
"""
Tests running concurrent task actions
"""
nop = Nop()
workflow = Workflow([Task([nop, nop], concurrency="thread")])
results = list(workflow([2, 4]))
self.assertEqual(results, [(2, 2), (4, 4)])
workflow = Workflow([Task([nop, nop], concurrency="process")])
results = list(workflow([2, 4]))
self.assertEqual(results, [(2, 2), (4, 4)])
workflow = Workflow([Task([nop, nop], concurrency="unknown")])
results = list(workflow([2, 4]))
self.assertEqual(results, [(2, 2), (4, 4)])
def testConsoleWorkflow(self):
"""
Tests a console task
"""
        # Console output
workflow = Workflow([ConsoleTask()])
output = io.StringIO()
with contextlib.redirect_stdout(output):
list(workflow([{"id": 1, "text": "Sentence 1"}, {"id": 2, "text": "Sentence 2"}]))
self.assertIn("Sentence 2", output.getvalue())
def testExportWorkflow(self):
"""
Tests an export task
"""
# Excel export
path = os.path.join(tempfile.gettempdir(), "export.xlsx")
workflow = Workflow([ExportTask(output=path)])
list(workflow([{"id": 1, "text": "Sentence 1"}, {"id": 2, "text": "Sentence 2"}]))
self.assertGreater(os.path.getsize(path), 0)
# Export CSV
path = os.path.join(tempfile.gettempdir(), "export.csv")
workflow = Workflow([ExportTask(output=path)])
list(workflow([{"id": 1, "text": "Sentence 1"}, {"id": 2, "text": "Sentence 2"}]))
self.assertGreater(os.path.getsize(path), 0)
# Export CSV with timestamp
path = os.path.join(tempfile.gettempdir(), "export-timestamp.csv")
workflow = Workflow([ExportTask(output=path, timestamp=True)])
list(workflow([{"id": 1, "text": "Sentence 1"}, {"id": 2, "text": "Sentence 2"}]))
# Find timestamped file and ensure it has data
path = glob.glob(os.path.join(tempfile.gettempdir(), "export-timestamp*.csv"))[0]
self.assertGreater(os.path.getsize(path), 0)
def testExtractWorkflow(self):
"""
Tests column extraction tasks
"""
workflow = Workflow([Task(lambda x: x, unpack=False, column=0)], batch=1)
results = list(workflow([(0, 1)]))
self.assertEqual(results[0], 0)
results = list(workflow([(0, (1, 2), None)]))
self.assertEqual(results[0], (0, 1, None))
results = list(workflow([1]))
self.assertEqual(results[0], 1)
def testImageWorkflow(self):
"""
Tests an image task
"""
workflow = Workflow([ImageTask()])
results = list(workflow([Utils.PATH + "/books.jpg"]))
self.assertEqual(results[0].size, (1024, 682))
def testInvalidWorkflow(self):
"""
Tests task with invalid parameters
"""
with self.assertRaises(TypeError):
Task(invalid=True)
def testMergeWorkflow(self):
"""
Tests merge tasks
"""
task = Task([lambda x: [pow(y, 2) for y in x], lambda x: [pow(y, 3) for y in x]], merge="hstack")
# Test hstack (column-wise) merge
workflow = Workflow([task])
results = list(workflow([2, 4]))
self.assertEqual(results, [(4, 8), (16, 64)])
# Test vstack (row-wise) merge
task.merge = "vstack"
results = list(workflow([2, 4]))
self.assertEqual(results, [4, 8, 16, 64])
# Test concat (values joined into single string) merge
task.merge = "concat"
results = list(workflow([2, 4]))
self.assertEqual(results, ["4. 8", "16. 64"])
# Test no merge
task.merge = None
results = list(workflow([2, 4, 6]))
self.assertEqual(results, [[4, 16, 36], [8, 64, 216]])
# Test generated (id, data, tag) tuples are properly returned
workflow = Workflow([Task(lambda x: [(0, y, None) for y in x])])
results = list(workflow([(1, "text", "tags")]))
self.assertEqual(results[0], (0, "text", None))
def testMergeUnbalancedWorkflow(self):
"""
        Test merge tasks with unbalanced outputs (i.e. one action produces more output than another for the same input).
"""
nop = Nop()
segment1 = Segmentation(sentences=True)
task = Task([nop, segment1])
# Test hstack
workflow = Workflow([task])
results = list(workflow(["This is a test sentence. And another sentence to split."]))
self.assertEqual(
results, [("This is a test sentence. And another sentence to split.", ["This is a test sentence.", "And another sentence to split."])]
)
# Test vstack
task.merge = "vstack"
workflow = Workflow([task])
results = list(workflow(["This is a test sentence. And another sentence to split."]))
self.assertEqual(
results, ["This is a test sentence. And another sentence to split.", "This is a test sentence.", "And another sentence to split."]
)
def testNumpyWorkflow(self):
"""
Tests a numpy workflow
"""
task = Task([lambda x: np.power(x, 2), lambda x: np.power(x, 3)], merge="hstack")
# Test hstack (column-wise) merge
workflow = Workflow([task])
results = list(workflow(np.array([2, 4])))
self.assertTrue(np.array_equal(np.array(results), np.array([[4, 8], [16, 64]])))
# Test vstack (row-wise) merge
task.merge = "vstack"
results = list(workflow(np.array([2, 4])))
self.assertEqual(results, [4, 8, 16, 64])
# Test no merge
task.merge = None
results = list(workflow(np.array([2, 4, 6])))
self.assertTrue(np.array_equal(np.array(results), np.array([[4, 16, 36], [8, 64, 216]])))
def testRetrieveWorkflow(self):
"""
Tests a retrieve task
"""
# Test retrieve with generated temporary directory
workflow = Workflow([RetrieveTask()])
results = list(workflow(["file://" + Utils.PATH + "/books.jpg"]))
self.assertTrue(results[0].endswith("books.jpg"))
# Test retrieve with specified temporary directory
workflow = Workflow([RetrieveTask(directory=os.path.join(tempfile.gettempdir(), "retrieve"))])
results = list(workflow(["file://" + Utils.PATH + "/books.jpg"]))
self.assertTrue(results[0].endswith("books.jpg"))
def testScheduleWorkflow(self):
"""
Tests workflow schedules
"""
# Test workflow schedule with Python
workflow = Workflow([Task()])
workflow.schedule("* * * * * *", ["test"], 1)
self.assertEqual(len(workflow.tasks), 1)
# Test workflow schedule with YAML
workflow = """
segmentation:
sentences: true
workflow:
segment:
schedule:
cron: '* * * * * *'
elements:
- a sentence to segment
iterations: 1
tasks:
- action: segmentation
task: console
"""
output = io.StringIO()
with contextlib.redirect_stdout(output):
app = API(workflow)
app.wait()
self.assertIn("a sentence to segment", output.getvalue())
def testScheduleErrorWorkflow(self):
"""
Tests workflow schedules with errors
"""
def action(elements):
raise FileNotFoundError
# Test workflow proceeds after exception raised
with self.assertLogs() as logs:
workflow = Workflow([Task(action=action)])
workflow.schedule("* * * * * *", ["test"], 1)
self.assertIn("FileNotFoundError", " ".join(logs.output))
def testStorageWorkflow(self):
"""
Tests a storage task
"""
workflow = Workflow([StorageTask()])
results = list(workflow(["local://" + Utils.PATH, "test string"]))
self.assertEqual(len(results), 19)
def testTensorTransformWorkflow(self):
"""
Tests a tensor workflow with list transformations
"""
# Test one-one list transformation
task = Task(lambda x: x.tolist())
workflow = Workflow([task])
results = list(workflow(np.array([2])))
self.assertEqual(results, [2])
# Test one-many list transformation
task = Task(lambda x: [x.tolist() * 2])
workflow = Workflow([task])
results = list(workflow(np.array([2])))
self.assertEqual(results, [2, 2])
def testTorchWorkflow(self):
"""
Tests a torch workflow
"""
# pylint: disable=E1101,E1102
task = Task([lambda x: torch.pow(x, 2), lambda x: torch.pow(x, 3)], merge="hstack")
# Test hstack (column-wise) merge
workflow = Workflow([task])
results = np.array([x.numpy() for x in workflow(torch.tensor([2, 4]))])
self.assertTrue(np.array_equal(results, np.array([[4, 8], [16, 64]])))
# Test vstack (row-wise) merge
task.merge = "vstack"
results = list(workflow(torch.tensor([2, 4])))
self.assertEqual(results, [4, 8, 16, 64])
# Test no merge
task.merge = None
results = np.array([x.numpy() for x in workflow(torch.tensor([2, 4, 6]))])
self.assertTrue(np.array_equal(np.array(results), np.array([[4, 16, 36], [8, 64, 216]])))
def testYamlFunctionWorkflow(self):
"""
Tests YAML workflow with a function action
"""
# Create function and add to module
def action(elements):
return [x * 2 for x in elements]
sys.modules[__name__].action = action
workflow = """
workflow:
run:
tasks:
- testworkflow.action
"""
app = API(workflow)
self.assertEqual(list(app.workflow("run", [1, 2])), [2, 4])
def testYamlIndexWorkflow(self):
"""
Tests reading a YAML index workflow in Python.
"""
app = API(self.config)
self.assertEqual(
list(app.workflow("index", ["This is a test sentence. And another sentence to split."])),
["This is a test sentence.", "And another sentence to split."],
)
# Read from file
path = os.path.join(tempfile.gettempdir(), "workflow.yml")
with open(path, "w", encoding="utf-8") as f:
f.write(self.config)
app = API(path)
self.assertEqual(
list(app.workflow("index", ["This is a test sentence. And another sentence to split."])),
["This is a test sentence.", "And another sentence to split."],
)
# Read from YAML object
app = API(API.read(self.config))
self.assertEqual(
list(app.workflow("index", ["This is a test sentence. And another sentence to split."])),
["This is a test sentence.", "And another sentence to split."],
)
def testYamlSearchWorkflow(self):
"""
Test reading a YAML search workflow in Python.
"""
# Test search
app = API(self.config)
list(app.workflow("index", ["This is a test sentence. And another sentence to split."]))
self.assertEqual(
list(app.workflow("search", ["another"]))[0]["text"],
"And another sentence to split.",
)
def testYamlWorkflowTask(self):
"""
Tests YAML workflow with a workflow task
"""
# Create function and add to module
def action(elements):
return [x * 2 for x in elements]
sys.modules[__name__].action = action
workflow = """
workflow:
run:
tasks:
- testworkflow.action
flow:
tasks:
- run
"""
app = API(workflow)
self.assertEqual(list(app.workflow("flow", [1, 2])), [2, 4])
def testYamlTransformWorkflow(self):
"""
Test reading a YAML transform workflow in Python.
"""
        # Test transform
app = API(self.config)
self.assertEqual(len(list(app.workflow("transform", ["text"]))[0]), 128)
def testYamlError(self):
"""
Tests reading a YAML workflow with errors.
"""
# Read from string
config = """
# Workflow definitions
workflow:
error:
tasks:
- action: error
"""
with self.assertRaises(KeyError):
API(config)
| 2.28125
| 2
|
apps/summary/urls.py
|
sotkonstantinidis/testcircle
| 3
|
12663
|
from django.conf.urls import url
from .views import SummaryPDFCreateView
urlpatterns = [
url(r'^(?P<id>[\d]+)/$',
SummaryPDFCreateView.as_view(),
name='questionnaire_summary'),
]
| 1.65625
| 2
|
utipy/array/blend.py
|
LudvigOlsen/utipy
| 0
|
12664
|
"""
@author: ludvigolsen
"""
from typing import Union
import numpy as np
import pandas as pd
from utipy.utils.check_instance import check_instance
from utipy.utils.convert_to_type import convert_to_type
def blend(x1: Union[list, np.ndarray, pd.Series], x2: Union[list, np.ndarray, pd.Series], amount: float = 0.5) -> Union[list, np.ndarray, pd.Series]:
"""
Blend two arrays
Parameters
----------
x1 : list, np.ndarray, pd.Series
The first array.
x2 : list, np.ndarray, pd.Series
The second array.
amount : float
Blend rate.
Percentage between 0-1
0: Keep only x1.
1: Keep only x2.
0.1: 10% x2 / 90% x1.
A value in-between 0-1 will result in integers becoming floats.
Returns
-------
list, np.ndarray, pd.Series
Blended array with type of the original (x1)
Examples
--------
Uncomment code to run.
# x1 = [1,2,3,4,5]
# x2 = [4,5,6,7,8]
# blend(x1, x2, amount = 0.5)
returns [2.5,3.5,4.5,5.5,6.5]
"""
# Get instance types (np.ndarray, list, pd.Series)
instance_type = check_instance(x1)
x1_weighted = np.multiply(x1, (1 - amount))
x2_weighted = np.multiply(x2, amount)
blended = x1_weighted + x2_weighted
# Convert to original type (np.ndarray, list, pd.Series)
return convert_to_type(blended, instance_type)
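

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It simply
# exercises the example from the docstring above and assumes the utipy
# helpers (check_instance / convert_to_type) behave as documented.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    x1 = [1, 2, 3, 4, 5]
    x2 = [4, 5, 6, 7, 8]
    # A 50/50 blend averages the two arrays element-wise.
    print(blend(x1, x2, amount=0.5))  # expected: [2.5, 3.5, 4.5, 5.5, 6.5]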
| 2.921875
| 3
|
output/models/ms_data/regex/hangul_compatibility_jamo_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1
|
12665
|
<reponame>tefra/xsdata-w3c-tests
from output.models.ms_data.regex.hangul_compatibility_jamo_xsd.hangul_compatibility_jamo import Doc
__all__ = [
"Doc",
]
| 0.917969
| 1
|
ex082.py
|
favitoria/python123
| 0
|
12666
|
resposta = 'Ss'
numeros = 0
listaTODOS = []
listaPAR = []
listaIMPAR = []
while resposta != 'N':
numeros = int(input('Digite um número: '))
resposta = str(input('Deseja continuar [S/N]? '))
if numeros % 2 == 0:
listaPAR.append(numeros)
elif numeros % 2 == 1:
listaIMPAR.append(numeros)
listaTODOS.append(numeros)
print(f'Os valores PARES digitados foram: {listaPAR}')
print(f'Os valores IMPARES digitados foram: {listaIMPAR}')
listaTODOS.sort()
print(f'No TOTAL foram: {listaTODOS}')
| 3.90625
| 4
|
CodingInterview2/29_PrintMatrix/print_matrix.py
|
hscspring/TheAlgorithms-Python
| 10
|
12667
|
<gh_stars>1-10
"""
Interview Question 29: Print a matrix in clockwise (spiral) order.
Problem: given a matrix, print every number in clockwise order, spiraling from the outside in.
"""
def make_matrix(rows: int, cols: int) -> list:
res = []
k = 0
for i in range(rows):
tmp = []
for j in range(cols):
k += 1
tmp.append(k)
res.append(tmp)
return res
def print_matrix_clockwisely(matrix: list) -> list:
"""
    Print the given matrix in clockwise (spiral) order.
Parameters
-----------
matrix: list[list]
the given matrix.
Returns
---------
out: list
the clockwise order of the matrix.
Notes
------
"""
if not matrix:
return []
if not matrix[0]:
return []
res = []
start = 0
rows, cols = len(matrix), len(matrix[0])
while rows > 2 * start and cols > 2 * start:
print_circle2(matrix, rows, cols, start, res)
start += 1
return res
def print_circle(matrix: list, rows: int, cols: int, start: int, res: list):
endx = cols - 1 - start
endy = rows - 1 - start
# left -> right
for i in range(start, endx+1):
res.append(matrix[start][i])
# up -> below
if start < endy:
for i in range(start+1, endy+1):
res.append(matrix[i][endx])
# right -> left
if start < endx and start < endy:
for i in reversed(range(start, endx)):
res.append(matrix[endy][i])
# below -> up
if start < endx and start < endy - 1:
for i in reversed(range(start+1, endy)):
res.append(matrix[i][start])
def print_circle2(matrix: list, rows: int, cols: int, start: int, res: list):
endx = cols - 1 - start
endy = rows - 1 - start
# left -> right
for i in range(start, endx+1):
res.append(matrix[start][i])
# up -> below
for i in range(start+1, endy+1):
res.append(matrix[i][endx])
# right -> left
if start < endy:
for i in reversed(range(start, endx)):
res.append(matrix[endy][i])
# below -> up
if start < endx:
for i in reversed(range(start+1, endy)):
res.append(matrix[i][start])
if __name__ == '__main__':
m = make_matrix(1,5)
print(m)
res = print_matrix_clockwisely(m)
print(res)
| 4.0625
| 4
|
migrations/versions/ee5315dcf3e1_.py
|
wildintellect/tasking-manager
| 3
|
12668
|
<reponame>wildintellect/tasking-manager<filename>migrations/versions/ee5315dcf3e1_.py
"""empty message
Revision ID: ee<PASSWORD>
Revises: <PASSWORD>
Create Date: 2017-05-24 10:39:46.586986
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ee<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('email_address', sa.String(), nullable=True))
op.add_column('users', sa.Column('facebook_id', sa.String(), nullable=True))
op.add_column('users', sa.Column('is_email_verified', sa.Boolean(), nullable=True))
op.add_column('users', sa.Column('linkedin_id', sa.String(), nullable=True))
op.add_column('users', sa.Column('twitter_id', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'twitter_id')
op.drop_column('users', 'linkedin_id')
op.drop_column('users', 'is_email_verified')
op.drop_column('users', 'facebook_id')
op.drop_column('users', 'email_address')
# ### end Alembic commands ###
| 1.320313
| 1
|
python/random-videogame-generator.py
|
iamashiq/Hacktoberfest2021-2
| 6
|
12669
|
<filename>python/random-videogame-generator.py<gh_stars>1-10
print("Are you trying to find new videogames to play?")
print("let me help you!")
print("do you like shooting games, yes or no")
shooting=input()
if shooting = "yes"
print("do you like battle royale games?")
br=input()
if br="yes"
print("you should try out call of duty!")
else if br="no"
print("you should try overwatch!")
else if shooting="no"
print("do you like sports games, yes or no")
sports=input()
if sports="yes"
print("try out Fifa or NBA2k!")
else if sports="no"
print("I know, try out rocket league!")
| 3.421875
| 3
|
cosmic_ray/operators/unary_operator_replacement.py
|
rob-smallshire/cosmic-ray
| 0
|
12670
|
<gh_stars>0
"""Implementation of the unary-operator-replacement operator.
"""
import ast
from .operator import Operator
from ..util import build_mutations
# None indicates we want to delete the operator
OPERATORS = (ast.UAdd, ast.USub, ast.Invert, ast.Not, None)
def _to_ops(from_op):
"""
The sequence of operators which `from_op` could be mutated to.
"""
for to_op in OPERATORS:
if to_op and isinstance(from_op, ast.Not):
# 'not' can only be removed but not replaced with
# '+', '-' or '~' b/c that may lead to strange results
pass
elif isinstance(from_op, ast.UAdd) and (to_op is None):
# '+1' => '1' yields equivalent mutations
pass
else:
yield to_op
class MutateUnaryOperator(Operator):
"""An operator that modifies unary operators."""
def visit_UnaryOp(self, node): # pylint: disable=invalid-name
"""
http://greentreesnakes.readthedocs.io/en/latest/nodes.html#UnaryOp
"""
return self.visit_mutation_site(
node,
len(build_mutations([node.op], _to_ops)))
def mutate(self, node, idx):
"Perform the `idx`th mutation on node."
_, to_op = build_mutations([node.op], _to_ops)[idx]
if to_op:
node.op = to_op()
return node
return node.operand
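

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of cosmic-ray itself): what a unary-operator
# replacement mutation looks like on a bare AST, using only the standard
# library. Assumes Python 3.9+ for ast.unparse.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    tree = ast.parse('-x')
    unary = tree.body[0].value          # the UnaryOp node for `-x`
    assert isinstance(unary.op, ast.USub)
    unary.op = ast.UAdd()               # mutate USub -> UAdd
    print(ast.unparse(tree))            # prints: +x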
| 3.28125
| 3
|
src/icolos/core/workflow_steps/calculation/rmsd.py
|
jharrymoore/Icolos
| 11
|
12671
|
from typing import List
from pydantic import BaseModel
from icolos.core.containers.compound import Conformer, unroll_conformers
from icolos.utils.enums.step_enums import StepRMSDEnum, StepDataManipulationEnum
from icolos.core.workflow_steps.step import _LE
from icolos.core.workflow_steps.calculation.base import StepCalculationBase
_SR = StepRMSDEnum()
_SDM = StepDataManipulationEnum()
class StepRMSD(StepCalculationBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# extend parameters
if _SR.METHOD not in self.settings.additional.keys():
self.settings.additional[_SR.METHOD] = _SR.METHOD_ALIGNMOL
def _calculate_RMSD(self, conformers: List[Conformer]):
for conf in conformers:
rmsd_matrix = self._calculate_rms_matrix(
conformers=[conf] + conf.get_extra_data()[_SDM.KEY_MATCHED],
rms_method=self._get_rms_method(),
)
# use the specified tag name if it is the first value and append an index in case there are more
for idx, col in enumerate(rmsd_matrix.columns[1:]):
combined_tag = "".join([_SR.RMSD_TAG, "" if idx == 0 else str(idx)])
rmsd_value = rmsd_matrix.iloc[[0]][col][0]
conf.get_molecule().SetProp(combined_tag, str(rmsd_value))
conf.get_extra_data()[_SDM.KEY_MATCHED][idx].get_molecule().SetProp(
combined_tag, str(rmsd_value)
)
def execute(self):
# this assumes that the conformers that are to be matched for the calculation of the RMSD matrix, are attached
# as a list in a generic data field with a specified key
conformers = unroll_conformers(compounds=self.get_compounds())
self._calculate_RMSD(conformers=conformers)
self._logger.log(
f"Annotated {len(conformers)} conformers with RMSD values (tag: {_SR.RMSD_TAG}).",
_LE.INFO,
)
# TODO: add a nice pandas DF with the RMSD values to a generic data field
| 2.09375
| 2
|
iot/iot_portal/doctype/iot_homepage/iot_homepage.py
|
srdgame/symlink_iot
| 4
|
12672
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class IOTHomepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from IOT")
delete_page_cache('iot_home')
| 2.03125
| 2
|
src/garage/envs/env_spec.py
|
Maltimore/garage
| 2
|
12673
|
<filename>src/garage/envs/env_spec.py
"""EnvSpec class."""
class EnvSpec:
"""EnvSpec class.
Args:
observation_space (akro.Space): The observation space of the env.
action_space (akro.Space): The action space of the env.
"""
def __init__(self, observation_space, action_space):
self.observation_space = observation_space
self.action_space = action_space
| 1.867188
| 2
|
exercises/exe41 - 50/exe047.py
|
thomas-rohde/Classes-Python
| 0
|
12674
|
t = int(input('Digite um nº: '))
for t0 in range(1, 11):
print('{} X {} = {}'.format(t, t0, t * t0))
| 3.6875
| 4
|
py_build/funcs.py
|
Aesonus/py-build
| 0
|
12675
|
from __future__ import annotations
from typing import Callable, Sequence, TYPE_CHECKING
import functools
if TYPE_CHECKING:
from .build import BuildStepCallable
def split_step_name(name: str, new = ' ', old='_'):
return name.replace(old, new).capitalize()
def print_step_name(formatter=split_step_name, args: Sequence=()):
"""Gets a decorator that formats the name of the build step and prints it"""
fmt_args = args
def format_step_name(func: Callable):
@functools.wraps(func)
def decorated(*args, **kwargs):
print(formatter(func.__name__, *fmt_args))
return func(*args, **kwargs)
return decorated
return format_step_name
def print_step_doc():
def decorate_with(func: Callable):
@functools.wraps(func)
def output_func_doc(*args, **kwargs):
print(func.__doc__)
            return func(*args, **kwargs)
return output_func_doc
return decorate_with
def composed(*decorators: BuildStepCallable) -> BuildStepCallable:
"""
    Compose multiple decorators into one. Useful for defining specific
    outputs and progress reports for a build step and reusing them.
"""
def decorated(func: BuildStepCallable):
for decorator in reversed(decorators):
func = decorator(func)
return func
return decorated
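

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): composing the
# reporting decorators defined above onto a build step. `clean_build_dir` is
# a hypothetical step used only for demonstration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    @composed(print_step_name(), print_step_doc())
    def clean_build_dir():
        """Removes stale artifacts from the build directory."""
        return True

    # Prints "Clean build dir" followed by the docstring, then runs the step.
    clean_build_dir()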
| 2.9375
| 3
|
src/jellyroll/managers.py
|
jacobian-archive/jellyroll
| 3
|
12676
|
import datetime
from django.db import models
from django.db.models import signals
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from tagging.fields import TagField
class ItemManager(models.Manager):
def __init__(self):
super(ItemManager, self).__init__()
self.models_by_name = {}
def create_or_update(self, instance, timestamp=None, url=None, tags="", source="INTERACTIVE", source_id="", **kwargs):
"""
Create or update an Item from some instace.
"""
# If the instance hasn't already been saved, save it first. This
# requires disconnecting the post-save signal that might be sent to
# this function (otherwise we could get an infinite loop).
if instance._get_pk_val() is None:
try:
signals.post_save.disconnect(self.create_or_update, sender=type(instance))
except Exception, err:
reconnect = False
else:
reconnect = True
instance.save()
if reconnect:
signals.post_save.connect(self.create_or_update, sender=type(instance))
# Make sure the item "should" be registered.
if not getattr(instance, "jellyrollable", True):
return
# Check to see if the timestamp is being updated, possibly pulling
# the timestamp from the instance.
if hasattr(instance, "timestamp"):
timestamp = instance.timestamp
if timestamp is None:
update_timestamp = False
timestamp = datetime.datetime.now()
else:
update_timestamp = True
# Ditto for tags.
if not tags:
for f in instance._meta.fields:
if isinstance(f, TagField):
tags = getattr(instance, f.attname)
break
if not url:
if hasattr(instance,'url'):
url = instance.url
# Create the Item object.
ctype = ContentType.objects.get_for_model(instance)
item, created = self.get_or_create(
content_type = ctype,
object_id = force_unicode(instance._get_pk_val()),
defaults = dict(
timestamp = timestamp,
source = source,
source_id = source_id,
tags = tags,
url = url,
)
)
item.tags = tags
item.source = source
item.source_id = source_id
if update_timestamp:
item.timestamp = timestamp
# Save and return the item.
item.save()
return item
def follow_model(self, model):
"""
Follow a particular model class, updating associated Items automatically.
"""
self.models_by_name[model.__name__.lower()] = model
signals.post_save.connect(self.create_or_update, sender=model)
def get_for_model(self, model):
"""
Return a QuerySet of only items of a certain type.
"""
return self.filter(content_type=ContentType.objects.get_for_model(model))
def get_last_update_of_model(self, model, **kwargs):
"""
Return the last time a given model's items were updated. Returns the
epoch if the items were never updated.
"""
qs = self.get_for_model(model)
if kwargs:
qs = qs.filter(**kwargs)
try:
return qs.order_by('-timestamp')[0].timestamp
except IndexError:
return datetime.datetime.fromtimestamp(0)
| 2.28125
| 2
|
app_metrics.py
|
GSH-LAN/byceps
| 33
|
12677
|
"""
metrics application instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
import os
from byceps.config import ConfigurationError
from byceps.metrics.application import create_app
ENV_VAR_NAME_DATABASE_URI = 'DATABASE_URI'
database_uri = os.environ.get(ENV_VAR_NAME_DATABASE_URI)
if not database_uri:
raise ConfigurationError(
f"No database URI was specified via the '{ENV_VAR_NAME_DATABASE_URI}' "
"environment variable.",
)
app = create_app(database_uri)
| 1.898438
| 2
|
mysql_tests/test_schema.py
|
maestro-1/gino
| 1,376
|
12678
|
from enum import Enum
import pytest
import gino
from gino.dialects.aiomysql import AsyncEnum
pytestmark = pytest.mark.asyncio
db = gino.Gino()
class MyEnum(Enum):
ONE = "one"
TWO = "two"
class Blog(db.Model):
__tablename__ = "s_blog"
id = db.Column(db.BigInteger(), primary_key=True)
title = db.Column(db.Unicode(255), index=True, comment="Title Comment")
visits = db.Column(db.BigInteger(), default=0)
comment_id = db.Column(db.ForeignKey("s_comment.id"))
number = db.Column(db.Enum(MyEnum), nullable=False, default=MyEnum.TWO)
number2 = db.Column(AsyncEnum(MyEnum), nullable=False, default=MyEnum.TWO)
class Comment(db.Model):
__tablename__ = "s_comment"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id", name="blog_id_fk"))
blog_seq = db.Sequence("blog_seq", metadata=db, schema="schema_test")
async def test(engine, define=True):
async with engine.acquire() as conn:
assert not await engine.dialect.has_table(conn, "non_exist")
Blog.__table__.comment = "Blog Comment"
db.bind = engine
await db.gino.create_all()
await Blog.number.type.create_async(engine, checkfirst=True)
await Blog.number2.type.create_async(engine, checkfirst=True)
await db.gino.create_all(tables=[Blog.__table__], checkfirst=True)
await blog_seq.gino.create(checkfirst=True)
await Blog.__table__.gino.create(checkfirst=True)
await db.gino.drop_all()
await db.gino.drop_all(tables=[Blog.__table__], checkfirst=True)
await Blog.__table__.gino.drop(checkfirst=True)
await blog_seq.gino.drop(checkfirst=True)
if define:
class Comment2(db.Model):
__tablename__ = "s_comment_2"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id"))
await db.gino.create_all()
await db.gino.drop_all()
| 2.28125
| 2
|
solutions/29-distinct-powers.py
|
whitegreyblack/euler
| 0
|
12679
|
<gh_stars>0
# problem 29
# Distinct powers
"""
Consider all integer combinations of a**b for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2**2=4, 2**3=8, 2**4=16, 2**5=32
3**2=9, 3**3=27, 3**4=81, 3**5=243
4**2=16, 4**3=64, 4**4=256, 4**5=1024
5**2=25, 5**3=125, 5**4=625, 5**5=3125
If they are then placed in numerical order, with any repeats removed,
we get the following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a**b for
2 ≤ a ≤ 100 and 2 ≤ b ≤ 100?
"""
# analysis
"""
^ | 2 | 3 | 4 | 5 | N |
---+---+---+---+----+---+
 2 | 4 | 8 | 16| 32 |2^N|
---+---+---+---+----+---+
3 | 9 | 27| 81| 243|3^N|
---+---+---+---+----+---+
4 | 16| 64|256|1024|4^N|
---+---+---+---+----+---+
5 | 25|125|625|3125|5^N|
---+---+---+---+----+---+
"""
# solution
s = set(a**b for a in range(2, 101) for b in range(2, 101))
print(len(s))
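
# Illustrative check (not part of the original solution): the 2..5 case from
# the problem statement should produce exactly the 15 distinct terms listed
# above.
small = set(a**b for a in range(2, 6) for b in range(2, 6))
assert len(small) == 15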
| 3.296875
| 3
|
flexget/plugins/input/input_csv.py
|
metaMMA/Flexget
| 0
|
12680
|
<reponame>metaMMA/Flexget
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.utils import PY3
import logging
import csv
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
log = logging.getLogger('csv')
class InputCSV(object):
"""
Adds support for CSV format. Configuration may seem a bit complex,
    but this has the advantage of being a universal solution regardless of the
    CSV layout and internal entry fields.
Configuration format:
csv:
url: <url>
values:
<field>: <number>
Example DB-fansubs:
csv:
url: http://www.dattebayo.com/t/dump
values:
        title: 3 # title is in the 3rd field
url: 1 # download url is in 1st field
Fields title and url are mandatory. First field is 1.
List of other common (optional) fields can be found from wiki.
"""
schema = {
'type': 'object',
'properties': {
'url': {'type': 'string', 'format': 'url'},
'values': {
'type': 'object',
'additionalProperties': {'type': 'integer'},
'required': ['title', 'url'],
},
},
'required': ['url', 'values'],
'additionalProperties': False,
}
@cached('csv')
def on_task_input(self, task, config):
entries = []
try:
r = task.requests.get(config['url'])
except RequestException as e:
raise plugin.PluginError('Error fetching `%s`: %s' % (config['url'], e))
# CSV module needs byte strings, we'll convert back to unicode later
if PY3:
page = r.text.splitlines()
else:
page = r.text.encode('utf-8').splitlines()
for row in csv.reader(page):
if not row:
continue
entry = Entry()
for name, index in list(config.get('values', {}).items()):
try:
# Convert the value back to unicode
if PY3:
entry[name] = row[index - 1].strip()
else:
entry[name] = row[index - 1].decode('utf-8').strip()
except IndexError:
raise plugin.PluginError('Field `%s` index is out of range' % name)
entries.append(entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(InputCSV, 'csv', api_ver=2)
| 2.390625
| 2
|
gdget.py
|
tienfuc/gdcmdtools
| 29
|
12681
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
from gdcmdtools.base import BASE_INFO
from gdcmdtools.base import DEBUG_LEVEL
from gdcmdtools.get import GDGet
from gdcmdtools.get import export_format
import argparse
from argparse import RawTextHelpFormatter
from pprint import pprint
import logging
logger = logging.getLogger()
__THIS_APP = 'gdget'
__THIS_DESCRIPTION = 'Tool to download file from Google Drive'
__THIS_VERSION = BASE_INFO["version"]
def test():
assert True
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(
description='%s v%s - %s - %s (%s)' %
(__THIS_APP,
__THIS_VERSION,
__THIS_DESCRIPTION,
BASE_INFO["app"],
BASE_INFO["description"]),
formatter_class=RawTextHelpFormatter)
arg_parser.add_argument(
'file_id',
help='The file id or drive link for the file you\'re going to download')
help_export_format = "\n".join(
[
re.search(
".*google-apps\.(.*)",
k).group(1) +
": " +
", ".join(
export_format[k]) for k in export_format.iterkeys()])
arg_parser.add_argument(
'-f',
'--export_format',
metavar='FORMAT',
default='raw',
required=False,
help='specify the export format for downloading,\ngoogle_format: export_format\n%s' %
help_export_format)
arg_parser.add_argument(
'-s',
'--save_as',
metavar='NEW_FILE_NAME',
help='save the downloaded file as ')
arg_parser.add_argument('--debug',
choices=DEBUG_LEVEL,
default=DEBUG_LEVEL[-1],
help='define the debug level')
args = arg_parser.parse_args()
# set debug devel
logger.setLevel(getattr(logging, args.debug.upper()))
logger.debug(args)
get = GDGet(args.file_id, args.export_format, args.save_as)
result = get.run()
sys.exit(0)
| 2.5
| 2
|
Lotus/controller/common.py
|
Jayin/Lotus
| 0
|
12682
|
# -*- coding: utf-8 -*-
from Lotus.app import app
from flask import render_template
@app.route('/')
def index():
return 'welcome'
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html')
@app.errorhandler(405)
def request_method_error(error):
return render_template('405.html')
| 2.3125
| 2
|
src/retrocookie/git.py
|
cjolowicz/retrocookie
| 15
|
12683
|
"""Git interface."""
from __future__ import annotations
import contextlib
import functools
import operator
import re
import subprocess # noqa: S404
from dataclasses import dataclass
from dataclasses import field
from pathlib import Path
from typing import Any
from typing import cast
from typing import Iterator
from typing import List
from typing import Optional
import pygit2
from retrocookie.utils import removeprefix
def git(
*args: str, check: bool = True, **kwargs: Any
) -> subprocess.CompletedProcess[str]:
"""Invoke git."""
return subprocess.run( # noqa: S603,S607
["git", *args], check=check, text=True, capture_output=True, **kwargs
)
VERSION_PATTERN = re.compile(
r"""
(?P<major>\d+)\.
(?P<minor>\d+)
(\.(?P<patch>\d+))?
""",
re.VERBOSE,
)
@dataclass(frozen=True, order=True)
class Version:
"""Simplistic representation of git versions."""
major: int
minor: int
patch: int
_text: Optional[str] = field(default=None, compare=False)
@classmethod
def parse(cls, text: str) -> Version:
"""Extract major.minor[.patch] from the start of the text."""
match = VERSION_PATTERN.match(text)
if match is None:
raise ValueError(f"invalid version {text!r}")
parts = match.groupdict(default="0")
return cls(
int(parts["major"]), int(parts["minor"]), int(parts["patch"]), _text=text
)
def __str__(self) -> str:
"""Return the original representation."""
return (
self._text
if self._text is not None
else f"{self.major}.{self.minor}.{self.patch}"
)
def version() -> Version:
"""Return the git version."""
text = git("version").stdout.strip()
text = removeprefix(text, "git version ")
return Version.parse(text)
def get_default_branch() -> str:
"""Return the default branch for new repositories."""
get_configs = [
pygit2.Config.get_global_config,
pygit2.Config.get_system_config,
]
for get_config in get_configs:
with contextlib.suppress(IOError, KeyError):
config = get_config()
branch = config["init.defaultBranch"]
assert isinstance(branch, str) # noqa: S101
return branch
return "master"
class Repository:
"""Git repository."""
def __init__(
self, path: Optional[Path] = None, *, repo: Optional[pygit2.Repository] = None
) -> None:
"""Initialize."""
if repo is None:
self.path = path or Path.cwd()
self.repo = pygit2.Repository(self.path)
else:
self.path = Path(repo.workdir or repo.path)
self.repo = repo
def git(self, *args: str, **kwargs: Any) -> subprocess.CompletedProcess[str]:
"""Invoke git."""
return git(*args, cwd=self.path, **kwargs)
@classmethod
def init(cls, path: Path, *, bare: bool = False) -> Repository:
"""Create a repository."""
# https://github.com/libgit2/libgit2/issues/2849
path.parent.mkdir(exist_ok=True, parents=True)
repo = pygit2.init_repository(path, bare=bare)
return cls(path, repo=repo)
@classmethod
def clone(cls, url: str, path: Path, *, mirror: bool = False) -> Repository:
"""Clone a repository."""
options = ["--mirror"] if mirror else []
git("clone", *options, url, str(path))
return cls(path)
def create_branch(self, branch: str, ref: str = "HEAD") -> None:
"""Create a branch."""
commit = self.repo.revparse_single(ref)
self.repo.branches.create(branch, commit)
def get_current_branch(self) -> str:
"""Return the current branch."""
return self.repo.head.shorthand # type: ignore[no-any-return]
def exists_branch(self, branch: str) -> bool:
"""Return True if the branch exists."""
return branch in self.repo.branches
def switch_branch(self, branch: str) -> None:
"""Switch the current branch."""
self.repo.checkout(self.repo.branches[branch])
def update_remote(self) -> None:
"""Update the remotes."""
self.git("remote", "update")
def fetch_commits(self, source: Repository, *commits: str) -> None:
"""Fetch the given commits and their immediate parents."""
path = source.path.resolve()
self.git("fetch", "--no-tags", "--depth=2", str(path), *commits)
def push(self, remote: str, *refs: str, force: bool = False) -> None:
"""Update remote refs."""
options = ["--force-with-lease"] if force else []
self.git("push", *options, remote, *refs)
def parse_revisions(self, *revisions: str) -> List[str]:
"""Parse revisions using the format specified in gitrevisions(7)."""
process = self.git("rev-list", "--no-walk", *revisions)
result = process.stdout.split()
result.reverse()
return result
def lookup_replacement(self, commit: str) -> str:
"""Lookup the replace ref for the given commit."""
refname = f"refs/replace/{commit}"
ref = self.repo.lookup_reference(refname)
return cast(str, ref.target.hex)
def _ensure_relative(self, path: Path) -> Path:
"""Interpret the path relative to the repository root."""
return path.relative_to(self.path) if path.is_absolute() else path
def read_text(self, path: Path, *, ref: str = "HEAD") -> str:
"""Return the contents of the blob at the given path."""
commit = self.repo.revparse_single(ref)
path = self._ensure_relative(path)
blob = functools.reduce(operator.truediv, path.parts, commit.tree)
return cast(str, blob.data.decode())
def exists(self, path: Path, *, ref: str = "HEAD") -> bool:
"""Return True if a blob exists at the given path."""
commit = self.repo.revparse_single(ref)
path = self._ensure_relative(path)
try:
functools.reduce(operator.truediv, path.parts, commit.tree)
return True
except KeyError:
return False
    def add(self, *paths: Path) -> None:
        """Add the given paths to the index; with no paths, add all files."""
        if paths:
            for path in paths:
                path = self._ensure_relative(path)
                self.repo.index.add(path)
        else:
            self.repo.index.add_all()
        self.repo.index.write()
def commit(self, message: str) -> None:
"""Create a commit."""
try:
head = self.repo.head
refname = head.name
parents = [head.target]
except pygit2.GitError:
branch = get_default_branch()
refname = f"refs/heads/{branch}"
parents = []
tree = self.repo.index.write_tree()
author = committer = self.repo.default_signature
self.repo.create_commit(refname, author, committer, message, tree, parents)
def cherrypick(self, *refs: str) -> None:
"""Cherry-pick the given commits."""
self.git("cherry-pick", *refs)
@contextlib.contextmanager
def worktree(
self,
branch: str,
path: Path,
*,
base: str = "HEAD",
force: bool = False,
force_remove: bool = False,
) -> Iterator[Repository]:
"""Context manager to add and remove a worktree."""
repository = self.add_worktree(branch, path, base=base, force=force)
try:
yield repository
finally:
self.remove_worktree(path, force=force_remove)
def add_worktree(
self,
branch: str,
path: Path,
*,
base: str = "HEAD",
force: bool = False,
) -> Repository:
"""Add a worktree."""
self.git(
"worktree",
"add",
str(path),
"--no-track",
"-B" if force else "-b",
branch,
base,
)
return Repository(path)
def remove_worktree(self, path: Path, *, force: bool = False) -> None:
"""Remove a worktree."""
if force:
self.git("worktree", "remove", "--force", str(path))
else:
self.git("worktree", "remove", str(path))
| 2.40625
| 2
|
apps/user/views.py
|
awsbreathpanda/dailyfresh
| 0
|
12684
|
<gh_stars>0
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, logout
from celery_tasks.tasks import celery_send_mail
from apps.user.models import User
import re
from django.shortcuts import render
from django.views import View
from utils.security import get_user_token, get_activation_link, get_user_id
from django.conf import settings
from django.http import HttpResponse
from django.urls import reverse
# Create your views here.
# /user/register
class RegisterView(View):
def get(self, request):
return render(request, 'user_register.html')
def post(self, request):
username = request.POST.get('username')
password = request.POST.get('password')
rpassword = request.POST.get('rpassword')
email = request.POST.get('email')
allow = request.POST.get('allow')
if not all([username, password, rpassword, email, allow]):
context = {'errmsg': '数据不完整'}
return render(request, 'user_register.html', context=context)
if password != rpassword:
context = {'errmsg': '密码不一致'}
return render(request, 'user_register.html', context=context)
if not re.match(r'^[a-z0-9][\w.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$',
email):
context = {'errmsg': '邮箱格式不正确'}
return render(request, 'user_register.html', context=context)
        if allow != 'on':
            context = {'errmsg': '请同意天天生鲜用户协议'}
            return render(request, 'user_register.html', context=context)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = None
if user is not None:
context = {'errmsg': '已经创建该用户名'}
return render(request, 'user_register.html', context=context)
user = User.objects.create_user(username, email, password)
user.is_active = 0
user.save()
user_token = get_user_token(user.id)
activation_link = get_activation_link(settings.ACTIVATION_URL_PATH,
user_token)
# send email
subject = '天天生鲜欢迎信息'
message = ''
html_message = (
'<h1>%s,欢迎您成为天天生鲜的注册会员</h1><p>请点击以下链接激活你的账户</p><br><a href="%s">%s</a>'
% (username, activation_link, activation_link))
from_email = 'dailyfresh<<EMAIL>>'
recipient_list = [
'<EMAIL>',
]
celery_send_mail.delay(subject,
message,
from_email,
recipient_list,
html_message=html_message)
context = {'errmsg': '添加用户成功'}
return render(request, 'user_register.html', context=context)
# /user/activate/(token)
class ActivateView(View):
def get(self, request, token):
token_bytes = token.encode('utf-8')
user_id = get_user_id(token_bytes)
user = User.objects.get(id=user_id)
user.is_active = 1
user.save()
# TODO
return HttpResponse('<h1>Activate User Successfully</h1>')
# /user/login
class LoginView(View):
def get(self, request):
username = request.COOKIES.get('username')
checked = 'checked'
if username is None:
username = ''
checked = ''
context = {'username': username, 'checked': checked}
return render(request, 'user_login.html', context=context)
def post(self, request):
username = request.POST.get('username')
password = request.POST.get('password')
remember = request.POST.get('remember')
if not all([username, password]):
context = {'errmsg': '参数不完整'}
return render(request, 'user_login.html', context=context)
user = authenticate(request, username=username, password=password)
if user is None:
context = {'errmsg': '用户不存在'}
return render(request, 'user_login.html', context=context)
if not user.is_active:
context = {'errmsg': '用户未激活'}
return render(request, 'user_login.html', context=context)
login(request, user)
next_url = request.GET.get('next', reverse('goods:index'))
response = redirect(next_url)
if remember == 'on':
response.set_cookie('username', username, max_age=7 * 24 * 3600)
else:
response.delete_cookie('username')
return response
# /user/
class UserInfoView(View):
def get(self, request):
if not request.user.is_authenticated:
next_url = reverse(
'user:login') + '?next=' + request.get_full_path()
return redirect(next_url)
else:
return render(request, 'user_center_info.html')
# /user/order/(page)
class UserOrderView(View):
def get(self, request, page):
if not request.user.is_authenticated:
next_url = reverse(
'user:login') + '?next=' + request.get_full_path()
return redirect(next_url)
else:
return render(request, 'user_center_order.html')
# /user/address
class UserAddressView(View):
def get(self, request):
if not request.user.is_authenticated:
next_url = reverse(
'user:login') + '?next=' + request.get_full_path()
return redirect(next_url)
else:
return render(request, 'user_center_site.html')
# /user/logout
class LogoutView(View):
def get(self, request):
logout(request)
return redirect(reverse('goods:index'))
| 2.078125
| 2
|
heatzy/pilote_v1.py
|
Devotics/heatzy-home-hassistant
| 22
|
12685
|
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (HVAC_MODE_AUTO,
PRESET_AWAY,
PRESET_COMFORT, PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE)
from homeassistant.const import TEMP_CELSIUS
HEATZY_TO_HA_STATE = {
'\u8212\u9002': PRESET_COMFORT,
'\u7ecf\u6d4e': PRESET_ECO,
'\u89e3\u51bb': PRESET_AWAY,
'\u505c\u6b62': PRESET_NONE,
}
HA_TO_HEATZY_STATE = {
PRESET_COMFORT: [1, 1, 0],
PRESET_ECO: [1, 1, 1],
PRESET_AWAY: [1, 1, 2],
PRESET_NONE: [1, 1, 3],
}
class HeatzyPiloteV1Thermostat(ClimateEntity):
def __init__(self, api, device):
self._api = api
self._device = device
@property
def temperature_unit(self):
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE
@property
def unique_id(self):
"""Return a unique ID."""
return self._device.get('did')
@property
def name(self):
return self._device.get('dev_alias')
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return [
HVAC_MODE_AUTO
]
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
return HVAC_MODE_AUTO
@property
def preset_modes(self):
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return [
PRESET_NONE,
PRESET_COMFORT,
PRESET_ECO,
PRESET_AWAY
]
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
return HEATZY_TO_HA_STATE.get(self._device.get('attr').get('mode'))
async def async_set_preset_mode(self, preset_mode):
"""Set new preset mode."""
await self._api.async_control_device(self.unique_id, {
'raw': HA_TO_HEATZY_STATE.get(preset_mode),
})
await self.async_update()
async def async_update(self):
"""Retrieve latest state."""
self._device = await self._api.async_get_device(self.unique_id)
| 2.53125
| 3
|
kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py
|
jkroepke/homelab
| 5
|
12686
|
#!/usr/bin/python
#
# Copyright: Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxmox_template
short_description: management of OS templates in Proxmox VE cluster
description:
- allows you to upload/delete templates in Proxmox VE cluster
options:
api_host:
description:
- the host of the Proxmox VE cluster
type: str
required: true
api_user:
description:
- the user to authenticate with
type: str
required: true
api_password:
description:
- the password to authenticate with
- you can use PROXMOX_PASSWORD environment variable
type: str
validate_certs:
description:
- enable / disable https certificate verification
default: 'no'
type: bool
node:
description:
- Proxmox VE node, when you will operate with template
type: str
required: true
src:
description:
- path to uploaded file
- required only for C(state=present)
type: path
template:
description:
- the template name
- required only for states C(absent), C(info)
type: str
content_type:
description:
- content type
- required only for C(state=present)
type: str
default: 'vztmpl'
choices: ['vztmpl', 'iso']
storage:
description:
- target storage
type: str
default: 'local'
timeout:
description:
- timeout for operations
type: int
default: 30
force:
description:
- can be used only with C(state=present), exists template will be overwritten
type: bool
default: 'no'
state:
description:
- Indicate desired state of the template
type: str
choices: ['present', 'absent']
default: present
notes:
- Requires proxmoxer and requests modules on host. This modules can be installed with pip.
requirements: [ "proxmoxer", "requests" ]
author: <NAME> (@UnderGreen)
'''
EXAMPLES = '''
- name: Upload new openvz template with minimal options
community.general.proxmox_template:
node: uk-mc02
api_user: root@pam
api_password: <PASSWORD>
api_host: node1
src: ~/ubuntu-14.04-x86_64.tar.gz
- name: >
Upload new openvz template with minimal options use environment
PROXMOX_PASSWORD variable(you should export it before)
community.general.proxmox_template:
node: uk-mc02
api_user: root@pam
api_host: node1
src: ~/ubuntu-14.04-x86_64.tar.gz
- name: Upload new openvz template with all options and force overwrite
community.general.proxmox_template:
node: uk-mc02
api_user: root@pam
api_password: <PASSWORD>
api_host: node1
storage: local
content_type: vztmpl
src: ~/ubuntu-14.04-x86_64.tar.gz
force: yes
- name: Delete template with minimal options
community.general.proxmox_template:
node: uk-mc02
api_user: root@pam
api_password: <PASSWORD>
api_host: node1
template: ubuntu-14.04-x86_64.tar.gz
state: absent
'''
import os
import time
try:
from proxmoxer import ProxmoxAPI
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
from ansible.module_utils.basic import AnsibleModule
def get_template(proxmox, node, storage, content_type, template):
return [True for tmpl in proxmox.nodes(node).storage(storage).content.get()
if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
while timeout:
task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
% proxmox.node(node).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def delete_template(module, proxmox, node, storage, content_type, template, timeout):
volid = '%s:%s/%s' % (storage, content_type, template)
proxmox.nodes(node).storage(storage).content.delete(volid)
while timeout:
if not get_template(proxmox, node, storage, content_type, template):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for deleting template.')
time.sleep(1)
return False
def main():
module = AnsibleModule(
argument_spec=dict(
api_host=dict(required=True),
api_user=dict(required=True),
api_password=dict(no_log=True),
validate_certs=dict(type='bool', default=False),
node=dict(),
src=dict(type='path'),
template=dict(),
content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
storage=dict(default='local'),
timeout=dict(type='int', default=30),
force=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent']),
)
)
if not HAS_PROXMOXER:
module.fail_json(msg='proxmoxer required for this module')
state = module.params['state']
api_user = module.params['api_user']
api_host = module.params['api_host']
api_password = module.params['api_password']
validate_certs = module.params['validate_certs']
node = module.params['node']
storage = module.params['storage']
timeout = module.params['timeout']
# If password not set get it from PROXMOX_PASSWORD env
if not api_password:
try:
api_password = os.environ['PROXMOX_PASSWORD']
except KeyError as e:
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
try:
proxmox = ProxmoxAPI(api_host, user=api_user, password=<PASSWORD>, verify_ssl=validate_certs)
except Exception as e:
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
if state == 'present':
try:
content_type = module.params['content_type']
src = module.params['src']
template = os.path.basename(src)
if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
                module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
elif not src:
module.fail_json(msg='src param to uploading template file is mandatory')
elif not (os.path.exists(src) and os.path.isfile(src)):
module.fail_json(msg='template file on path %s not exists' % src)
if upload_template(module, proxmox, api_host, node, storage, content_type, src, timeout):
module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
except Exception as e:
module.fail_json(msg="uploading of template %s failed with exception: %s" % (template, e))
elif state == 'absent':
try:
content_type = module.params['content_type']
template = module.params['template']
if not template:
module.fail_json(msg='template param is mandatory')
elif not get_template(proxmox, node, storage, content_type, template):
module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
if delete_template(module, proxmox, node, storage, content_type, template, timeout):
module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
except Exception as e:
module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
if __name__ == '__main__':
main()
| 1.757813
| 2
|
polling_stations/apps/councils/management/commands/import_councils.py
|
DemocracyClub/UK-Polling-Stations
| 29
|
12687
|
<filename>polling_stations/apps/councils/management/commands/import_councils.py<gh_stars>10-100
import json
from html import unescape
import requests
from django.apps import apps
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon, Polygon
from django.conf import settings
from django.core.management.base import BaseCommand
from requests.exceptions import HTTPError
from retry import retry
from councils.models import Council, CouncilGeography
from polling_stations.settings.constants.councils import WELSH_COUNCIL_NAMES
def union_areas(a1, a2):
if not a1:
return a2
return MultiPolygon(a1.union(a2))
NIR_IDS = [
"ABC",
"AND",
"ANN",
"BFS",
"CCG",
"DRS",
"FMO",
"LBC",
"MEA",
"MUL",
"NMD",
]
class Command(BaseCommand):
"""
Turn off auto system check for all apps
We will maunally run system checks only for the
'councils' and 'pollingstations' apps
"""
requires_system_checks = []
contact_details = {}
def add_arguments(self, parser):
parser.add_argument(
"-t",
"--teardown",
default=False,
action="store_true",
required=False,
help="<Optional> Clear Councils and CouncilGeography tables before importing",
)
parser.add_argument(
"-u",
"--alt-url",
required=False,
help="<Optional> Alternative url to override settings.BOUNDARIES_URL",
)
parser.add_argument(
"--only-contact-details",
action="store_true",
help="Only update contact information for imported councils, "
"don't update boundaries",
)
def feature_to_multipolygon(self, feature):
geometry = GEOSGeometry(json.dumps(feature["geometry"]), srid=4326)
if isinstance(geometry, Polygon):
return MultiPolygon(geometry)
return geometry
@retry(HTTPError, tries=2, delay=30)
def get_ons_boundary_json(self, url):
r = requests.get(url)
r.raise_for_status()
"""
When an ArcGIS server can't generate a response
within X amount of time, it will return a 202 ACCEPTED
response with a body like
{
"processingTime": "27.018 seconds",
"status": "Processing",
"generating": {}
}
and expects the client to poll it.
"""
if r.status_code == 202:
raise HTTPError("202 Accepted", response=r)
return r.json()
def attach_boundaries(self, url=None, id_field="LAD20CD"):
"""
Fetch each council's boundary from ONS and attach it to an existing
council object
:param url: The URL of the geoJSON file containing council boundaries
:param id_field: The name of the feature properties field containing
the council ID
:return:
"""
if not url:
url = settings.BOUNDARIES_URL
self.stdout.write("Downloading ONS boundaries from %s..." % (url))
feature_collection = self.get_ons_boundary_json(url)
for feature in feature_collection["features"]:
gss_code = feature["properties"][id_field]
try:
council = Council.objects.get(identifiers__contains=[gss_code])
self.stdout.write(
"Found boundary for %s: %s" % (gss_code, council.name)
)
except Council.DoesNotExist:
self.stderr.write(
"No council object with GSS {} found".format(gss_code)
)
continue
council_geography, _ = CouncilGeography.objects.get_or_create(
council=council
)
council_geography.gss = gss_code
council_geography.geography = self.feature_to_multipolygon(feature)
council_geography.save()
def load_contact_details(self):
return requests.get(settings.EC_COUNCIL_CONTACT_DETAILS_API_URL).json()
def get_council_name(self, council_data):
"""
At the time of writing, the council name can be NULL in the API
meaning we can't rely on the key being populated in all cases.
This is normally only an issue with councils covered by EONI, so if
we see one of them without a name, we assign a hardcoded name.
"""
name = None
if council_data["official_name"]:
name = council_data["official_name"]
else:
if council_data["code"] in NIR_IDS:
name = "Electoral Office for Northern Ireland"
if not name:
raise ValueError("No official name for {}".format(council_data["code"]))
return unescape(name)
def import_councils_from_ec(self):
self.stdout.write("Importing councils...")
bucks_defaults = {
"name": "Buckinghamsh<NAME>",
"electoral_services_email": "<EMAIL> (general enquiries), <EMAIL> (postal vote enquiries), <EMAIL> (proxy vote enquiries), <EMAIL> (overseas voter enquiries)",
"electoral_services_website": "https://www.buckinghamshire.gov.uk/your-council/council-and-democracy/election-and-voting/",
"electoral_services_postcode": "HP19 8FF",
"electoral_services_address": "Electoral Services\r\nBuckinghamshire Council\r\nThe Gateway\r\nGatehouse Road\r\nAylesbury",
"electoral_services_phone_numbers": ["01296 798141"],
"identifiers": ["E06000060"],
"registration_address": None,
"registration_email": "",
"registration_phone_numbers": [],
"registration_postcode": None,
"registration_website": "",
"name_translated": {},
}
bucks_council, created = Council.objects.get_or_create(
council_id="BUC", defaults=bucks_defaults
)
if not created:
for key, value in bucks_defaults.items():
setattr(bucks_council, key, value)
bucks_council.save()
self.seen_ids.add("BUC")
for council_data in self.load_contact_details():
self.seen_ids.add(council_data["code"])
if council_data["code"] in ("CHN", "AYL", "SBU", "WYO"):
continue
council, _ = Council.objects.get_or_create(council_id=council_data["code"])
council.name = self.get_council_name(council_data)
council.identifiers = council_data["identifiers"]
if council_data["electoral_services"]:
electoral_services = council_data["electoral_services"][0]
council.electoral_services_email = electoral_services["email"]
council.electoral_services_address = unescape(
electoral_services["address"]
)
council.electoral_services_postcode = electoral_services["postcode"]
council.electoral_services_phone_numbers = electoral_services["tel"]
council.electoral_services_website = electoral_services[
"website"
].replace("\\", "")
if council_data["registration"]:
registration = council_data["registration"][0]
council.registration_email = registration["email"]
council.registration_address = unescape(registration["address"])
council.registration_postcode = registration["postcode"]
council.registration_phone_numbers = registration["tel"]
council.registration_website = registration["website"].replace("\\", "")
if council.council_id in WELSH_COUNCIL_NAMES:
council.name_translated["cy"] = WELSH_COUNCIL_NAMES[council.council_id]
elif council.name_translated.get("cy"):
del council.name_translated["cy"]
council.save()
def handle(self, **options):
"""
Manually run system checks for the
'councils' and 'pollingstations' apps
Management commands can ignore checks that only apply to
the apps supporting the website part of the project
"""
self.check(
[apps.get_app_config("councils"), apps.get_app_config("pollingstations")]
)
if options["teardown"]:
self.stdout.write("Clearing councils table..")
Council.objects.all().delete()
self.stdout.write("Clearing councils_geography table..")
CouncilGeography.objects.all().delete()
self.seen_ids = set()
self.import_councils_from_ec()
if not options["only_contact_details"]:
self.attach_boundaries(options.get("alt_url"))
# Clean up old councils that we've not seen in the EC data
Council.objects.exclude(council_id__in=self.seen_ids).delete()
self.stdout.write("..done")
| 2.140625
| 2
|
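The get_ons_boundary_json method in the import_councils.py record above treats an ArcGIS 202 "still processing" response as a retryable failure. A minimal standalone sketch of the same pattern, assuming only the requests and retry packages are installed and using a hypothetical URL in place of settings.BOUNDARIES_URL:

import requests
from requests.exceptions import HTTPError
from retry import retry

# Hypothetical endpoint; the real command reads settings.BOUNDARIES_URL.
BOUNDARIES_URL = "https://example.com/ons/boundaries.geojson"

@retry(HTTPError, tries=2, delay=30)
def fetch_boundary_geojson(url):
    r = requests.get(url)
    r.raise_for_status()
    # ArcGIS may answer 202 Accepted while still generating the response;
    # raising here lets the retry decorator poll again after the delay.
    if r.status_code == 202:
        raise HTTPError("202 Accepted", response=r)
    return r.json()

if __name__ == "__main__":
    features = fetch_boundary_geojson(BOUNDARIES_URL)["features"]
    print("Downloaded %d boundary features" % len(features))

The tries=2, delay=30 settings mirror the decorator arguments used in the command itself.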
saleor/webhook/observability/payload_schema.py
|
DevPoke/saleor
| 0
|
12688
|
from datetime import datetime
from enum import Enum
from json.encoder import ESCAPE_ASCII, ESCAPE_DCT # type: ignore
from typing import List, Optional, Tuple, TypedDict
class JsonTruncText:
def __init__(self, text="", truncated=False, added_bytes=0):
self.text = text
self.truncated = truncated
self._added_bytes = max(0, added_bytes)
def __eq__(self, other):
if not isinstance(other, JsonTruncText):
return False
return (self.text, self.truncated) == (other.text, other.truncated)
def __repr__(self):
return f'JsonTruncText(text="{self.text}", truncated={self.truncated})'
@property
def byte_size(self) -> int:
return len(self.text) + self._added_bytes
@staticmethod
def json_char_len(char: str) -> int:
try:
return len(ESCAPE_DCT[char])
except KeyError:
return 6 if ord(char) < 0x10000 else 12
@classmethod
def truncate(cls, s: str, limit: int):
limit = max(limit, 0)
s_init_len = len(s)
s = s[:limit]
added_bytes = 0
for match in ESCAPE_ASCII.finditer(s):
start, end = match.span(0)
markup = cls.json_char_len(match.group(0)) - 1
added_bytes += markup
if end + added_bytes > limit:
return cls(
text=s[:start],
truncated=True,
added_bytes=added_bytes - markup,
)
if end + added_bytes == limit:
s = s[:end]
return cls(
text=s,
truncated=len(s) < s_init_len,
added_bytes=added_bytes,
)
return cls(
text=s,
truncated=len(s) < s_init_len,
added_bytes=added_bytes,
)
class ObservabilityEventTypes(str, Enum):
API_CALL = "api_call"
EVENT_DELIVERY_ATTEMPT = "event_delivery_attempt"
HttpHeaders = List[Tuple[str, str]]
class App(TypedDict):
id: str
name: str
class Webhook(TypedDict):
id: str
name: str
target_url: str
subscription_query: Optional[JsonTruncText]
class ObservabilityEventBase(TypedDict):
event_type: ObservabilityEventTypes
class GraphQLOperation(TypedDict):
name: Optional[JsonTruncText]
operation_type: Optional[str]
query: Optional[JsonTruncText]
result: Optional[JsonTruncText]
result_invalid: bool
class ApiCallRequest(TypedDict):
id: str
method: str
url: str
time: float
headers: HttpHeaders
content_length: int
class ApiCallResponse(TypedDict):
headers: HttpHeaders
status_code: Optional[int]
content_length: int
class ApiCallPayload(ObservabilityEventBase):
request: ApiCallRequest
response: ApiCallResponse
app: Optional[App]
gql_operations: List[GraphQLOperation]
class EventDeliveryPayload(TypedDict):
content_length: int
body: JsonTruncText
class EventDelivery(TypedDict):
id: str
status: str
event_type: str
event_sync: bool
payload: EventDeliveryPayload
class EventDeliveryAttemptRequest(TypedDict):
headers: HttpHeaders
class EventDeliveryAttemptResponse(TypedDict):
headers: HttpHeaders
status_code: Optional[int]
content_length: int
body: JsonTruncText
class EventDeliveryAttemptPayload(ObservabilityEventBase):
id: str
time: datetime
duration: Optional[float]
status: str
next_retry: Optional[datetime]
request: EventDeliveryAttemptRequest
response: EventDeliveryAttemptResponse
event_delivery: EventDelivery
webhook: Webhook
app: App
| 2.625
| 3
|
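A short usage sketch for JsonTruncText.truncate from the payload_schema record above, illustrating that the limit counts JSON-escaped bytes rather than raw characters. The import path is taken from the record's file path and may differ between saleor versions:

from saleor.webhook.observability.payload_schema import JsonTruncText

sample = 'hello "world"\n'  # the quote and newline expand when JSON-escaped
short = JsonTruncText.truncate(sample, limit=8)
print(short.text, short.truncated, short.byte_size)  # 'hello "' True 8
full = JsonTruncText.truncate(sample, limit=100)
print(full.truncated)  # False: the whole string fits within the limit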
NMTK_apps/NMTK_server/wms/djpaste.py
|
bhargavasana/nmtk
| 0
|
12689
|
# (c) 2013 <NAME> and contributors; written to work with Django and Paste (http://pythonpaste.org)
# Paste CGI "middleware" for Django by <NAME> <<EMAIL>>
# Open Technology Group, Inc <http://www.otg-nc.com>
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import os
import sys
import subprocess
import urllib
try:
import select
except ImportError:
select = None
from paste.util import converters
from paste.cgiapp import *
from paste.cgiapp import StdinReader, proc_communicate
from paste.cgiapp import CGIApplication as PasteCGIApplication
from django.http import HttpResponse
# Taken from http://plumberjack.blogspot.com/2009/09/how-to-treat-logger-like-output-stream.html
import logging
mod_logger=logging.getLogger(__name__)
class LoggerWriter:
def __init__(self, logger, level):
self.logger = logger
self.level = level
def write(self, message):
if message.strip() and message != '\n':
self.logger.log(self.level, message)
class CGIApplication(PasteCGIApplication):
def __call__(self, request, environ, logger=None):
if not logger:
self.logger=LoggerWriter(logging.getLogger(__name__), logging.ERROR)
else:
self.logger=logger
if 'REQUEST_URI' not in environ:
environ['REQUEST_URI'] = (
urllib.quote(environ.get('SCRIPT_NAME', ''))
+ urllib.quote(environ.get('PATH_INFO', '')))
if self.include_os_environ:
cgi_environ = os.environ.copy()
else:
cgi_environ = {}
for name in environ:
# Should unicode values be encoded?
if (name.upper() == name
and isinstance(environ[name], str)):
cgi_environ[name] = environ[name]
if self.query_string is not None:
old = cgi_environ.get('QUERY_STRING', '')
if old:
old += '&'
cgi_environ['QUERY_STRING'] = old + self.query_string
cgi_environ['SCRIPT_FILENAME'] = self.script
proc = subprocess.Popen(
[self.script],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=cgi_environ,
cwd=os.path.dirname(self.script),
)
writer = CGIWriter()
if select and sys.platform != 'win32':
proc_communicate(
proc,
stdin=request,
stdout=writer,
stderr=self.logger)
else:
stdout, stderr = proc.communicate(request.read())
if stderr:
self.logger.write(stderr)
writer.write(stdout)
if not writer.headers_finished:
return HttpResponse(status=400)
return writer.response
class CGIWriter(object):
def __init__(self):
self.status = '200 OK'
self.headers = []
self.headers_finished = False
self.writer = None
self.buffer = ''
def write(self, data):
if self.headers_finished:
self.response.write(data)
return
self.buffer += data
while '\n' in self.buffer:
if '\r\n' in self.buffer and self.buffer.find('\r\n') < self.buffer.find('\n'):
line1, self.buffer = self.buffer.split('\r\n', 1)
else:
line1, self.buffer = self.buffer.split('\n', 1)
if not line1:
self.headers_finished = True
self.response=HttpResponse(status=int(self.status.split(' ')[0]))
for name, value in self.headers:
self.response[name]=value
self.response.write(self.buffer)
del self.buffer
del self.headers
del self.status
break
elif ':' not in line1:
raise CGIError(
"Bad header line: %r" % line1)
else:
name, value = line1.split(':', 1)
value = value.lstrip()
name = name.strip()
if name.lower() == 'status':
if ' ' not in value:
# WSGI requires this space, sometimes CGI scripts don't set it:
value = '%s General' % value
self.status = value
else:
self.headers.append((name, value))
| 2.390625
| 2
|
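CGIWriter in the djpaste record above interleaves header parsing with Django response construction, which obscures the underlying idea: split a CGI child's stdout into a status line, headers, and a body. A dependency-free sketch of just that split; split_cgi_output is a hypothetical helper, not part of the module:

def split_cgi_output(raw):
    """Split raw CGI stdout into (status, headers, body)."""
    head, _, body = raw.replace("\r\n", "\n").partition("\n\n")
    status, headers = "200 OK", []
    for line in head.splitlines():
        name, _, value = line.partition(":")
        if name.strip().lower() == "status":
            status = value.strip()
        else:
            headers.append((name.strip(), value.strip()))
    return status, headers, body

print(split_cgi_output("Status: 404 Not Found\nContent-Type: text/plain\n\nmissing"))
# ('404 Not Found', [('Content-Type', 'text/plain')], 'missing')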
Ago-Dic-2019/JOSE ONOFRE/PRACTICAS/Practica1/RestaurantSeat.py
|
Arbupa/DAS_Sistemas
| 41
|
12690
|
<gh_stars>10-100
cantidad = input("How many people will be dining? ")
cant = int(cantidad)
print(cant)
if cant > 8:
    print("Sorry, you will have to wait")
else:
    print("The table is ready")
| 3.71875
| 4
|
build/piman.app/pysnmp/carrier/asyncore/dgram/base.py
|
jackgisel/team-athens
| 0
|
12691
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
import socket
import errno
import sys
from pysnmp.carrier.asyncore.base import AbstractSocketTransport
from pysnmp.carrier import sockfix, sockmsg, error
from pysnmp import debug
# Ignore these socket errors
sockErrors = {errno.ESHUTDOWN: True,
errno.ENOTCONN: True,
errno.ECONNRESET: False,
errno.ECONNREFUSED: False,
errno.EAGAIN: False,
errno.EWOULDBLOCK: False}
if hasattr(errno, 'EBADFD'):
# bad FD may happen upon FD closure on n-1 select() event
sockErrors[errno.EBADFD] = True
class DgramSocketTransport(AbstractSocketTransport):
sockType = socket.SOCK_DGRAM
retryCount = 3
retryInterval = 1
addressType = lambda x: x
def __init__(self, sock=None, sockMap=None):
self.__outQueue = []
self._sendto = lambda s, b, a: s.sendto(b, a)
def __recvfrom(s, sz):
d, a = s.recvfrom(sz)
return d, self.addressType(a)
self._recvfrom = __recvfrom
AbstractSocketTransport.__init__(self, sock, sockMap)
def openClientMode(self, iface=None):
if iface is not None:
try:
self.socket.bind(iface)
except socket.error:
raise error.CarrierError(
'bind() for %s failed: %s' % (iface is None and "<all local>" or iface, sys.exc_info()[1]))
return self
def openServerMode(self, iface):
try:
self.socket.bind(iface)
except socket.error:
raise error.CarrierError('bind() for %s failed: %s' % (iface, sys.exc_info()[1],))
return self
def enableBroadcast(self, flag=1):
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, flag
)
except socket.error:
raise error.CarrierError('setsockopt() for SO_BROADCAST failed: %s' % (sys.exc_info()[1],))
debug.logger & debug.flagIO and debug.logger('enableBroadcast: %s option SO_BROADCAST on socket %s' % (flag and "enabled" or "disabled", self.socket.fileno()))
return self
def enablePktInfo(self, flag=1):
if (not hasattr(self.socket, 'sendmsg') or
not hasattr(self.socket, 'recvmsg')):
raise error.CarrierError('sendmsg()/recvmsg() interface is not supported by this OS and/or Python version')
try:
if self.socket.family == socket.AF_INET:
self.socket.setsockopt(socket.SOL_IP, socket.IP_PKTINFO, flag)
if self.socket.family == socket.AF_INET6:
self.socket.setsockopt(socket.SOL_IPV6, socket.IPV6_RECVPKTINFO, flag)
except socket.error:
raise error.CarrierError('setsockopt() for %s failed: %s' % (self.socket.family == socket.AF_INET6 and "IPV6_RECVPKTINFO" or "IP_PKTINFO", sys.exc_info()[1]))
self._sendto = sockmsg.getSendTo(self.addressType)
self._recvfrom = sockmsg.getRecvFrom(self.addressType)
debug.logger & debug.flagIO and debug.logger('enablePktInfo: %s option %s on socket %s' % (self.socket.family == socket.AF_INET6 and "IPV6_RECVPKTINFO" or "IP_PKTINFO", flag and "enabled" or "disabled", self.socket.fileno()))
return self
def enableTransparent(self, flag=1):
try:
if self.socket.family == socket.AF_INET:
self.socket.setsockopt(
socket.SOL_IP, socket.IP_TRANSPARENT, flag
)
if self.socket.family == socket.AF_INET6:
self.socket.setsockopt(
socket.SOL_IPV6, socket.IPV6_TRANSPARENT, flag
)
except socket.error:
raise error.CarrierError('setsockopt() for IP_TRANSPARENT failed: %s' % sys.exc_info()[1])
except OSError:
            raise error.CarrierError('IP_TRANSPARENT socket option requires superuser privileges')
debug.logger & debug.flagIO and debug.logger('enableTransparent: %s option IP_TRANSPARENT on socket %s' % (flag and "enabled" or "disabled", self.socket.fileno()))
return self
def sendMessage(self, outgoingMessage, transportAddress):
self.__outQueue.append(
(outgoingMessage, self.normalizeAddress(transportAddress))
)
debug.logger & debug.flagIO and debug.logger('sendMessage: outgoingMessage queued (%d octets) %s' % (len(outgoingMessage), debug.hexdump(outgoingMessage)))
def normalizeAddress(self, transportAddress):
if not isinstance(transportAddress, self.addressType):
transportAddress = self.addressType(transportAddress)
if not transportAddress.getLocalAddress():
transportAddress.setLocalAddress(self.getLocalAddress())
return transportAddress
def getLocalAddress(self):
# one evil OS does not seem to support getsockname() for DGRAM sockets
try:
return self.socket.getsockname()
except Exception:
return '0.0.0.0', 0
# asyncore API
def handle_connect(self):
pass
def writable(self):
return self.__outQueue
def handle_write(self):
outgoingMessage, transportAddress = self.__outQueue.pop(0)
debug.logger & debug.flagIO and debug.logger('handle_write: transportAddress %r -> %r outgoingMessage (%d octets) %s' % (transportAddress.getLocalAddress(), transportAddress, len(outgoingMessage), debug.hexdump(outgoingMessage)))
if not transportAddress:
            debug.logger & debug.flagIO and debug.logger('handle_write: missing dst address, losing outgoing msg')
return
try:
self._sendto(
self.socket, outgoingMessage, transportAddress
)
except socket.error:
if sys.exc_info()[1].args[0] in sockErrors:
debug.logger & debug.flagIO and debug.logger('handle_write: ignoring socket error %s' % (sys.exc_info()[1],))
else:
raise error.CarrierError('sendto() failed for %s: %s' % (transportAddress, sys.exc_info()[1]))
def readable(self):
return 1
def handle_read(self):
try:
incomingMessage, transportAddress = self._recvfrom(self.socket, 65535)
transportAddress = self.normalizeAddress(transportAddress)
debug.logger & debug.flagIO and debug.logger(
'handle_read: transportAddress %r -> %r incomingMessage (%d octets) %s' % (transportAddress, transportAddress.getLocalAddress(), len(incomingMessage), debug.hexdump(incomingMessage)))
if not incomingMessage:
self.handle_close()
return
else:
self._cbFun(self, transportAddress, incomingMessage)
return
except socket.error:
if sys.exc_info()[1].args[0] in sockErrors:
debug.logger & debug.flagIO and debug.logger('handle_read: known socket error %s' % (sys.exc_info()[1],))
sockErrors[sys.exc_info()[1].args[0]] and self.handle_close()
return
else:
raise error.CarrierError('recvfrom() failed: %s' % (sys.exc_info()[1],))
def handle_close(self):
pass # no datagram connection
| 2.03125
| 2
|
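DgramSocketTransport above is normally driven through one of its concrete subclasses plus an asyncore dispatcher rather than used directly. A hedged sketch of the usual pysnmp carrier-layer pattern; the port and callback body are arbitrary, and the exact module layout can vary between pysnmp releases:

from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp

def cbFun(dispatcher, transportDomain, transportAddress, wholeMsg):
    # Report whatever datagram arrives; a real agent would decode SNMP here.
    print('Received %d octets from %s' % (len(wholeMsg), transportAddress))
    return wholeMsg

dispatcher = AsyncoreDispatcher()
dispatcher.registerRecvCbFun(cbFun)
dispatcher.registerTransport(
    udp.domainName, udp.UdpSocketTransport().openServerMode(('127.0.0.1', 1161))
)
dispatcher.jobStarted(1)
try:
    dispatcher.runDispatcher()
finally:
    dispatcher.closeDispatcher()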
src/coreclr/scripts/superpmi-replay.py
|
JimmyCushnie/runtime
| 2
|
12692
|
#!/usr/bin/env python3
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title               : superpmi_replay.py
#
# Notes:
#
# Script to run "superpmi replay" for various collections under various COMPlus_JitStressRegs values.
################################################################################
################################################################################
import argparse
import os
import sys
from os import path
from os import listdir
from coreclr_arguments import *
from superpmi_setup import run_command
parser = argparse.ArgumentParser(description="Run superpmi replay over various JitStressRegs settings.")
parser.add_argument("-arch", help="Architecture")
parser.add_argument("-platform", help="OS platform")
parser.add_argument("-jit_directory", help="path to the directory containing clrjit binaries")
parser.add_argument("-log_directory", help="path to the directory containing superpmi log files")
jit_flags = [
"JitStressRegs=0",
"JitStressRegs=1",
"JitStressRegs=2",
"JitStressRegs=3",
"JitStressRegs=4",
"JitStressRegs=8",
"JitStressRegs=0x10",
"JitStressRegs=0x80",
"JitStressRegs=0x1000",
]
def setup_args(args):
""" Setup the args for SuperPMI to use.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(args,
"platform",
lambda unused: True,
"Unable to set platform")
coreclr_args.verify(args,
"jit_directory",
lambda jit_directory: os.path.isdir(jit_directory),
"jit_directory doesn't exist")
coreclr_args.verify(args,
"log_directory",
lambda log_directory: True,
"log_directory doesn't exist")
return coreclr_args
def main(main_args):
"""Main entrypoint
Args:
main_args ([type]): Arguments to the script
"""
python_path = sys.executable
cwd = os.path.dirname(os.path.realpath(__file__))
coreclr_args = setup_args(main_args)
spmi_location = path.join(cwd, "artifacts", "spmi")
log_directory = coreclr_args.log_directory
platform_name = coreclr_args.platform
os_name = "win" if platform_name.lower() == "windows" else "unix"
arch_name = coreclr_args.arch
host_arch_name = "x64" if arch_name.endswith("64") else "x86"
jit_path = path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))
print("Running superpmi.py download")
run_command([python_path, path.join(cwd, "superpmi.py"), "download", "--no_progress", "-target_os", platform_name,
"-target_arch", arch_name, "-core_root", cwd, "-spmi_location", spmi_location], _exit_on_fail=True)
failed_runs = []
for jit_flag in jit_flags:
log_file = path.join(log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_")))
print("Running superpmi.py replay for {}".format(jit_flag))
_, _, return_code = run_command([
python_path, path.join(cwd, "superpmi.py"), "replay", "-core_root", cwd,
"-jitoption", jit_flag, "-jitoption", "TieredCompilation=0",
"-target_os", platform_name, "-target_arch", arch_name,
"-arch", host_arch_name,
"-jit_path", jit_path, "-spmi_location", spmi_location,
"-log_level", "debug", "-log_file", log_file])
if return_code != 0:
failed_runs.append("Failure in {}".format(log_file))
# Consolidate all superpmi_*.logs in superpmi_platform_architecture.log
final_log_name = path.join(log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))
print("Consolidating final {}".format(final_log_name))
with open(final_log_name, "a") as final_superpmi_log:
for superpmi_log in listdir(log_directory):
if not superpmi_log.startswith("superpmi_Jit") or not superpmi_log.endswith(".log"):
continue
print("Appending {}".format(superpmi_log))
final_superpmi_log.write("======================================================={}".format(os.linesep))
final_superpmi_log.write("Contents from {}{}".format(superpmi_log, os.linesep))
final_superpmi_log.write("======================================================={}".format(os.linesep))
with open(path.join(log_directory, superpmi_log), "r") as current_superpmi_log:
contents = current_superpmi_log.read()
final_superpmi_log.write(contents)
# Log failures summary
if len(failed_runs) > 0:
final_superpmi_log.write(os.linesep)
final_superpmi_log.write(os.linesep)
final_superpmi_log.write("========Failed runs summary========".format(os.linesep))
final_superpmi_log.write(os.linesep.join(failed_runs))
return 0 if len(failed_runs) == 0 else 1
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
| 1.609375
| 2
|
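Each JitStressRegs flag in the superpmi-replay record above is replayed into its own log file whose name is derived from the flag string before everything is consolidated. A tiny sketch of just the naming scheme, with a hypothetical log directory:

from os import path

jit_flags = ["JitStressRegs=0", "JitStressRegs=0x10", "JitStressRegs=0x1000"]
log_directory = "/tmp/spmi_logs"  # hypothetical

for jit_flag in jit_flags:
    log_file = path.join(log_directory, "superpmi_{}.log".format(jit_flag.replace("=", "_")))
    print(log_file)  # e.g. /tmp/spmi_logs/superpmi_JitStressRegs_0.log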
parse_cookie.py
|
olnikiforov/hillel
| 0
|
12693
|
<reponame>olnikiforov/hillel
def parse_cookie(query: str) -> dict:
res = {}
if query:
data = query.split(';')
for i in data:
if '=' in i:
res[i.split('=')[0]] = '='.join(i.split('=')[1:])
return res
if __name__ == '__main__':
assert parse_cookie('name=Dima;') == {'name': 'Dima'}
assert parse_cookie('') == {}
assert parse_cookie('name=Dima;age=28;') == {'name': 'Dima', 'age': '28'}
assert parse_cookie('name=Dima=User;age=28;') == {'name': 'Dima=User', 'age': '28'}
| 2.953125
| 3
|
neuralgym/callbacks/model_saver.py
|
pancookie/SNPGAN_TECcompletion
| 1
|
12694
|
<gh_stars>1-10
"""model_saver"""
import os
from . import PeriodicCallback, CallbackLoc
from ..utils.logger import callback_log
class ModelSaver(PeriodicCallback):
"""Save model to file at every pstep step_start.
Args:
pstep (int): Save to model every pstep.
saver: Tensorflow saver.
dump_prefix (str): Prefix for saving model files.
"""
def __init__(self, pstep, saver, dump_prefix, train_spe=None, save_every_epochs=50, op_lr=False, optim=None):
super().__init__(CallbackLoc.step_start, pstep)
self._saver = saver
        self._dump_prefix = dump_prefix
        self.train_spe = train_spe
        self.see = save_every_epochs
# self.optim = optim ; self.op_lr = op_lr
# self.best_losses = {}
# self.best_losses['d_loss'] = 999.; self.best_losses['g_loss'] = 999.; self.best_losses['avg_loss'] = 999.
dump_dir = os.path.dirname(self._dump_prefix)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
callback_log('Initialize ModelSaver: mkdirs %s.' % dump_dir)
'''
# make two folders to save best D, G, and avg loss
self.dump_dir_d = os.path.join(os.path.join(dump_dir, 'best_D'))
if not os.path.exists(self.dump_dir_d):
os.makedirs(self.dump_dir_d)
self.dump_dir_g = os.path.join(os.path.join(dump_dir, 'best_G'))
if not os.path.exists(self.dump_dir_g):
os.makedirs(self.dump_dir_g)
self.dump_dir_avg = os.path.join(os.path.join(dump_dir, 'best_avg'))
if not os.path.exists(self.dump_dir_avg):
os.makedirs(self.dump_dir_avg)
'''
def run(self, sess, step):
'''
if self.op_lr:
g_lr = sess.run(self.optim['g']._lr)
d_lr = sess.run(self.optim['d']._lr)
callback_log('At step {}, lr: g: {}, d: {}.'.format(
step, g_lr, d_lr))
'''
# save the best loss
# save model
if step != 0 and int(step/self.train_spe)%self.see == 0:
callback_log('Trigger ModelSaver: Save model to {}-{}.'.format(
self._dump_prefix, step))
self._saver.save(sess, self._dump_prefix, global_step=step)
| 2.359375
| 2
|
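The save condition in ModelSaver.run above fires once every save_every_epochs epochs, using train_spe (steps per epoch) to convert the global step into an epoch count. A self-contained sketch of that arithmetic with assumed numbers:

train_spe = 100          # steps per epoch (assumed)
save_every_epochs = 50   # the `see` attribute above

def should_save(step):
    # Mirrors: step != 0 and int(step / train_spe) % save_every_epochs == 0
    return step != 0 and int(step / train_spe) % save_every_epochs == 0

print([s for s in (0, 100, 4999, 5000, 10000) if should_save(s)])  # [5000, 10000]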
rower_monitor/boat_metrics.py
|
sergiomo/diy-rower-monitor
| 0
|
12695
|
from .time_series import TimeSeries
class BoatModel:
def __init__(self, workout):
self.workout = workout
self.position = TimeSeries()
self.speed = TimeSeries()
def update(self):
"""This function gets called on every flywheel encoder tick."""
pass
class RotatingWheel(BoatModel):
"""A simple model to calculate boat speed and distance traveled. We assume the "boat" is just a wheel moving on
the ground, with the same rotational speed as the rower's flywheel."""
WHEEL_CIRCUMFERENCE_METERS = 1.0
def update(self):
if len(self.position) == 0:
current_position = 0
else:
current_position = self.position.values[-1] + 1.0 / self.workout.machine.num_encoder_pulses_per_revolution
self.position.append(
value=current_position,
timestamp=self.workout.machine.encoder_pulse_timestamps[-1]
)
if len(self.workout.machine.flywheel_speed) > 0:
            # Linear speed of a rolling wheel [m/s] = rotational speed [rev/s] * circumference [m]
boat_speed = self.workout.machine.flywheel_speed.values[-1] * self.WHEEL_CIRCUMFERENCE_METERS
self.speed.append(
value=boat_speed,
timestamp=self.workout.machine.flywheel_speed.timestamps[-1]
)
| 3.59375
| 4
|
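The RotatingWheel model above reduces to a single relationship: linear speed equals rotational speed times wheel circumference. A standalone worked example of that formula; the rev/s values are illustrative, not taken from a real machine:

WHEEL_CIRCUMFERENCE_METERS = 1.0

def boat_speed(flywheel_rev_per_s):
    # Linear speed of a rolling wheel [m/s] = rotational speed [rev/s] * circumference [m]
    return flywheel_rev_per_s * WHEEL_CIRCUMFERENCE_METERS

for rps in (1.0, 2.5, 4.0):
    print("%.1f rev/s -> %.1f m/s" % (rps, boat_speed(rps)))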
trackMe-backend/src/config.py
|
matth3wliuu/trackMe
| 1
|
12696
|
<filename>trackMe-backend/src/config.py
dbConfig = {
"user": "root",
"password": "<PASSWORD>",
"host": "localhost",
"database": "trackMe_dev"
}
| 1.039063
| 1
|
src/localsrv/urls.py
|
vladiibine/localsrv
| 0
|
12697
|
from django.conf.urls import url
from .views import serve_all
urlpatterns = (
url(r'^.*$', serve_all, name="localsrv:serve_all"),
)
| 1.421875
| 1
|
netrd/__init__.py
|
sdmccabe/netrd
| 116
|
12698
|
"""
netrd
-----
netrd stands for Network Reconstruction and Distances. It is a repository
of different algorithms for constructing a network from time series data,
as well as for comparing two networks. It is the product of the Network
Science Institute 2019 Collabathon.
"""
from . import distance # noqa
from . import reconstruction # noqa
from . import dynamics # noqa
from . import utilities # noqa
| 1.859375
| 2
|
pytorch/GPT.py
|
lyq628/NLP-Tutorials
| 643
|
12699
|
from transformer import Encoder
from torch import nn,optim
from torch.nn.functional import cross_entropy,softmax, relu
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import torch
import utils
import os
import pickle
class GPT(nn.Module):
def __init__(self, model_dim, max_len, num_layer, num_head, n_vocab, lr, max_seg=3, drop_rate=0.2,padding_idx=0):
super().__init__()
self.padding_idx = padding_idx
self.n_vocab = n_vocab
self.max_len = max_len
self.word_emb = nn.Embedding(n_vocab,model_dim)
self.word_emb.weight.data.normal_(0,0.1)
self.segment_emb = nn.Embedding(num_embeddings= max_seg, embedding_dim=model_dim)
self.segment_emb.weight.data.normal_(0,0.1)
self.position_emb = torch.empty(1,max_len,model_dim)
nn.init.kaiming_normal_(self.position_emb,mode='fan_out', nonlinearity='relu')
self.position_emb = nn.Parameter(self.position_emb)
self.encoder = Encoder(n_head=num_head, emb_dim=model_dim, drop_rate=drop_rate, n_layer=num_layer)
self.task_mlm = nn.Linear(in_features=model_dim, out_features=n_vocab)
self.task_nsp = nn.Linear(in_features=model_dim*self.max_len, out_features=2)
self.opt = optim.Adam(self.parameters(),lr)
def forward(self,seqs, segs, training=False):
embed = self.input_emb(seqs, segs)
z = self.encoder(embed, training, mask = self.mask(seqs)) # [n, step, model_dim]
mlm_logits = self.task_mlm(z) # [n, step, n_vocab]
nsp_logits = self.task_nsp(z.reshape(z.shape[0],-1)) # [n, n_cls]
return mlm_logits, nsp_logits
def step(self, seqs, segs, seqs_, nsp_labels):
self.opt.zero_grad()
mlm_logits, nsp_logits = self(seqs, segs, training=True)
pred_loss = cross_entropy(mlm_logits.reshape(-1,self.n_vocab),seqs_.reshape(-1))
nsp_loss = cross_entropy(nsp_logits,nsp_labels.reshape(-1))
loss = pred_loss + 0.2 * nsp_loss
loss.backward()
self.opt.step()
return loss.cpu().data.numpy(), mlm_logits
def input_emb(self,seqs, segs):
# device = next(self.parameters()).device
# self.position_emb = self.position_emb.to(device)
return self.word_emb(seqs) + self.segment_emb(segs) + self.position_emb
def mask(self, seqs):
device = next(self.parameters()).device
batch_size, seq_len = seqs.shape
mask = torch.triu(torch.ones((seq_len,seq_len), dtype=torch.long), diagonal=1).to(device) # [seq_len ,seq_len]
pad = torch.eq(seqs,self.padding_idx) # [n, seq_len]
mask = torch.where(pad[:,None,None,:],1,mask[None,None,:,:]).to(device) # [n, 1, seq_len, seq_len]
return mask>0 # [n, 1, seq_len, seq_len]
@property
def attentions(self):
attentions = {
"encoder": [l.mh.attention.cpu().data.numpy() for l in self.encoder.encoder_layers]
}
return attentions
def train():
MODEL_DIM = 256
N_LAYER = 4
LEARNING_RATE = 1e-4
dataset = utils.MRPCData("./MRPC",2000)
print("num word: ",dataset.num_word)
model = GPT(
model_dim=MODEL_DIM, max_len=dataset.max_len-1, num_layer=N_LAYER, num_head=4, n_vocab=dataset.num_word,
lr=LEARNING_RATE, max_seg=dataset.num_seg, drop_rate=0.2, padding_idx=dataset.pad_id
)
if torch.cuda.is_available():
print("GPU train avaliable")
device =torch.device("cuda")
model = model.cuda()
else:
device = torch.device("cpu")
model = model.cpu()
loader = DataLoader(dataset,batch_size=32,shuffle=True)
for epoch in range(100):
for batch_idx, batch in enumerate(loader):
seqs, segs,xlen,nsp_labels = batch
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
# pred: [n, step, n_vocab]
loss,pred = model.step(seqs=seqs[:,:-1], segs= segs[:,:-1], seqs_=seqs[:,1:], nsp_labels=nsp_labels)
if batch_idx %100 == 0:
pred = pred[0].cpu().data.numpy().argmax(axis = 1) # [step]
print(
"Epoch: ",epoch,
"|batch: ", batch_idx,
"| loss: %.3f" % loss,
"\n| tgt: ", " ".join([dataset.i2v[i] for i in seqs[0, 1:].cpu().data.numpy()[:xlen[0].sum()+1]]),
"\n| prd: ", " ".join([dataset.i2v[i] for i in pred[:xlen[0].sum()+1]]),
)
os.makedirs("./visual/models/gpt",exist_ok=True)
torch.save(model.state_dict(),"./visual/models/gpt/model.pth")
export_attention(model,device,dataset)
def export_attention(model,device,data,name="gpt"):
model.load_state_dict(torch.load("./visual/models/gpt/model.pth",map_location=device))
seqs, segs,xlen,nsp_labels = data[:32]
seqs, segs,xlen,nsp_labels = torch.from_numpy(seqs),torch.from_numpy(segs),torch.from_numpy(xlen),torch.from_numpy(nsp_labels)
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
model(seqs[:,:-1],segs[:,:-1],False)
seqs = seqs.cpu().data.numpy()
data = {"src": [[data.i2v[i] for i in seqs[j]] for j in range(len(seqs))], "attentions": model.attentions}
path = "./visual/tmp/%s_attention_matrix.pkl" % name
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
pickle.dump(data, f)
if __name__ == "__main__":
train()
| 2.515625
| 3
|
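The mask method in the GPT record above combines a causal (upper-triangular) mask with a padding mask derived from padding_idx. A minimal standalone sketch of the same construction, assuming only PyTorch; True entries mark positions attention must not look at:

import torch

PAD_ID = 0  # assumed padding token id

def causal_padding_mask(seqs, pad_id=PAD_ID):
    # seqs: [n, seq_len] of token ids
    _, seq_len = seqs.shape
    causal = torch.triu(torch.ones((seq_len, seq_len), dtype=torch.long), diagonal=1)
    pad = torch.eq(seqs, pad_id)                                  # [n, seq_len]
    mask = torch.where(pad[:, None, None, :], 1, causal[None, None, :, :])
    return mask > 0                                               # [n, 1, seq_len, seq_len]

seqs = torch.tensor([[5, 7, 9, 0, 0]])  # one sequence with two pad tokens
print(causal_padding_mask(seqs)[0, 0].int())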