| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
transport/tests/uploader_test.py
|
zkdev/cc-utils
| 15
|
12781351
|
import processing.uploaders as uploaders
PREFIX_UPLOADERS = [
{
'context_url': 'registry.local:5000/context-dir',
'prefix': 'registry.local:5000',
'mangle': True,
'expected_target_ref': 'registry.local:5000/registry-source_local:1.2.3',
},
{
'context_url': 'registry.local/context-dir',
'prefix': 'registry.local',
'mangle': False,
'expected_target_ref': 'registry.local/registry-source.local:1.2.3',
},
]
def test_prefix_uploader(job, oci_img):
img1 = oci_img(name='image_name', version='1.2.3', ref='registry-source.local:1.2.3')
job1 = job(oci_img=img1)
results = []
for uploader in PREFIX_UPLOADERS:
examinee = uploaders.PrefixUploader(
context_url=uploader['context_url'],
prefix=uploader['prefix'],
mangle=uploader['mangle'],
)
result = examinee.process(job1, target_as_source=False)
assert result.upload_request.target_ref == uploader['expected_target_ref']
results.append(result)
return results
def test_tag_suffix_uploader(job, oci_img):
for j in test_prefix_uploader(job, oci_img):
examinee = uploaders.TagSuffixUploader(
suffix='mod1',
separator='-',
)
result = examinee.process(j, target_as_source=True)
assert result.upload_request.target_ref == j.upload_request.target_ref + '-mod1'
| 2.21875
| 2
|
tools/spaln/list_spaln_tables.py
|
ic4f/tools-iuc
| 142
|
12781352
|
#!/usr/bin/env python3
import argparse
import shlex
import sys
from subprocess import run
from typing import TextIO
def find_common_ancestor_distance(
taxon: str, other_taxon: str, taxonomy_db_path: str, only_canonical: bool
):
canonical = "--only_canonical" if only_canonical else ""
cmd_str = f"taxonomy_util -d {taxonomy_db_path} common_ancestor_distance {canonical} '{other_taxon}' '{taxon}'"
cmd = shlex.split(cmd_str)
proc = run(cmd, encoding="utf8", capture_output=True)
return proc
def find_distances(gnm2tab_file: TextIO, taxon: str, taxonomy_db_path: str):
cmd = ["taxonomy_util", "-d", taxonomy_db_path, "get_id", taxon]
proc = run(cmd, capture_output=True, encoding="utf8")
if "not found in" in proc.stderr:
exit("Error: " + proc.stderr.strip())
for line in gnm2tab_file:
fields = line.split("\t")
(species_code, settings, other_taxon) = map(lambda el: el.strip(), fields[:3])
proc = find_common_ancestor_distance(taxon, other_taxon, taxonomy_db_path, True)
ancestor_info = proc.stdout.rstrip()
if proc.stderr != "":
print("Warning:", other_taxon, proc.stderr.rstrip(), file=sys.stderr)
else:
proc = find_common_ancestor_distance(
taxon, other_taxon, taxonomy_db_path, False
)
non_canonical_distance = proc.stdout.split("\t")[0]
print(
non_canonical_distance,
ancestor_info,
species_code,
settings,
other_taxon,
sep="\t",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Find distance to common ancestor")
parser.add_argument(
"--taxonomy_db", required=True, help="NCBI Taxonomy database (SQLite format)"
)
parser.add_argument(
"--gnm2tab_file",
required=True,
type=argparse.FileType(),
help="gnm2tab file from spal",
)
parser.add_argument("taxon")
args = parser.parse_args()
find_distances(args.gnm2tab_file, args.taxon, args.taxonomy_db)
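# Example invocation (hypothetical file names; 'taxon' is the positional argument):
#   ./list_spaln_tables.py --taxonomy_db taxonomy.sqlite --gnm2tab_file gnm2tab 'Arabidopsis thaliana'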
| 2.5625
| 3
|
test/test.py
|
franzmueller/analytics-operator-local-adder
| 0
|
12781353
|
<reponame>franzmueller/analytics-operator-local-adder
# Copyright 2020 InfAI (CC SES)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from senergy_local_analytics import Input
import main
class TestMainMethods(unittest.TestCase):
def test_process(self):
adder = main.Adder()
input1 = Input("value1")
input2 = Input("value2")
input1.current_value = 1
input1.current_topic = "test1"
input2.current_value = 3
input2.current_topic = "test2"
output = adder.process([input1, input2])
self.assertTrue(output.send)
self.assertEqual({'sum', 'message_id', 'timestamp'}, output.values.keys())
input1.current_value = 7
input1.current_topic = "test1"
input2.current_value = 4
input2.current_topic = "test3"
output = adder.process([input1, input2])
self.assertEqual(14, output.values["sum"])
| 2.625
| 3
|
cc2541/sensor.py
|
robotice-devices/cc2541-device
| 1
|
12781354
|
<gh_stars>1-10
from cc2541 import CC2541
def get_data(sensor):
name = sensor.get('name')
mac = sensor.get('mac')
data = []
cc2541 = CC2541(mac)
METRIC_STR = "%s.{0}.{0}" % name
data.append((METRIC_STR.format(mac, "temperature"), cc2541.temperature))
data.append((METRIC_STR.format(mac, "humidity"), cc2541.humidity))
data.append((METRIC_STR.format(mac, "magnet"), cc2541.magnet))
data.append((METRIC_STR.format(mac, "barometer"), cc2541.barometer))
data.append((METRIC_STR.format(mac, "gyroscope"), cc2541.gyroscope))
data.append((METRIC_STR.format(mac, "accelerometer"), cc2541.accelerometer))
# ensure that connection is closed
cc2541.close()
return data
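# Usage sketch (hypothetical sensor entry; assumes a reachable CC2541 tag at the given MAC):
#   readings = get_data({'name': 'livingroom', 'mac': 'AA:BB:CC:DD:EE:FF'})
#   for metric, value in readings:
#       print(metric, value)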
| 2.890625
| 3
|
tests/test_client.py
|
hmarment/rested
| 0
|
12781355
|
<reponame>hmarment/rested
import pytest
from rested import Integration, Rested
@pytest.fixture(scope='module')
def setup_integrations():
"""Set up a test resource."""
print('Setting up a test integrations for multiple APIs')
return [Integration(name='myapi1'),
Integration(name='myapi2'),
Integration(name='myapi3')]
@pytest.fixture(scope='module')
def setup_client():
"""Set up a test client."""
print('Setting up a test client for external integrations')
return Rested(integrations=[])
@pytest.fixture(scope='module')
def setup_client_with_multiple_integrations(setup_integrations):
"""Set up a test client."""
print('Setting up a test client with multiple integrations')
return Rested(integrations=setup_integrations)
def test_client(setup_client):
assert isinstance(setup_client, Rested)
def test_client_integrations(setup_client):
assert hasattr(setup_client, 'integrations')
assert isinstance(setup_client.integrations, list)
def test_client_add_integration(setup_client, setup_integration):
setup_client.integrate(setup_integration)
assert len(setup_client.integrations) == 1
assert hasattr(setup_client, setup_integration.name)
def test_client_multiple_integrations(setup_client_with_multiple_integrations):
assert len(setup_client_with_multiple_integrations.integrations) == 3
assert hasattr(setup_client_with_multiple_integrations, 'myapi1') \
and isinstance(
setup_client_with_multiple_integrations.myapi1, Integration) \
and hasattr(setup_client_with_multiple_integrations, 'myapi2') \
and isinstance(
setup_client_with_multiple_integrations.myapi2, Integration) \
and hasattr(setup_client_with_multiple_integrations, 'myapi3') \
and isinstance(
setup_client_with_multiple_integrations.myapi3, Integration)
| 2.453125
| 2
|
pybot/utils/timer.py
|
spillai/pybot
| 78
|
12781356
|
<filename>pybot/utils/timer.py
# Author: <NAME> <<EMAIL>>
# License: MIT
from __future__ import print_function
import time
from collections import OrderedDict
from functools import wraps
def print_green(prt): print("\033[92m {}\033[00m" .format(prt))
global g_timers
g_timers = OrderedDict()
def named_timer(name):
global g_timers
header = '\n' if len(g_timers) == 0 else ''
if name not in g_timers:
g_timers[name] = SimpleTimer(name, header=header)
try:
return g_timers[name]
except KeyError as e:
raise RuntimeError('Failed to retrieve timer {:}'.format(e))
def timeitmethod(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
name = ''.join([args[0].__class__.__name__, '::', func.__name__])
except:
raise RuntimeError('timeitmethod requires first argument to be self')
named_timer(name).start()
r = func(*args, **kwargs)
named_timer(name).stop()
return r
return wrapper
def timeit(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
name = ''.join([func.__name__])
except:
            raise RuntimeError('timeit failed to determine the function name')
named_timer(name).start()
r = func(*args, **kwargs)
named_timer(name).stop()
return r
return wrapper
class SimpleTimer:
def __init__(self, name='', hz=0.5, header=''):
self.name_ = name
self.hz_ = hz
self.header_ = header
self.counter_ = 0
self.last_ = time.time()
self.period_ = 0
self.calls_ = 0
self.last_print_ = time.time()
self.last_fps_ = 0
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop(force_print=True)
return self
def poll(self):
self.counter_ += 1
now = time.time()
dt = (now - self.last_)
if (now-self.last_print_) > 1.0 / self.hz_:
T = dt / self.counter_
fps = 1.0 / T
self.calls_ += self.counter_
print_green('{:s}\t[{:5.1f} ms, {:5.1f} Hz, {:d} ]\t{:s}'
.format(self.header_, T * 1e3, fps, int(self.calls_), self.name_))
self.last_ = now
self.last_print_ = now
self.counter_ = 0
def poll_piecemeal(self, force_print=False):
self.counter_ += 1
now = time.time()
dt = (now - self.last_)
self.period_ += dt
if (now-self.last_print_) > 1.0 / self.hz_ or force_print:
T = self.period_ / self.counter_
fps = 1.0 / T
self.calls_ += self.counter_
print_green('{:s}\t[{:5.1f} ms, {:5.1f} Hz, {:d} ]\t{:s}'
.format(self.header_, T * 1e3, fps, int(self.calls_), self.name_))
self.last_ = now
self.last_print_ = now
self.last_fps_ = fps
self.counter_ = 0
self.period_ = 0
def start(self):
self.last_ = time.time()
def stop(self, force_print=False):
self.poll_piecemeal(force_print=force_print)
@property
def fps(self):
return self.last_fps_
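# Usage sketch (hypothetical workload; timings are printed via print_green roughly every 1/hz seconds):
#   @timeit
#   def work():
#       time.sleep(0.01)
#   for _ in range(100):
#       work()
#   # or scoped timing, which always prints on exit:
#   with SimpleTimer('block') as t:
#       time.sleep(0.01)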
| 2.765625
| 3
|
pyboletox/Contracts/Cnab/Retorno/Cnab400/header.py
|
lucasbrahm/pyboletox
| 1
|
12781357
|
<gh_stars>1-10
from abc import ABCMeta, abstractmethod
class Header(metaclass=ABCMeta):
@abstractmethod
def getOperacaoCodigo(self):
pass
@abstractmethod
def getOperacao(self):
pass
@abstractmethod
def getServicoCodigo(self):
pass
@abstractmethod
def getServico(self):
pass
@abstractmethod
def getAgencia(self):
pass
@abstractmethod
def getAgenciaDv(self):
pass
@abstractmethod
def getConta(self):
pass
@abstractmethod
def getContaDv(self):
pass
@abstractmethod
def getData(self, format='%d/%m/%Y'):
pass
@abstractmethod
def getConvenio(self):
pass
@abstractmethod
def getCodigoCliente(self):
pass
@abstractmethod
def toDict(self):
pass
| 3.140625
| 3
|
rcs_back/containers_app/migrations/0034_auto_20210826_2017.py
|
e-kondr01/rcs_back
| 0
|
12781358
|
<reponame>e-kondr01/rcs_back<filename>rcs_back/containers_app/migrations/0034_auto_20210826_2017.py<gh_stars>0
# Generated by Django 3.2.5 on 2021-08-26 17:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('containers_app', '0033_auto_20210826_2005'),
]
operations = [
migrations.RemoveField(
model_name='container',
name='avg_fill_time',
),
migrations.RemoveField(
model_name='container',
name='avg_takeout_wait_time',
),
]
| 1.414063
| 1
|
util.py
|
cmd410/VRoidBonesRenamer
| 12
|
12781359
|
<filename>util.py
import bpy
def unique_constraint(bone, t):
for constraint in bone.constraints:
if constraint.type == t:
return constraint
constraint = bone.constraints.new(type=t)
return constraint
def get_children(parent):
l = []
for obj in bpy.context.scene.objects:
if obj.name == parent.name: continue
if obj.parent is None: continue
if obj.parent.name == parent.name:
l.append(obj)
return l
def bone_has_effect(bone):
'''Check if bone has vertex groups attached to it'''
armature = bpy.context.object
children = get_children(armature)
for obj in children:
me = obj.data
vg_id = None
for i in obj.vertex_groups:
if i.name == bone.name:
vg_id = i.index
break
if vg_id is None:
continue
for vertex in me.vertices:
            if vg_id in [vg.group for vg in vertex.groups]:
return True
return False
def get_pose_bone(bone_name):
pose_bones = bpy.context.object.pose.bones
bone = None
if bone_name in pose_bones:
bone = pose_bones[bone_name]
elif '_' not in bone_name:
for b in pose_bones:
if b.name.endswith(f'_{bone_name}'):
bone = b
break
else:
name, side = bone_name.split('_')
if side not in {'L', 'R'}:
for b in pose_bones:
if b.name.endswith(f'_{name}'):
bone = b
break
for b in pose_bones:
if b.name.endswith(f'_{side}_{name}'):
bone = b
break
return bone
| 2.515625
| 3
|
6.whileloops/challenge3_rouillonh.py
|
rouillonh/ChallengePython
| 0
|
12781360
|
<reponame>rouillonh/ChallengePython<filename>6.whileloops/challenge3_rouillonh.py
#Import the time library
from time import time
print("\tWelcome to the Prime Number App")
#Flag variable for the endless loop
band = True
while band:
print("\nEnter 1 to determine if a specific number is prime.")
print("Enter 2 to determine all prime numbers within a set range.")
choice = input("Enter your choice 1 or 2: ")
    #Branch between the menu options
if choice == "1":
num = int(input("\nEnter a number to determine if it is prime or not: "))
if num>=2:
prime_status = True
for i in range(2,num):
if num%i==0 or num<2:
prime_status = False
if prime_status == True:
print(num," is prime")
elif prime_status == False:
print(num," is not prime")
elif num<2 and num>=0:
print(num," is not prime")
if choice == "2":
n1 = int(input("\nEnter the lower bound of your range: "))
n2 = int(input("Enter the upper bound of your range: "))
primos = []
start_time = time()
for i in range(n1,n2):
prime_status = True
if n1>=1:
for j in range(2,i):
if i ==j:
break
elif i%j==0:
prime_status=False
else:
continue
if prime_status == True:
primos.append(i)
if i == 1:
primos.remove(i)
end_time = time()
delta_time = end_time - start_time
print("\nCalculations took a total of ",round(delta_time,5) ," seconds.")
print("The following numbers between ",n1," and ",n2 ," are prime:")
for i in primos:
print(i)
print("Press enter to continue")
input()
elif choice != "1" and choice != "2":
print("\nThat is not a valid option.")
c = input("Would you like to run the program again (y/n): ").lower()
    #Ask whether the user wants to run the program again
if c == 'y':
continue
elif c == 'n':
print("\nThank you for using the program. Have a nice day.")
break
| 4.125
| 4
|
Lib/site-packages/deriva/qt/common/table_widget.py
|
fochoao/cpython
| 0
|
12781361
|
from PyQt5.QtWidgets import QTableWidget
class TableWidget(QTableWidget):
def __init__(self, parent):
super(QTableWidget, self).__init__(parent)
def getCurrentTableRow(self):
row = self.currentRow()
if row == -1 and self.rowCount() > 0:
row = 0
return row
def getCurrentTableItemTextByName(self, column_name):
row = self.getCurrentTableRow()
return self.getTableItemTextByName(row, column_name)
def getTableItemTextByName(self, row, column_name):
item = self.getTableItemByName(row, column_name)
return item.text() if item else ""
    def getTableItemByName(self, row, column_name):
        column = None
        header_count = self.columnCount()
        # noinspection PyTypeChecker
        for i in range(header_count):
            header_text = self.horizontalHeaderItem(i).text()
            if column_name == header_text:
                column = i
                break
        if row is None or column is None:
            return None
        return self.item(row, column)
| 3.0625
| 3
|
setup.py
|
coisme/nanoleaf
| 8
|
12781362
|
<reponame>coisme/nanoleaf<filename>setup.py
from setuptools import setup
import subprocess
gitVersion = subprocess.check_output("git tag -l --points-at HEAD".split()).decode('UTF-8').strip()
setup(
name='nanoleaf',
packages=['nanoleaf'],
version=gitVersion,
description='Python interface for Nanoleaf Aurora.',
long_description=open('README.md', 'r').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/software-2/nanoleaf',
download_url='https://github.com/software-2/nanoleaf/archive/' + gitVersion + '.tar.gz',
keywords=['nanoleaf', 'aurora', 'lighting', 'openAPI'],
classifiers=[
'Topic :: Home Automation',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
install_requires=['requests']
)
| 1.28125
| 1
|
OmniMarkupLib/Renderers/MediaWikiRenderer.py
|
henumohe/OmniMarkupPreviewer
| 476
|
12781363
|
<filename>OmniMarkupLib/Renderers/MediaWikiRenderer.py<gh_stars>100-1000
from .base_renderer import *
import os.path
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
@renderer
class MediaWikiRenderer(CommandlineRenderer):
def __init__(self):
super(MediaWikiRenderer, self).__init__(
executable='ruby',
args=['-rubygems', os.path.join(__path__, 'bin/mw2html.rb')])
@classmethod
def is_enabled(cls, filename, syntax):
if syntax == 'text.html.mediawiki':
return True
return filename.endswith('.mediawiki') or filename.endswith('.wiki')
| 2.34375
| 2
|
Python/Battery_Full_Charged_Notifier/battery_full_charged_notifier.pyw
|
iamakkkhil/Rotten-Scripts
| 1,127
|
12781364
|
import psutil #Library to get System details
import time
import pyttsx3 # Library for text to speech Offline
from win10toast import ToastNotifier # also need to install win32api (This is for Notifications)
import threading # To make notification and speech work at same time
toaster = ToastNotifier()
x=pyttsx3.init()
x.setProperty('rate',130)
x.setProperty('volume',8)
count = 0
def show_notification(show_text):
toaster.show_toast(show_text,
icon_path='battery.ico',
duration=10)
# loop the toaster over some period of time
while toaster.notification_active():
time.sleep(0.1)
def monitor():
    global count  # count is re-assigned below, so declare it global to avoid UnboundLocalError
    while (True):
time.sleep(10)
battery = psutil.sensors_battery()
plugged = battery.power_plugged
percent = int(battery.percent)
if percent == 100:
if plugged == True:
processThread = threading.Thread(target=show_notification, args=("Laptop Fully Charged",)) # <- note extra ','
processThread.start()
x.say("Laptop is Fully Charged Please plug out the cable")
x.runAndWait()
elif percent == 90:
if plugged == True:
if count == 0:
processThread = threading.Thread(target=show_notification, args=("Your Battery at 90% Please plug out the cable",)) # <- note extra ','
processThread.start()
x.say("Your battery at 90% ")
x.runAndWait()
count = count + 1
if __name__ == "__main__":
monitor()
| 3.296875
| 3
|
akanda/router/drivers/ping.py
|
fzylogic/akanda-appliance
| 0
|
12781365
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from akanda.router.drivers import base
from akanda.router import utils
LOG = logging.getLogger(__name__)
class PingManager(base.Manager):
"""
A class which provide a facade to the system ping utility. Supports both
IPv4 and IPv6.
"""
exe_map = {
4: '/bin/ping',
6: '/bin/ping6'
}
def __init__(self, root_helper='sudo'):
"""
Initializes PingManager class.
:type root_helper: str
:param root_helper: System utility to escalate privileges.
"""
super(PingManager, self).__init__(root_helper)
def do(self, ip):
"""
Sends a single ICMP packet to <ip> using the systems ping utility.
:type ip: str
:param ip: The IP address to send ICMP packets to.
:rtype: bool. If <ip> responds to the ICMP packet, returns True else,
returns False
"""
version = netaddr.IPAddress(ip).version
args = ['-c', '1', ip]
try:
utils.execute([self.exe_map.get(version)] + args)
return True
except RuntimeError:
return False
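# Usage sketch (hypothetical address; assumes the ping executables exist at the paths in exe_map
# and that the root_helper command is available):
#   manager = PingManager()
#   reachable = manager.do('192.0.2.1')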
| 2.203125
| 2
|
quantumflow/transform.py
|
BastianZim/quantumflow-dev
| 51
|
12781366
|
# Copyright 2019-, <NAME> and the QuantumFlow contributors
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
QuantumFlow: Translate, transform, and compile circuits.
"""
# Note: Beta Prototype
from typing import Callable, Generator, Set, Tuple
from .circuits import Circuit
from .dagcircuit import DAGCircuit
from .info import almost_identity
from .ops import Gate, Operation
from .stdgates import CZ, ZZ, H, XPow, YPow, ZPow
from .translate import (
circuit_translate,
translate_ccnot_to_cnot,
translate_cnot_to_cz,
translate_cphase_to_zz,
translate_cswap_to_ccnot,
translate_hadamard_to_zxz,
translate_invt_to_tz,
translate_invv_to_tx,
translate_t_to_tz,
translate_tx_to_zxzxz,
translate_v_to_tx,
translate_zz_to_cnot,
)
# FIXME: transpile instead of compile?
def compile_circuit(circ: Circuit) -> Circuit:
"""Compile a circuit to standard gate set (CZ, X^0.5, ZPow),
simplifying circuit where possible.
"""
# FIXME: Should be automagic translations
# Convert multi-qubit gates to CZ gates
trans = [
translate_cswap_to_ccnot,
translate_ccnot_to_cnot,
translate_cphase_to_zz,
translate_cnot_to_cz,
translate_t_to_tz,
translate_invt_to_tz,
translate_zz_to_cnot,
translate_v_to_tx,
translate_invv_to_tx,
]
circ = circuit_translate(circ, trans)
dagc = DAGCircuit(circ)
remove_identites(dagc)
merge_hadamards(dagc)
convert_HZH(dagc)
# Standardize 1-qubit gates
circ = Circuit(dagc)
circ = circuit_translate(circ, [translate_hadamard_to_zxz])
circ = circuit_translate(circ, [translate_tx_to_zxzxz], recurse=False)
# Gather and merge ZPow gates
dagc = DAGCircuit(circ)
retrogress_tz(dagc)
merge_tz(dagc)
remove_identites(dagc)
circ = Circuit(dagc)
return circ
def find_pattern(
dagc: DAGCircuit,
gateset1: Set,
gateset2: Set,
) -> Generator[Tuple[Operation, Operation], None, None]:
"""Find where a gate from gateset1 is followed by a gate from gateset2 in
a DAGCircuit"""
for elem2 in dagc:
if type(elem2) not in gateset2:
continue
for q2 in elem2.qubits:
elem1 = dagc.prev_element(elem2, q2)
if type(elem1) not in gateset1:
continue
yield (elem1, elem2)
def remove_element(dagc: DAGCircuit, elem: Operation) -> None:
"""Remove a node from a DAGCircuit"""
for qubit in elem.qubits:
prv = dagc.prev_element(elem, qubit)
nxt = dagc.next_element(elem, qubit)
dagc.graph.add_edge(prv, nxt, key=qubit)
dagc.graph.remove_node(elem)
def remove_identites(dagc: DAGCircuit) -> None:
"""Remove identities from a DAGCircuit"""
for elem in dagc:
if isinstance(elem, Gate) and almost_identity(elem):
remove_element(dagc, elem)
def merge_hadamards(dagc: DAGCircuit) -> None:
"""Merge and remove neighboring Hadamard gates"""
for elem1, elem2 in find_pattern(dagc, {H}, {H}):
remove_element(dagc, elem1)
remove_element(dagc, elem2)
def merge_tx(dagc: DAGCircuit) -> None:
    """Merge neighboring XPow gates"""
    _merge_turns(dagc, XPow)
def merge_ty(dagc: DAGCircuit) -> None:
    """Merge neighboring YPow gates"""
    _merge_turns(dagc, YPow)
def merge_tz(dagc: DAGCircuit) -> None:
    """Merge neighboring ZPow gates"""
    _merge_turns(dagc, ZPow)
def _merge_turns(dagc: DAGCircuit, gate_class: Callable) -> None:
for gate0, gate1 in find_pattern(dagc, {gate_class}, {gate_class}):
t = gate0.param("t") + gate1.param("t")
(qubit,) = gate0.qubits
gate = gate_class(t, qubit)
prv = dagc.prev_element(gate0)
nxt = dagc.next_element(gate1)
dagc.graph.add_edge(prv, gate, key=qubit)
dagc.graph.add_edge(gate, nxt, key=qubit)
dagc.graph.remove_node(gate0)
dagc.graph.remove_node(gate1)
def retrogress_tz(dagc: DAGCircuit) -> None:
"""Commute ZPow gates as far backward in the circuit as possible"""
G = dagc.graph
again = True
while again:
again = False
for elem1, elem2 in find_pattern(dagc, {ZZ, CZ}, {ZPow}):
(q,) = elem2.qubits
elem0 = dagc.prev_element(elem1, q)
elem3 = dagc.next_element(elem2, q)
G.remove_edge(elem0, elem1, q)
G.remove_edge(elem1, elem2, q)
G.remove_edge(elem2, elem3, q)
G.add_edge(elem0, elem2, key=q)
G.add_edge(elem2, elem1, key=q)
G.add_edge(elem1, elem3, key=q)
again = True
# TODO: Rename? merge_hzh
# TODO: larger pattern, simplifying sequences of 1-qubit Clifford gates
def convert_HZH(dagc: DAGCircuit) -> None:
"""Convert a sequence of H-ZPow-H gates to a XPow gate"""
for elem2, elem3 in find_pattern(dagc, {ZPow}, {H}):
elem1 = dagc.prev_element(elem2)
if not isinstance(elem1, H):
continue
prv = dagc.prev_element(elem1)
nxt = dagc.next_element(elem3)
t = elem2.param("t")
(q0,) = elem2.qubits
gate = XPow(t, q0)
dagc.graph.remove_node(elem1)
dagc.graph.remove_node(elem2)
dagc.graph.remove_node(elem3)
dagc.graph.add_edge(prv, gate, key=q0)
dagc.graph.add_edge(gate, nxt, key=q0)
# fin
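# Usage sketch (hypothetical two-qubit circuit, built only from gates already imported above):
#   circ = Circuit([H(0), ZZ(0.25, 0, 1), H(1)])
#   compiled = compile_circuit(circ)
#   # 'compiled' should now contain only CZ, XPow and ZPow gates.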
| 1.984375
| 2
|
Diversity/accounts/admin.py
|
IAmAngelLiu/shellhack2021
| 0
|
12781367
|
<filename>Diversity/accounts/admin.py
from django.contrib import admin
# Register your models here.
from .models import Employee
from .models import Data
from .models import Points
admin.site.register(Employee)
admin.site.register(Data)
admin.site.register(Points)
| 1.507813
| 2
|
setup.py
|
damcio/-Bio-Projekt-semestralny
| 0
|
12781368
|
<filename>setup.py<gh_stars>0
#!/usr/bin/env python
from setuptools import setup
setup(name='bio',
version='0.1',
description='Bioinformatics project',
author='<NAME>',
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
| 0.910156
| 1
|
A1014280203/7/7.py
|
saurabh896/python-1
| 3,976
|
12781369
|
import os
code_lines = list()
notation_lines = list()
blank_lines = list()
def process_file(filename):
global code_lines
global notation_lines
global blank_lines
with open(filename, 'r') as file:
for line in file.readlines():
_line = line.strip()
if not _line:
blank_lines.append(_line)
elif _line.startswith('#'):
notation_lines.append(_line)
else:
code_lines.append(_line)
def show_result():
global code_lines
global notation_lines
global blank_lines
print('-'*20)
print('code:', len(code_lines))
for line in code_lines:
print(line)
print('-' * 20)
print('notation:', len(notation_lines))
for line in notation_lines:
print(line)
print('-' * 20)
print('blank:', len(blank_lines))
code_lines.clear()
notation_lines.clear()
blank_lines.clear()
def process_files(path='../6'):
files = os.listdir(path)
for file in files:
if file.endswith('.py'):
print('='*30)
print('current file:', os.path.join(path, file))
process_file(os.path.join(path, file))
show_result()
process_files()
| 3.078125
| 3
|
SampleProject/POMObjectDemo/Pages/loginPage.py
|
MelissaQA/selenium-py-example
| 0
|
12781370
|
from ..Locators.locators import Locators
class LoginPage():
def __init__(self, driver):
self.driver = driver
self.username_textbox_id = Locators.username_textbox_id
self.password_textbox_id = Locators.password_textbox_id
self.login_button_id = Locators.login_button_id
self.invalidUserName_message_xpath = '//*[@id="spanMessage"]'
#Actions that can be done on page objects
def enter_username(self, username):
self.driver.find_element_by_id(self.username_textbox_id).clear()
self.driver.find_element_by_id(self.username_textbox_id).send_keys(username)
def enter_password(self, password):
self.driver.find_element_by_id(self.password_textbox_id).clear()
self.driver.find_element_by_id(self.password_textbox_id).send_keys(password)
def click_login(self):
self.driver.find_element_by_id(self.login_button_id).click()
def check_invalid_username_message(self):
msg = self.driver.find_element_by_xpath(self.invalidUserName_message_xpath).text
return msg
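# Usage sketch (hypothetical credentials; assumes a selenium webdriver already navigated to the login page):
#   page = LoginPage(driver)
#   page.enter_username('demo_user')
#   page.enter_password('demo_pass')
#   page.click_login()
#   print(page.check_invalid_username_message())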
| 3.015625
| 3
|
main.py
|
mftnakrsu/Comparison-of-OCR
| 10
|
12781371
|
from ocr import OCR
ocr=OCR(image_folder="test/")
if __name__ == "__main__":
ocr.keras_ocr_works()
ocr.easyocr_model_works()
ocr.pytesseract_model_works()
| 1.65625
| 2
|
pygraff/generator.py
|
cryptolake/Graff
| 1
|
12781372
|
<reponame>cryptolake/Graff<filename>pygraff/generator.py
from bs4 import BeautifulSoup
from pathlib import Path
def gen_prevs(paths, title_tag, preview_tag, max_char, posts_dir):
"""generate post preview from each blog file
:returns: list of post previews
"""
posts = []
for page in paths:
linkp = str(page)
posts.append([Path(page).read_text(), linkp[linkp.find(posts_dir):]])
previews = []
for post in posts:
soup = BeautifulSoup(post[0], 'html.parser')
prev = soup.p
title = soup.title
link = soup.new_tag("a", href=post[1])
# replace title tag with new tag
title.name = title_tag
# replace p tag with the new tag
prev.name = preview_tag
link.extend(title)
# summarization of p
max_char = int(max_char)
char_len = len(prev.string)
if char_len > max_char:
prev.string = prev.string[0:max_char]
prev.string = prev.string+"..."
previews.append([link, prev])
return previews
def writer(previews, blog_file, blog_class):
"""Write the previews to the selecte file
"""
soup = BeautifulSoup(blog_file, 'html.parser')
_class_ = soup.find(class_=blog_class)
_class_.clear()
for preview in previews:
li = soup.new_tag('li')
li.extend(preview)
_class_.append(li)
return soup.prettify()
def new(title, web_dir, posts_dir, post_template):
""" generate new blog page and put title there
"""
temp_path = Path(web_dir+post_template)
template = temp_path.read_text()
soup = BeautifulSoup(template, 'html.parser')
name = soup.title
name.clear()
name.extend(title)
new_file = "/"+title+".html"
new_path = web_dir+posts_dir+new_file
Path(new_path).touch()
Path(new_path).write_text(str(soup))
return new_path
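# Usage sketch (hypothetical site layout; tag and class names depend on the HTML templates in use):
#   pages = Path('site/posts').glob('*.html')
#   previews = gen_prevs(pages, 'h2', 'p', 150, 'posts')
#   html = writer(previews, Path('site/index.html').read_text(), 'blog')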
| 2.875
| 3
|
src/inbox/pipelines.py
|
Eforcers/inbox-cleaner
| 1
|
12781373
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from datetime import datetime
import logging
import constants
from inbox.helpers import IMAPHelper
from inbox.models import MoveMessageProcess
from livecount import counter
import pipeline
from pipeline.common import List
from models import MoveProcess, MoveUserProcess
from tasks import get_messages
class MoveProcessPipeline(pipeline.Pipeline):
def run(self, move_process_id):
logging.info("start process for move_process %s", move_process_id)
process = MoveProcess.get_by_id(move_process_id)
emails = process.emails
user_processes = []
for email in emails:
user_process = MoveUserProcess(
user_email=email,
move_process_key=process.key,
status=constants.STARTED
)
user_process_key = user_process.put()
user_process_id = user_process_key.id()
user_processes.append((yield MoveUserProcessPipeline(
user_process_id=user_process_id, tag=process.tag)))
yield List(*user_processes)
def finalized(self):
move_process_id = self.kwargs.get('move_process_id')
logging.info('Finishing process [%s]', move_process_id)
process = MoveProcess.get_by_id(move_process_id)
process.status = constants.FINISHED
process.execution_finish = datetime.now()
process.put()
class MoveUserProcessPipeline(pipeline.Pipeline):
def run(self, user_process_id=None, tag=None):
user_process = MoveUserProcess.get_by_id(user_process_id)
try:
messages = get_messages(user_process, tag)
message_processes = []
counter.reset_counter(
'%s_%s_ok_counter' % (
user_process.user_email, user_process.key.id()))
counter.reset_counter(
'%s_%s_error_counter' % (
user_process.user_email, user_process.key.id()))
for batch in messages:
message_processes.append(
(yield MoveBatchMessagesProcessPipeline(
batch=batch, user_process_id=user_process_id)))
yield List(*message_processes)
except Exception as e:
if self.current_attempt >= self.max_attempts:
logging.exception('Failed definitely retrieving for [%s] '
'messages', user_process.user_email)
user_process.status = constants.FAILED
user_process.error_description = e.message
user_process.put()
else:
                logging.exception('Failed retrieving messages for [%s], '
'try again...', user_process.user_email)
raise e
def finalized(self):
user_process_id = self.kwargs.get('user_process_id')
logging.info('Finishing user process [%s]', user_process_id)
user_process = MoveUserProcess.get_by_id(user_process_id)
if not self.was_aborted:
user_process.status = constants.FINISHED
user_process.error_description = None
user_process.put()
class MoveBatchMessagesProcessPipeline(pipeline.Pipeline):
def run(self, batch=None, user_process_id=None):
user_process = MoveUserProcess.get_by_id(user_process_id)
move_process = user_process.move_process_key.get()
failed_messages = []
#Create connection
imap = IMAPHelper()
imap.oauth1_2lo_login(user_email=user_process.user_email)
for msg_id in batch:
message_process = MoveMessageProcess(
email=user_process.user_email,
message_id=msg_id,
user_process_key=user_process.key,
status=constants.STARTED)
message_process_key = message_process.put()
try:
move_message(user_process=user_process,
message_process_id=message_process_key.id(),
label=move_process.tag, imap=imap)
except Exception as e:
logging.exception(
'Failed while moving message [%s] for user [%s], '
'try again...', msg_id, user_process.user_email)
failed_messages.append(
(yield MoveMessageProcessPipeline(
message_process_id=message_process_key.id(),
move_process_id=move_process.key.id()))
)
imap.close()
class MoveMessageProcessPipeline(pipeline.Pipeline):
def run(self, message_process_id=None, user_process_id=None,
move_process_id=None):
move_process = MoveProcess.get_by_id(move_process_id)
user_process = MoveUserProcess.get_by_id(user_process_id)
try:
move_message(user_process=user_process,
message_process_id=message_process_id,
label=move_process.tag)
except Exception as e:
if self.current_attempt >= self.max_attempts:
logging.exception(
'Failed definitely moving the message id [%s] for user [%s] messages',
message_process_id, user_process.user_email)
message_process = MoveMessageProcess.get_by_id(
message_process_id)
message_process.status = constants.FAILED
message_process.error_description = e.message
message_process.put()
counter.load_and_increment_counter(
'%s_%s_error_counter' % (
user_process.user_email, user_process.key.id()))
else:
logging.exception(
                    'Failed retrieving a message id [%s] for [%s], '
'try again...', message_process_id, user_process.user_email)
raise e
| 2.0625
| 2
|
daily_messages/daily_messages.py
|
rohinigopalqxf2/qxf2-lambdas
| 3
|
12781374
|
<gh_stars>1-10
"""
Get messages for employees from daily-messages.qxf2.com
And post to Skype Sender
"""
import boto3
import requests
BASE_URL = 'http://daily-messages.qxf2.com'
QUEUE_URL = 'https://sqs.ap-south-1.amazonaws.com/285993504765/skype-sender'
def clean_message(message):
"Clean up the message received"
message = message.replace("'", '-')
message = message.replace('"', '-')
return message
def get_message(endpoint):
"Get a message for employees"
response = requests.get(url=BASE_URL+endpoint)
return clean_message(response.json()['msg'])
def write_message(daily_message, channel):
"Send a message to Skype Sender"
sqs = boto3.client('sqs')
message = str({'msg':f'{daily_message}', 'channel':channel})
sqs.send_message(QueueUrl=QUEUE_URL, MessageBody=(message))
def lambda_handler(event, context):
"Lambda entry point"
#This lambda expects an event of type {'endpoint':'/blah','channel':'blah'}
message = get_message(event.get('endpoint'))
write_message(message, event.get('channel','test'))
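# Local test sketch (placeholder endpoint/channel values, as in the comment above; needs AWS credentials for SQS):
#   lambda_handler({'endpoint': '/blah', 'channel': 'test'}, None)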
| 2.859375
| 3
|
chef_admin/apps.py
|
pradeepdhankhar/chef24x7
| 0
|
12781375
|
<gh_stars>0
from django.apps import AppConfig
class ChefAdminConfig(AppConfig):
name = 'chef_admin'
| 1.054688
| 1
|
setup.py
|
hanshoi/kapsi-git-manager
| 0
|
12781376
|
import os
from setuptools import setup
def read(fname):
"""
Read README.md as long description if found.
Otherwise just return short description.
"""
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return "Simple git management application to be used in Kapsi hosting."
setup(
name="kapsi_git_manager",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
description=("Simple git management application to be used in Kapsi hosting."),
license="MIT",
keywords="git management kapsi",
url="http://packages.python.org/kapsi_git_manager",
packages=['kapsi_git_manager', ],
package_data={'kapsi_git_manager': ['license.txt', 'templates/*.html']},
include_package_data=True,
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
install_requires=[
'Flask',
'flup<=1.0.2',
'Flask-HTTPAuth',
'GitPython',
'passlib'
],
)
| 1.789063
| 2
|
LocalGP.py
|
nick-terry/Splitting-GP
| 1
|
12781377
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 7 10:57:53 2020
@author: pnter
"""
import torch
import gpytorch
# from gpytorch.utils.memoize import add_to_cache, is_in_cache
from gpytorch.lazy.root_lazy_tensor import RootLazyTensor
import copy
from UtilityFunctions import updateInverseCovarWoodbury
from math import inf
'''
Implements the Local Gaussian Process Regression Model as described by Nguyen-tuong et al.
Note that the kernel used in the original paper Local Gaussian Process Regression for Real Time Online Model Learning uses the RBF kernel
Parameters:
likelihoodFn: The function which, when called, instantiates a new likelihood of the type which should be used for all child models
kernel: The kernel function used to construct the covariances matrices
w_gen: The threshold distance for generation of a new child model
'''
class LocalGPModel:
def __init__(self, likelihoodFn, kernel, inheritKernel=True, **kwargs):
#Initialize a list to contain local child models
self.children = []
self.w_gen = kwargs['w_gen'] if 'w_gen' in kwargs else .5
self.covar_module = kernel
self.mean_module = kwargs['mean'] if 'mean' in kwargs else gpytorch.means.ConstantMean
self.likelihood = likelihoodFn
self.inheritKernel = inheritKernel
#Number of training iterations used each time child model is updated
#This should be roughly proportional to the number of observations.
#By default, we will use 30. As number of data goes up, this may increase
self.training_iter = 30
#Default output dimension is 1 (scalar)
self.outputDim = 1 if 'outputDim' not in kwargs else kwargs['outputDim']
#If numInducingInputs is given, use variational GP models for child models
if 'numInducingPoints' in kwargs:
self.numInducingPoints = kwargs['numInducingPoints']
assert(type(self.numInducingPoints)==int)
assert(self.numInducingPoints>0)
self.objectiveFunctionClass = gpytorch.mlls.VariationalELBO
else:
self.numInducingPoints = None
#If maxChildren in kwargs, set self.maxChildren. Else, set to inf
if 'maxChildren' in kwargs:
self.maxChildren = kwargs['maxChildren']
else:
self.maxChildren = inf
#If M=# of closest models for prediction is given, set parameter
if 'M' in kwargs:
self.M = kwargs['M']
else:
self.M = None
'''
Update the LocalGPModel with a pair {x,y}.
'''
def update(self, x, y):
#If no child model have been created yet, instantiate a new child with {x,y} and record the output dimension
if len(self.children)==0:
self.createChild(x,y)
self.outputDim = int(y.shape[-1])
#If child models exist, find the the child whose center is closest to x
else:
closestChildIndex,minDist = self.getClosestChild(x)
#Get the mask of any points for which the closest model is not similar enough
genNewModelIndices = (minDist < self.w_gen) if minDist.dim()>0 else (minDist < self.w_gen).unsqueeze(0)
x_gen = x[genNewModelIndices,:]
y_gen = y[genNewModelIndices]
#Now generate a new model, if needed.
if x_gen.shape[0] > 0:
self.createChild(x_gen[0,:].unsqueeze(0), y_gen[0].unsqueeze(0))
#We then recursively call update() without the point which generated
#the model and return, in case some points would be assigned the newly generated model
if x.shape[0] > 1:
x_minus = torch.cat([x[0:genNewModelIndices[0]], x[genNewModelIndices[0]:]])
y_minus = torch.cat([y[0:genNewModelIndices[0]], y[genNewModelIndices[0]:]])
self.update(x_minus,y_minus)
return
#Get points where we are not generating a new model
x_assign = x[genNewModelIndices.bitwise_not()]
y_assign = y[genNewModelIndices.bitwise_not()]
closestIndex_assign = closestChildIndex[genNewModelIndices.bitwise_not()]\
if closestChildIndex.dim()>0 else closestChildIndex.unsqueeze(0)[genNewModelIndices.bitwise_not()]
#loop over children and assign them the new data points
for childIndex in range(len(self.children)):
#Get the data which are closest to the current child
x_child = x_assign[closestIndex_assign==childIndex].squeeze(0)
y_child = y_assign[closestIndex_assign==childIndex].squeeze(0)
#If new data is a singleton, unsqueeze the 0th dim
if x_child.dim() == 1:
x_child,y_child = x_child.unsqueeze(0),y_child.unsqueeze(0)
#Only proceed if there are some data in the batch assigned to the child
if x_child.shape[0] > 0:
closestChildModel = self.children[childIndex]
#Create new model(s) which additionally incorporates the pair {x,y}. This will return more than one model
#if a split occurs.
newChildModel = closestChildModel.update(x_child,y_child)
#Replace the existing model with the new model which incorporates new data
                    self.children[childIndex] = newChildModel
'''
Instantiate a new child model using the training pair {x,y}
Note that the likelihood used to instantiate the child model is distinct
from each other child model, as opposed to the kernel which is shared
between the children.
'''
def createChild(self,x,y):
#Create new child model, then train
if self.numInducingPoints is None:
newChildModel = LocalGPChild(x,y,self,self.inheritKernel)
else:
newChildModel = ApproximateGPChild(x,y,self,self.inheritKernel)
#Set other children to not be last updated.
self.setChildLastUpdated(newChildModel)
#Add to the list of child models
self.children.append(newChildModel)
def setChildLastUpdated(self,child):
for _child in self.children:
_child.lastUpdated = False
child.lastUpdated = True
'''
Return a pytorch tensor of the centers of all child models.
'''
def getCenters(self):
#Get the center of each child model
centersList = list(map(lambda x:x.center.reshape((x.center.shape[0])),self.children))
#Return the centers after stacking in new dimension
return torch.stack(centersList,dim=0)
'''
Returns the index of the closest child model to the point x, as well as the distance
between the model's center and x.
'''
def getClosestChild(self,x):
#Compute distances between new input x and existing inputs
distances = self.getDistanceToCenters(x)
#Get the single minimum distance from the tensor (max covar)
minResults = torch.max(distances,1) if distances.dim()>1 else torch.max(distances,0)
return minResults[1],minResults[0]
'''
Compute the distances from the point x to each center
'''
def getDistanceToCenters(self,x,returnPowers=False):
centers = self.getCenters()
x,centers = x.double(),centers.double()
distances = self.covar_module(x,centers).evaluate()
powers = torch.zeros(distances.shape)
#Switch to double precision for this calculation
'''
vec = ((x-centers.repeat(x.shape[0],1))/self.covar_module.lengthscale).double().repeat(x.shape[0],1)
powers = .5*torch.sum(vec**2,dim=1)
distances = torch.exp(-powers)
'''
if returnPowers:
return distances.squeeze(0),powers
else:
return distances.squeeze(0)
'''
Make a prediction at the point(s) x. This method is a wrapper which handles the messy case of multidimensional inputs.
The actual prediction is done in the predictAtPoint helper method. If no M is given, use default
'''
def predict(self,x,individualPredictions=False,getVar=False):
return self.predict_Helper(x,self.M,individualPredictions,getVar)
'''
Make a prediction at the point(s) x. This method is a wrapper which handles the messy case of multidimensional inputs.
The actual prediction is done in the predictAtPoint helper method
'''
def predict_Helper(self,x,M,individualPredictions,getVar):
if M is None:
M = len(self.children)
else:
M = min(M,len(self.children))
#Update all of the covar modules to the most recent
if self.inheritKernel:
for child in self.children:
child.covar_module = self.covar_module
#If not inheriting kernel, then average the lengthscale params of child kernels
else:
lengthscales = [child.covar_module.lengthscale for child in self.children]
self.covar_module.lengthscale = torch.mean(torch.stack(lengthscales),dim=0)
mean_predictions = []
var_predictions = []
#Get the predictions of each child at each point
for child in self.children:
prediction = child.predict(x)
mean_predictions.append(prediction.mean)
var_predictions.append(prediction.variance)
#Concatenate into pytorch tensors
mean_predictions = torch.stack(mean_predictions).transpose(0,1)
var_predictions = torch.stack(var_predictions).transpose(0,1)
#Squeeze out any extra dims that may have accumulated
if mean_predictions.dim()>2:
mean_predictions = mean_predictions.squeeze()
var_predictions = var_predictions.squeeze()
#if the predictions are done at a single point, we need to unsqueeze in dim 0
if mean_predictions.dim()<2:
mean_predictions = mean_predictions.unsqueeze(-1)
var_predictions = var_predictions.unsqueeze(-1)
#Transpose to agree with minIndices dims
#Note: This only needs to be done for the incremental experiments where we track memory usage.
#Leave this commented out otherwise
'''
mean_predictions = mean_predictions.transpose(0,1)
var_predictions = var_predictions.transpose(0,1)
'''
#We don't need this weighting procedure if there is only one child
if mean_predictions.shape[-1]>1:
#Get the covar matrix
distances = self.getDistanceToCenters(x)
#Get the M closest child models. Need to squeeze out extra dims of 1.
sortResults = torch.sort(distances.squeeze(-1).squeeze(-1),descending=True)
#Get the minDists for weighting predictions
#minDists = sortResults[0][:,:M].squeeze(-1) if sortResults[0].dim()>1 else sortResults[0].unsqueeze(0)
minDists = sortResults[0][:,:M] if sortResults[0].dim()>1 else sortResults[0].unsqueeze(0)
#Get the min indices for selecting the correct predictions. If dim==1, then there is only one child, so no need to take up to M predictions
minIndices = sortResults[1][:,:M] if sortResults[1].dim()>1 else sortResults[1].unsqueeze(0)
#Get the associate predictions
gatherDim = 1 if mean_predictions.dim()>1 else 0
mean_predictions = mean_predictions.gather(gatherDim,minIndices)
var_predictions = var_predictions.gather(gatherDim,minIndices)
#Compute weights for the predictions. Switch to double precision for this somewhat unstable computation
minDists = minDists.double()
#If we have M=1, we need to unsqueeze for the summation
if minDists.dim() == 1:
minDists = minDists.unsqueeze(-1)
#Sum the m smallest distances for each prediction point to normalize
denominator = torch.sum(minDists,dim=1).unsqueeze(-1).repeat((1,minDists.shape[1]))
weights = minDists/denominator
#Compute weighted predictions.
#IMPORTANT: the weighted variance predictions are highly negatively biased since we do not account for the covariance between models
weighted_mean_predictions = torch.sum(weights * mean_predictions,dim=1)
weighted_var_predictions = torch.sum(weights**2 * var_predictions,dim=1)
else:
weighted_mean_predictions = mean_predictions
weighted_var_predictions = var_predictions
if getVar:
return weighted_mean_predictions,weighted_var_predictions
elif individualPredictions:
return weighted_mean_predictions,mean_predictions,weights,minDists
else:
return weighted_mean_predictions
'''
Make a prediction at the point x by finding the M closest child models and
computing a weighted average of their predictions. By default M is the number
of child models. If M < number of child models, use all of them.
THIS METHOD IS NOW DEPRECATED. DO NOT RELY ON THIS.
'''
def predictAtPoint(self,x,M=None,individualPredictions=False):
if M is None:
M = len(self.children)
else:
M = min(M,len(self.children))
#Compute distances between new input x and existing inputs
distances,powers = self.getDistanceToCenters(x,True)
#Get the M closest child models. Need to squeeze out extra dims of 1.
sortResults = torch.sort(distances.squeeze(-1).squeeze(-1),descending=True)
minDists = sortResults[0][:M].squeeze(-1) if sortResults[0].dim()>0 else sortResults[0].unsqueeze(0)
minIndices = sortResults[1][:M] if sortResults[1].dim()>0 else sortResults[1].unsqueeze(0)
closestChildren = [self.children[i] for i in minIndices]
'''
Get a posterior distribution for each child model. Note each will be
multivariate normal. Then compute weighted average of the means of the
posterior distributions.
'''
posteriorMeans = []
for child in closestChildren:
posterior = child.predict(x)
posteriorMeans.append(posterior.mean)
'''
TODO: It would be better to instead compute the weighted average of the
posterior distributions so we have access to variance as well.
'''
posteriorMeans = torch.stack(posteriorMeans)
#We need to be careful with this computation. If the covariances are very small, we may end up with a nan value here.
nonZeroDists = minDists[minDists>0.0]
#Address the case where we are predicting very far away from all models. Take unweighted mean of all predictions
if nonZeroDists.shape[-1]==0:
weights = 1.0/(powers+1.0)
weights = weights/torch.sum(weights)
else:
minDists = minDists
weights = minDists/torch.sum(minDists)
weightedAverageMean = torch.dot(weights,posteriorMeans.squeeze(-1).double()).float()
if individualPredictions:
return weightedAverageMean,posteriorMeans,weights,minDists
else:
return weightedAverageMean
class LocalGPChild(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, parent, inheritKernel=True, **kwargs):
#Track if the child was created by splitting
self.isSplittingChild = True if 'split' in kwargs and kwargs['split'] else False
#Handle prior likelihood
if 'priorLik' in kwargs and kwargs['priorLik'] is not None:
priorLik = kwargs['priorLik']
else:
#If no prior is provided, use the default of the parent
priorLik = parent.likelihood()
#In this case, we reset the isSplittingChild flag to false in order for the new likelihood to be trained
self.isSplittingChild = False
super(LocalGPChild, self).__init__(train_x, train_y, priorLik)
#Set to double mode
self.double()
self.likelihood.double()
self.parent = parent
if 'priorMean' in kwargs and kwargs['priorMean'] is not None:
#If given, take a prior for the mean. Used for splitting models.
self.mean_module = copy.deepcopy(kwargs['priorMean'])
else:
self.mean_module = parent.mean_module()
'''
If inheritKernel is set to True, then the same Kernel function (including the same hyperparameters)
will be used in all of the child models. Otherwise, a separate instance of the same kernel function
is used for each child model.
'''
if inheritKernel:
self.covar_module = parent.covar_module
else:
self.covar_module = parent.covar_module.__class__(ard_num_dims=train_x.shape[1] if train_x.dim()>1 else 1)
self.lastUpdated = True
'''
Compute the center as the mean of the training data
'''
self.center = torch.mean(train_x,dim=0)
if self.center.dim()==0:
self.center = self.center.unsqueeze(0)
self.train_x = train_x
self.train_y = train_y
self.trained = False
self.initTraining()
def forward(self,x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def update(self,x,y):
#Sync covar
if self.parent.inheritKernel:
self.covar_module = self.parent.covar_module
#Update train_x, train_y
self.train_x = torch.cat([self.train_x, x])
self.train_y = torch.cat([self.train_y, y])
#Update the data which can be used for optimizing
self.train_inputs = (self.train_x,)
self.train_targets = self.train_y
#Flag the child as not having been trained.
self.trained = False
#Update center
self.center = torch.mean(self.train_x,dim=0)
if self.center.dim()==0:
self.center = self.center.unsqueeze(0)
return self
'''
Perform a rank-one update of the child model's inverse covariance matrix cache.
'''
def updateInvCovarCache(self,update=False):
lazy_covar = self.prediction_strategy.lik_train_train_covar
if is_in_cache(lazy_covar,"root_inv_decomposition"):
if update:
#Get the old cached inverse covar matrix
K_0inv = lazy_covar.root_inv_decomposition()
#Get the new covar matrix by calling the covar module on the training data
K = self.covar_module(self.train_x)
#Compute the update
Kinv = updateInverseCovarWoodbury(K_0inv, K)
#Store updated inverse covar matrix in cache
add_to_cache(lazy_covar, "root_inv_decomposition", RootLazyTensor(torch.sqrt(Kinv)))
else:
#This is a bit dirty, but here we will simply delete the root/root_inv from cache. This forces
#GPyTorch to recompute them.
lazy_covar._memoize_cache = {}
self.prediction_strategy._memoize_cache = {}
'''
Setup optimizer and perform initial training
'''
def initTraining(self):
#Switch to training mode
self.train()
self.likelihood.train()
#We only train on instantiation if the child model is not a result of a split
if not self.isSplittingChild:
#Setup optimizer
self.optimizer = torch.optim.Adam(self.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
mll.double()
#Perform training iterations
training_iter = self.parent.training_iter
for i in range(training_iter):
self.optimizer.zero_grad()
output = self(self.train_x)
loss = -mll(output, self.train_y)
loss.backward()
self.optimizer.step()
self.trained = True
'''
Retrain model after new data is obtained
'''
def retrain(self):
#Switch to training mode
self.train()
self.likelihood.train()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
#Perform training iterations
training_iter = self.parent.training_iter
for i in range(training_iter):
self.optimizer.zero_grad()
output = self(self.train_x)
loss = -mll(output, self.train_y)
loss.backward()
self.optimizer.step()
self.trained = True
'''
Evaluate the child model to get the predictive posterior distribution
'''
def predict(self,x):
if not self.trained:
self.retrain()
#Switch to eval/prediction mode
self.eval()
self.likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
prediction = self.likelihood(self(x))
return prediction
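# Usage sketch (hypothetical 1-D toy data; assumes torch and gpytorch are installed):
#   kernel = gpytorch.kernels.RBFKernel()
#   model = LocalGPModel(gpytorch.likelihoods.GaussianLikelihood, kernel, w_gen=0.5, M=2)
#   x = torch.linspace(0, 1, 40).unsqueeze(-1).double()
#   y = torch.sin(6.28 * x).squeeze(-1).double()
#   model.update(x, y)
#   mean = model.predict(x)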
| 2.546875
| 3
|
core/controls/run.py
|
intercellar/FLOOR-E
| 0
|
12781378
|
<filename>core/controls/run.py
#!/usr/bin/python -B
from __future__ import division
import threading
import Tkinter as tk
import urllib
import json
from time import sleep, time
from os import system, listdir
import tkMessageBox
import Robot
import os
import Adafruit_PCA9685
#import RPi.GPIO as GPIO
#GPIO.setmode(GPIO.BCM)
#light_pin = 21
#GPIO.setup(light_pin, GPIO.OUT)
#GPIO.output(light_pin, GPIO.LOW)
os.system("xset r off")
display = {'lid': -1,
'pcb': -1,
'acc': -1
}
FS = 2
WAIT = .1
LEFT_TRIM = 0
RIGHT_TRIM = 0
#ROBOT VARIABLES
robot = Robot.Robot(left_trim=LEFT_TRIM, right_trim=RIGHT_TRIM)
key = {"key":"w","first":0,"last":1}
light = 0
bright = 50
speed = 160
heights = []
h = 0
#servo stuff
pwm = Adafruit_PCA9685.PCA9685()
#150 600
servo_minh = 550
servo_maxh = 750
servo_minv = 250
servo_maxv = 500
servo_minp = 25
servo_maxp = 350
servo_midh = int((servo_minh+servo_maxh)/2)
servo_midv = int((servo_minv+servo_maxv)/2)
servo_midp = int((servo_minp+servo_maxp)/2)
pwm.set_pwm_freq(60)
shori = 4
svert = 7
spcb = 2
print("start")
#pwm.set_pwm(shori, 0, servo_midh)
#pwm.set_pwm(svert, 0, servo_midv)
#pwm.set_pwm(spcb, 0, servo_maxp)
#sleep(1)
#pwm.set_pwm(svert, 0, servo_min)
servo0 = servo_midv
servo1 = servo_midh
servo2 = servo_maxp
class MainApp(tk.Tk):
def __init__(self, display):
tk.Tk.__init__(self)
tk.Tk.wm_title(self, 'Intercellar')
self.attributes('-fullscreen', True)
self.display = display
self.container = tk.Frame(self)
self.container.pack(side='top', fill='both', expand=True)
self.container.grid_rowconfigure(0, weight=1)
self.container.grid_columnconfigure(0, weight=1)
self.menubar = tk.Menu(self)
self.menubar.add_command(label='Quit', command=quit)
self.menubar.add_command(label='Main', command=lambda: self.show_frame(Main))
self.menubar.add_command(label='Testing', command=lambda: self.show_frame(Testing))
tk.Tk.config(self, menu=self.menubar)
self.frames = {}
for F in [Main, Testing]:
frame = F(self.container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky='nsew')
self.show_frame(Testing)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
class Main(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.info_widget = Info_Widget(self)
self.info_widget.pack(side=tk.RIGHT)
self.info_widget.add_number('LID', '--', 0, 0, top='LIDAR', bottom='in')
self.info_widget.add_number('PCB', '--', 1, 0, top='PCB', bottom='in')
self.info_widget.add_number('ACC', '--', 2, 0, top='ACC', bottom='')
self.info_widget.add_number('RES', '--', 3, 0, top='RES', bottom='in')
self.info_widget2 = Info_Widget(self)
self.info_widget2.pack(side=tk.LEFT)
self.info_widget2.add_number('LIGHT', '--', 0, 0, top='LIGHT', bottom='')
self.info_widget2.add_number('LM', '--', 1, 0, top='LM', bottom='/255')
self.info_widget2.add_number('RM', '--', 2, 0, top='RM', bottom='/255')
self.info_widget2.add_number('ECE', '--', 3, 0, top='ECE', bottom='')
def refresh(self, display):
global h
self.info_widget.info['LID'].change_text(display['lid'])
self.info_widget.info['PCB'].change_text(display['pcb'])
self.info_widget.info['ACC'].change_text(display['acc'])
try:
lid = float(display['lid'])
pcb = float(display['pcb'])
acc = float(display['acc'])
except:
lid = 0
pcb = 0
acc = 1
res = round((lid - pcb) * acc, 2)
res = min(45, res)
res = max(0, res)
h = res
self.info_widget.info['RES'].change_text(str(res))
class Testing(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.BD1 = Button_Display(self, opt=1)
self.BD1.pack(side=tk.LEFT)
self.info_widget = Info_Widget(self)
self.info_widget.add_big('RES', '--', 0, 0, top='HEIGHT', bottom='in')
"""
self.info_widget.add_number('PCB', 'PCB: -- in', 1, 0)
self.info_widget.add_number('LID', 'LID: -- in', 2, 0)
self.info_widget.add_number('ANG', 'ANG: -- ' + u'\u00B0', 3, 0)
"""
self.info_widget.pack(side=tk.LEFT, padx=60, pady=10)
self.BD2 = Button_Display(self, opt=2)
self.BD2.pack(side=tk.RIGHT)
def refresh(self, display):
global h
try:
lid = float(display['lid'])
pcb = float(display['pcb'])
acc = float(display['acc'])
except:
lid = 0
pcb = 0
acc = 1
res = round((lid - pcb) * acc, 2)
res = min(45, res)
res = max(0, res)
h = res
self.info_widget.info['RES'].change_text(str(res))
class Button_Display(tk.Frame):
def __init__(self, parent, opt=1):
tk.Frame.__init__(self, parent)
pady=10
height=3
width=20
if opt == 1:
self.B1 = tk.Button(self, text='SPEED', font='Consolas 14 bold', pady=pady, height=height, width=10, disabledforeground='black', bg='#4286f4', state=tk.DISABLED)
self.B2 = tk.Button(self, text='LOW', pady=pady, height=height, width=width, command=set_speed_low)
self.B3 = tk.Button(self, text='MED', pady=pady, height=height, width=width, command=set_speed_med)
self.B4 = tk.Button(self, text='HIGH', pady=pady, height=height, width=width, command=set_speed_high)
for self.B in [self.B1, self.B2, self.B3, self.B4]:
self.B.pack(side=tk.TOP, pady=pady)
if opt == 2:
self.B1 = tk.Button(self, text='LIGHT', font='Consolas 14 bold', pady=pady, height=height, width=10, disabledforeground='black', bg='#4286f4', state=tk.DISABLED)
self.B2 = tk.Button(self, text='LOW', pady=pady, height=height, width=width, command=set_light_low)
self.B3 = tk.Button(self, text='MED', pady=pady, height=height, width=width, command=set_light_med)
self.B4 = tk.Button(self, text='HIGH', pady=pady, height=height, width=width, command=set_light_high)
for self.B in [self.B1, self.B2, self.B3, self.B4]:
self.B.pack(side=tk.TOP, pady=pady)
class Info_Widget(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.info = {}
def add_big(self, id_str, text, row, column, top=False, bottom=False):
PADX = 5
PADY = 10
self.info[id_str] = Number_Display(self, text, top=top, bottom=bottom)
self.info[id_str].grid(padx=PADX, pady=PADY, row=row, column=column)
def add_number(self, id_str, text, row, column, top=False, bottom=False):
PADX = 5
PADY = 1
self.info[id_str] = Number_Display2(self, text, top=top, bottom=bottom)
self.info[id_str].grid(padx=PADX, pady=PADY, row=row, column=column)
    def change_text(self, id_str, text):
        # Delegate to the named child display; a tk.Frame has no itemconfig().
        self.info[id_str].change_text(text)
class Number_Display2(tk.Canvas):
def __init__(self, parent, text, top=False, bottom=False):
tk.Canvas.__init__(self, parent, height=100, width=100)
#self.rect = self.create_rectangle(0, 0, 100, 30, fill='#C0C0C0', width=0)
self.rect = self.create_rectangle(0, 0, 100, 30, fill='#C0C0C0', width=0)
self.text = self.create_text(50, 15, text=text, fill='black', font='Consolas 9 bold')
"""
if top:
self.top = self.create_text(50, 15, text=top, fill='black', font='Consolas 9 bold')
if bottom:
self.bot = self.create_text(50, 85, text=bottom, fill='black', font='Consolas 9 bold')
"""
def change_text(self, text):
self.itemconfig(self.text, text=text)
class Number_Display(tk.Canvas):
def __init__(self, parent, text, top=False, bottom=False):
tk.Canvas.__init__(self, parent, height=152, width=152)
#self.rect = self.create_rectangle(0, 0, 150, 150, fill='#C0C0C0', width=0)
self.rect = self.create_rectangle(1, 1, 150, 150, outline='black', activeoutline='black', width=1)
self.text = self.create_text(75, 75, text=text, fill='black', font='Consolas 14 bold')
if top:
self.top = self.create_text(75, 25, text=top, fill='black', font='Consolas 9 bold')
if bottom:
self.bot = self.create_text(75, 125, text=bottom, fill='black', font='Consolas 9 bold')
def change_text(self, text):
self.itemconfig(self.text, text=text)
last = (time(), None)
def key_input(event):
key_press = event.char
kp = key_press.lower()
st = 0
print(key_press.lower())
global key
global speed
global bright
now = time()
#print('pressed: {}, {}'.format(key_press.lower(), now-last[0]))
if(kp=='w' or kp=='a' or kp=='s' or kp=='d'):
if(kp == key["key"]):
key["last"] = now
else:
key["key"] = kp
key["first"] = now
key["last"] = now
if key_press.lower() == 's':
#print("pressed")
robot.forward(speed)
sleep(st)
if key_press.lower() == 'w':
#print("pressed")
robot.backward(speed)
sleep(st)
if key_press.lower() == 'd':
robot.right(speed)
sleep(st)
if key_press.lower() == 'a':
robot.left(speed)
sleep(st)
global h
#non-movement controls
if key_press.lower() == 'p':
party_bot()
if key_press.lower() == 'e':
heights.append(h)
if key_press.lower() == 'q':
print("Heights: ", heights)
robot.set_light1(0)
#GPIO.output(light_pin, GPIO.LOW)
quit()
if key_press.lower() == 'r':
global light
if(light):
light = 0
robot.set_light1(0)
#GPIO.output(light_pin, GPIO.LOW)
else:
light = 1
print("light") #250
robot.set_light1(bright)
#GPIO.output(light_pin, GPIO.HIGH)
#key = key_press.lower()
#servo controls
global servo0
global servo1
global servo2
global s
global up
global svert
global shori
global spcb
delta = 5
delta2 = 1
if kp=='t' or kp=='g' or kp=='h' or kp=='f':
if kp == key['key']:
key['last'] = now
else:
key['key'] = kp
key['first'] = now
key['last'] = now
if key_press.lower() == 't':
if(servo0 < servo_maxv):
servo0 = min(servo_maxv, servo0+delta)
pwm.set_pwm(svert, 0, servo0)
s = svert
up = True
press_servo()
#while(now-key['first'] <= .49 and servo0 < servo_max):
# servo0 = min(servo_max, servo0+delta2)
# pwm.set_pwm(0,0,servo0)
if key_press.lower() == 'g':
if(servo0 > servo_minv):
servo0 = max(servo_minv, servo0-delta)
pwm.set_pwm(svert, 0, servo0)
s = svert
up = False
press_servo()
#while(now-key['first'] <=.49 and servo0 > servo_min):
# servo0 = max(servo_min, servo0-delta2)
# pwm.set_pwm(0, 0, servo0)
if key_press.lower() == 'f':
if(servo1 < servo_maxh):
servo1 = min(servo_maxh, servo1+delta)
pwm.set_pwm(shori, 0, servo1)
s = shori
up = True
press_servo()
#while(now-key['first'] <= .49 and servo1 < servo_max):
# servo1 = min(servo_max, servo1+delta2)
# pwm.set_pwm(1, 0, servo1)
if key_press.lower() == 'h':
if(servo1 > servo_minh):
servo1 = max(servo_minh, servo1-delta)
pwm.set_pwm(shori, 0, servo1)
s = shori
up = False
press_servo()
#while(now-key['first'] <=.49 and servo1 > servo_min):
# servo1 = max(servo_min, servo1-delta2)
# pwm.set_pwm(1, 0, servo1)
    if key_press.lower() == 'v':
        # set_pwm() returns None; track the commanded position explicitly.
        pwm.set_pwm(spcb, 0, servo_minp)
        servo2 = servo_minp
    if key_press.lower() == 'b':
        pwm.set_pwm(spcb, 0, servo_maxp)
        servo2 = servo_maxp
#print(servo0)
def stop_bot(event):
global key
#key = None
#print("depressed")
key_release = event.char
kp = key_release.lower()
now = time()
#print('released: {}, {}'.format(key_release.lower(), now-last[0]))
if key_release.lower() == 'a' or key_release.lower() == 'w' or key_release.lower() == 's' or key_release.lower() == 'd':
if(now-key["first"] <= .49):
#if False:
print("stopbot++++++++++++++++++++++++++++++++++++++++++++++")
print(key["first"])
key["key"] = "-1"
robot.stop()
if kp=='t' or kp=='g' or kp=='f' or kp=='h':
if(now-key['first'] <= .49):
print("stop serv")
key["key"] = "-1"
key["first"] = 0
def fformat(item):
return str(item)
def check_for_stop():
global key
#print("cfs")
now = time()
if(now-key["first"] > .49):#.485
if(now-key["last"] > .1):
#print("cfs")
#print(key["first"])
key["key"] = "-1"
robot.stop()
sleep(.05)
t = threading.Thread(target=check_for_stop)
t.daemon = True
t.start()
def press_servo():
global key
global servo0
global servo1
global s
global up
global shori
#global svert
#if(s == shori):
# servoX = servo1
#else:
# servoX = servo0
#print("press_servo")
now = time()
servoX = servo1 if s == shori else servo0
servo_min = servo_minh if s==shori else servo_minv
servo_max = servo_maxh if s==shori else servo_maxv
if(now-key['first']<=.49 and servoX > servo_min and servoX < servo_max):
#if(up):
# servoX = min(servo_max, servoX+1)
#else:
delta = 2
servoX = min(servo_max,servoX+delta) if up else max(servo_min, servoX-delta)
pwm.set_pwm(s, 0, servoX)
if(s == shori):
servo1 = servoX
else:
servo0 = servoX
sleep(.05)
t = threading.Thread(target=press_servo)
t.daemon = True
t.start()
def party_bot():
    robot.right(250)
    start = time()
    # Spin and blink the light for about 5 seconds.
    while(time() - start < 5):
        robot.set_light1(250)
        sleep(.5)
        robot.set_light1(0)
        sleep(.5)
    robot.stop()
    robot.set_light1(0)
def set_speed_low():
global speed
global app
app.frames[Testing].BD1.B2.config(relief=tk.SUNKEN)
app.frames[Testing].BD1.B3.config(relief=tk.RAISED)
app.frames[Testing].BD1.B4.config(relief=tk.RAISED)
speed = 140
def set_speed_med():
global speed
app.frames[Testing].BD1.B2.config(relief=tk.RAISED)
app.frames[Testing].BD1.B3.config(relief=tk.SUNKEN)
app.frames[Testing].BD1.B4.config(relief=tk.RAISED)
speed = 190
def set_speed_high():
global speed
app.frames[Testing].BD1.B2.config(relief=tk.RAISED)
app.frames[Testing].BD1.B3.config(relief=tk.RAISED)
app.frames[Testing].BD1.B4.config(relief=tk.SUNKEN)
speed = 254
def set_light_low():
global bright
app.frames[Testing].BD2.B2.config(relief=tk.SUNKEN)
app.frames[Testing].BD2.B3.config(relief=tk.RAISED)
app.frames[Testing].BD2.B4.config(relief=tk.RAISED)
bright = 10
def set_light_med():
global bright
app.frames[Testing].BD2.B2.config(relief=tk.RAISED)
app.frames[Testing].BD2.B3.config(relief=tk.SUNKEN)
app.frames[Testing].BD2.B4.config(relief=tk.RAISED)
bright = 60
def set_light_high():
global bright
app.frames[Testing].BD2.B2.config(relief=tk.RAISED)
app.frames[Testing].BD2.B3.config(relief=tk.RAISED)
app.frames[Testing].BD2.B4.config(relief=tk.SUNKEN)
bright = 250
def build_display():
try:
dataLink = 'http://core.local/getinfo'
data = urllib.urlopen(dataLink)
data = data.read().decode('utf-8')
data = json.loads(data)
#print(data)
except:
print('problem')
exit()
display = data
return display
def update():
global display
display = build_display()
app.frames[Testing].refresh(display)
t = threading.Timer(FS, update)
t.daemon = True
t.start()
app = None
if __name__ == '__main__':
check_for_stop()
app = MainApp(display)
app.bind('<KeyPress>', key_input)
app.bind('<KeyRelease>', stop_bot)
update()
app.mainloop()
| 2.40625
| 2
|
smartteddy.basket.computer/web/smartteddydashboard/smartteddydashboard/settings.py
|
smart-teddy-project-hhs/SmartTeddy
| 2
|
12781379
|
from pathlib import Path
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'speechrecognition.apps.SpeechrecognitionConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'flags',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smartteddydashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
# TODO Use a more restrictive permission, because allow any can be insecure.
'rest_framework.permissions.AllowAny',
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_PARSER_CLASSES': [
'rest_framework.parsers.JSONParser',
]
}
WSGI_APPLICATION = 'smartteddydashboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Feature toggles that are pre-defined
# Django-flags https://cfpb.github.io/django-flags/
FLAGS = {
'FLAG_WITH_EMPTY_CONDITIONS': [],
'FLAG_WITH_ANY_CONDITIONS': [],
'FLAG_WITH_REQUIRED_CONDITIONS': [],
}
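# Example .env file consumed by environ.Env.read_env() above (sketch; the
# values are placeholders, not real credentials):
# SECRET_KEY=change-me
# DEBUG=True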
| 1.796875
| 2
|
undergen/lib/audio.py
|
DigiDuncan/undergen
| 0
|
12781380
|
from typing import TYPE_CHECKING
import requests
if TYPE_CHECKING:
from undergen.lib.data import Character
url = "https://api.15.ai/app/getAudioFile5"
cdn_url = "https://cdn.15.ai/audio/"
headers = {'authority': 'api.15.ai',
'access-control-allow-origin': '*',
'accept': 'application/json, text/plain, */*',
'dnt': '1',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36',
'content-type': 'application/json;charset=UTF-8',
'sec-gpc': '1',
'origin': 'https://15.ai',
'sec-fetch-site': 'same-site',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://15.ai/',
'accept-language': 'en-US,en;q=0.9'}
def get_sound(character: "Character", text: str):
character_name = character.sound_name
emotion = "Normal"
print(f"Getting audio for {character_name} '{text}'...")
response = requests.post(url, json = {
"character": character_name,
"emotion": emotion,
"text": text
}, headers = headers)
if response.status_code != 200:
raise RuntimeError(f"15.ai responded with code {response.status_code}.")
data_json = response.json()
wav_name = data_json["wavNames"][0]
second_response = requests.get(cdn_url + wav_name)
if second_response.status_code != 200:
raise RuntimeError(f"15.ai CDN responded with code {second_response.status_code}.")
print("Audio success!")
return second_response.content
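# Example usage (sketch, not part of the original module): any object exposing
# a `sound_name` attribute can stand in for `Character`; the name below is a
# placeholder.
# class _StubCharacter:
#     sound_name = "Sans"
# wav_bytes = get_sound(_StubCharacter(), "hello world")
# with open("out.wav", "wb") as f:
#     f.write(wav_bytes)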
| 2.765625
| 3
|
setup.py
|
ben-hoover/django-tz-detect
| 0
|
12781381
|
<reponame>ben-hoover/django-tz-detect
#!/usr/bin/env python
import os
import re
import sys
import codecs
import subprocess
from setuptools import setup, find_packages
def read(*parts):
file_path = os.path.join(os.path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
setup(
name='django-tz-detect',
version=read('VERSION'),
license='MIT License',
install_requires=[
'django>=1.4.2',
'pytz',
'six',
],
requires=[
'Django (>=1.4.2)',
],
description='Automatic user timezone detection for django',
long_description=read('README.rst'),
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='http://github.com/adamcharnock/django-tz-detect',
download_url='https://github.com/adamcharnock/django-tz-detect/zipball/master',
packages=find_packages(exclude=('example*', '*.tests*')),
include_package_data=True,
tests_require=[
'django-setuptest',
'coveralls',
],
test_suite='setuptest.setuptest.SetupTestSuite',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 1.8125
| 2
|
backend/apps/projects/efficiency/serializers.py
|
wuchaofan1654/tester
| 0
|
12781382
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from apps.basics.op_drf.serializers import CustomModelSerializer
from apps.projects.efficiency.models import Efficiency
from apps.projects.efficiency.models import Module
UserProfile = get_user_model()
class EfficiencySerializer(CustomModelSerializer):
"""
简单菜单序列化器
"""
class Meta:
model = Efficiency
# fields = '__all__'
exclude = ('description', 'creator', 'modifier')
class ModuleSerializer(CustomModelSerializer):
"""
模块管理 简单序列化器
"""
parentId = serializers.IntegerField(source="parentId.id", default=0)
class Meta:
model = Module
exclude = ('description', 'creator', 'modifier')
class ModuleCreateUpdateSerializer(CustomModelSerializer):
"""
模块管理 创建/更新时的列化器
"""
def validate(self, attrs: dict):
return super().validate(attrs)
class Meta:
model = Module
fields = '__all__'
class ModuleTreeSerializer(serializers.ModelSerializer):
"""
模块树形架构序列化器:递归序列化所有深度的子模块
"""
label = serializers.CharField(source='name', default='')
parentId = serializers.IntegerField(source="parentId.id", default=0)
class Meta:
model = Module
fields = ('id', 'label', 'parentId', 'status')
| 2.21875
| 2
|
users/models.py
|
hrbhat/twissandra
| 308
|
12781383
|
# Nope, we're using Cassandra :)
| 0.777344
| 1
|
config.py
|
IntershopCommunicationsAG/ish-monitoring-postgresqldb-exporter
| 1
|
12781384
|
"""
This file stores all the possible configurations for the Flask app.
Changing configurations like the secret key or the database
url should be stored as environment variables and imported
using the 'os' library in Python.
"""
import os
class BaseConfig:
SQLALCHEMY_TRACK_MODIFICATIONS = False
SSL = os.getenv('POSTGRESQL_SSL', True)
if isinstance(SSL, str):
SSL = SSL.lower() in ['true', '1', 'yes', "t"]
DATABASE = os.getenv('POSTGRESQL_DATABASE', 'postgres')
HOST = os.getenv('POSTGRESQL_HOST', 'localhost')
    PORT = int(os.getenv('POSTGRESQL_PORT', 5432))  # cast so env-provided strings become ints
USERNAME = os.getenv('POSTGRESQL_USERNAME', 'root')
PASSWORD = os.getenv('POSTGRESQL_PASSWORD', '<PASSWORD>')
COLLECT_METRICS_INTERVAL_SEC = int(
os.getenv('COLLECT_METRICS_INTERVAL_SEC', 120))
DEBUG = False
TESTING = False
class TestingConfig(BaseConfig):
DEBUG = True
TESTING = True
class DevelopmentConfig(BaseConfig):
DEBUG = True
class ProductionConfig(BaseConfig):
DEBUG = False
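# Example (sketch): a Flask app factory could select one of the classes above at
# startup; APP_SETTINGS is an illustrative variable name, not part of this module.
# app.config.from_object(os.getenv('APP_SETTINGS', 'config.ProductionConfig'))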
| 2.453125
| 2
|
envs/traffic_counter.py
|
iron88sk/Project
| 0
|
12781385
|
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import configparser
import env
from envs.seoul_env import SeoulEnv, SeoulController
import numpy as np
import matplotlib
# Detector-id -> edge mapping. The original source declared this as a set, but
# the code below uses .keys()/.values(); the identity mapping here is an assumption.
ilds_map = {'1_l': '1_l', '1_r': '1_r', '2_l': '2_l', '2_r': '2_r', '3_u': '3_u', '3_d': '3_d'}
class SeoulCounterEnv(SeoulEnv):
def __init__(self, config, port=0, output_path='', is_record=False, record_stat=False, sumo_config=None):
self.sumo_config = sumo_config
self.ilds_map = ilds_map
self.counts_map = dict()
self.vehicles_in_lane = dict()
for edge in set(self.ilds_map.values()):
self.vehicles_in_lane[edge] = list()
super().__init__(config, output_path, is_record, record_stat, port=port)
def _init_sim_config(self, seed=None):
return self.sumo_config
    def step(self, action):
        self.count()
        return super().step(action)
    def count(self):
        for ild in self.ilds_map.keys():
            vid = self.sim.lanearea.getLastStepVehicleIDs(ild)
            # Record the vehicles seen on the edge this detector belongs to.
            self.vehicles_in_lane[self.ilds_map[ild]].extend(vid)
class TrafficCounter:
def __init__(self, config, base_dir, sumo_config):
self.config = config
self.base_dir = base_dir
self.sumo_config = sumo_config
if not os.path.exists(self.base_dir):
os.mkdir(self.base_dir)
        self.env = SeoulCounterEnv(self.config['ENV_CONFIG'], 2, self.base_dir, is_record=True, record_stat=True, sumo_config=self.sumo_config)
        self.ob = self.env.reset()
        self.controller = SeoulController(self.env.node_names, self.env.nodes)
    def exploreGreedy(self):
        it = 0
        while True:
            it += 1
next_ob, _, done, reward = self.env.step(self.controller.forward(self.ob))
if done:
break
self.ob = next_ob
self.env.terminate()
def run(self):
self.exploreGreedy()
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read('./config/config_greedy_seoul.ini')
base_dir = './output_result/'
    # The constructor also requires a SUMO config; None is a placeholder here.
    counter = TrafficCounter(config, base_dir, sumo_config=None)
counter.run()
| 2.28125
| 2
|
src/awkward/_v2/operations/structure/ak_is_none.py
|
BioGeek/awkward-1.0
| 0
|
12781386
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def is_none(array, axis=0, highlevel=True, behavior=None):
raise NotImplementedError
# """
# Args:
# array: Data to check for missing values (None).
# axis (int): The dimension at which this operation is applied. The
# outermost dimension is `0`, followed by `1`, etc., and negative
# values count backward from the innermost: `-1` is the innermost
# dimension, `-2` is the next level up, etc.
# highlevel (bool): If True, return an #ak.Array; otherwise, return
# a low-level #ak.layout.Content subclass.
# behavior (None or dict): Custom #ak.behavior for the output array, if
# high-level.
# Returns an array whose value is True where an element of `array` is None;
# False otherwise (at a given `axis` depth).
# """
# def getfunction(layout, depth, posaxis):
# posaxis = layout.axis_wrap_if_negative(posaxis)
# if posaxis == depth - 1:
# nplike = ak.nplike.of(layout)
# if isinstance(layout, ak._v2._util.optiontypes):
# return lambda: ak._v2.contents.NumpyArray(
# nplike.asarray(layout.bytemask()).view(np.bool_)
# )
# elif isinstance(
# layout,
# (
# ak._v2._util.unknowntypes,
# ak._v2._util.listtypes,
# ak._v2._util.recordtypes,
# ak._v2.contents.NumpyArray,
# ),
# ):
# return lambda: ak._v2.contents.NumpyArray(
# nplike.zeros(len(layout), dtype=np.bool_)
# )
# else:
# return posaxis
# else:
# return posaxis
# layout = ak._v2.operations.convert.to_layout(array)
# out = ak._v2._util.recursively_apply(
# layout, getfunction, pass_depth=True, pass_user=True, user=axis
# )
# return ak._v2._util.maybe_wrap_like(out, array, behavior, highlevel)
| 2.140625
| 2
|
ci/release.py
|
steve-louis/mist-ce
| 778
|
12781387
|
<gh_stars>100-1000
#!/usr/bin/env python
import os
import sys
import hashlib
import argparse
import magic
import requests
def main():
args = parse_args()
request = Client(args.owner, args.repo, args.token)
update_release(
request, args.tag, msg=args.msg, files=args.files,
prerelease=args.prerelease, draft=args.draft,
remove_undefined_files=args.remove_undefined_files,
)
def parse_args():
argparser = argparse.ArgumentParser(
description=("Create/Update Github release based on git tag. When "
"creating a release that doesn't exist, it'll be marked "
"as stable (not prerelease) and public (not draft), "
"unless otherwise specified. When updating a release, "
"only fields specified by corresponding options will be "
"modified."))
argparser.add_argument('owner', help="The github repo's owner")
argparser.add_argument('repo', help="The github repo's name")
argparser.add_argument('tag', help="Tag name for which to make release")
argparser.add_argument(
'-m', '--msg', default=None,
help=("Message for the release. Either the message as a string, or "
"the filename of a text file preceded by '@'. Use an empty "
"string '' to set an empty message."))
argparser.add_argument(
'-f', '--files', nargs='+', metavar='FILE',
help="Files to upload as release assets.")
argparser.add_argument(
'--remove-undefined-files', action='store_true',
help=("If specified, remove any preexisting files from the release "
"that aren't currently specified with the `--files` option."))
argparser.add_argument(
'--prerelease', dest='prerelease', default=None, action='store_true',
help="Mark release as prerelease.")
argparser.add_argument(
'--no-prerelease', dest='prerelease', default=None,
action='store_false',
help="Mark release as regular release, no prerelease.")
argparser.add_argument(
'--draft', dest='draft', default=None, action='store_true',
help="Mark release as draft.")
argparser.add_argument(
'--no-draft', dest='draft', default=None, action='store_false',
help="Publish release, unmark as draft.")
argparser.add_argument(
'--token', default=os.getenv('GITHUB_API_TOKEN'),
help=("Github API token to use. Can also be specified as env var "
"GITHUB_API_TOKEN."))
args = argparser.parse_args()
if args.msg and args.msg.startswith('@'):
with open(args.msg[1:], 'r') as fobj:
args.msg = fobj.read()
return args
class Client(object):
def __init__(self, owner, repo, token):
self.owner = owner
self.repo = repo
self.token = token
def __call__(self, url, method='GET', parse_json_resp=True,
api='https://api.github.com', **kwargs):
url = '%s/repos/%s/%s%s' % (api, self.owner, self.repo, url)
headers = kwargs.pop('headers', {})
headers.update({'Authorization': 'token %s' % self.token})
print("Will make %s request to %s: %s" % (method, url, kwargs))
resp = requests.request(method, url, headers=headers, **kwargs)
if not resp.ok:
print(resp.status_code)
print(resp.text)
raise Exception(resp.status_code)
if parse_json_resp:
try:
return resp.json()
except Exception:
print("Error decoding json response")
print(resp.text)
raise
else:
return resp
def print_release(release):
print('-' * 60)
for name, key in [('id', 'id'), ('name', 'name'),
('tag', 'tag_name'), ('ref', 'target_commitish'),
('draft', 'draft'), ('prerelease', 'prerelease')]:
print('%s: %s' % (name, release[key]))
print('assets:')
for asset in release['assets']:
print(' - %s' % asset['name'])
if release['body']:
print('msg: |')
for line in release['body'].splitlines():
print(' %s' % line)
print('-' * 60)
def update_release(request, tag, msg=None, files=None,
draft=None, prerelease=None,
remove_undefined_files=False):
# Check that the repo exists.
resp = request('')
# Find git tag corresponding to release.
resp = request('/tags')
for item in resp:
if item['name'] == tag:
sha = item['commit']['sha']
print("Tag %s points to %s" % (tag, sha))
break
else:
print("Tag %s doesn't exist" % tag)
sys.exit(1)
# Create or update github release.
data = {
'tag_name': tag,
'target_commitish': sha,
'name': tag,
'body': msg,
'draft': draft,
'prerelease': prerelease,
}
for key, val in list(data.items()):
if val is None:
data.pop(key)
for release in request('/releases'):
if release['tag_name'] == tag:
print("Found preexisting release.")
print_release(release)
for key in list(data.keys()):
if data[key] == release[key]:
data.pop(key)
if data:
print("Release already exists, updating.")
release = request('/releases/%s' % release['id'], 'PATCH',
json=data)
print_release(release)
else:
print("No need to modify release's metadata.")
break
else:
print("Creating a new release.")
release = request('/releases', 'POST', json=data)
print_release(release)
# Add or update assets.
assets = list(release['assets'])
for path in files or []:
name = os.path.basename(path)
uploaded = False
for i, asset in enumerate(list(assets)):
if asset['name'] != name:
continue
assets.pop(i)
print("Found already uploaded file '%s'" % path)
md5 = hashlib.md5()
resp = request('/releases/assets/%s' % asset['id'],
headers={'Accept': 'application/octet-stream'},
parse_json_resp=False, stream=True)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
md5.update(chunk)
md5sum_remote = md5.hexdigest()
md5 = hashlib.md5()
with open(path, 'rb') as fobj:
while True:
chunk = fobj.read(1024)
if not chunk:
break
md5.update(chunk)
md5sum_local = md5.hexdigest()
if md5sum_local == md5sum_remote:
print("Preexisting file matches local file")
uploaded = True
break
print("Deleting preexisting different asset.")
request('/releases/assets/%s' % asset['id'], 'DELETE',
parse_json_resp=False)
if not uploaded:
with open(path, 'rb') as fobj:
ctype = magic.Magic(mime=True).from_file(path)
request('/releases/%s/assets' % release['id'], 'POST',
api='https://uploads.github.com',
headers={'Content-Type': ctype},
params={'name': name}, data=fobj)
if remove_undefined_files:
for asset in assets:
print("Deleting preexisting undefined asset %s." % asset['name'])
request('/releases/assets/%s' % asset['id'], 'DELETE',
parse_json_resp=False)
if __name__ == "__main__":
main()
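# Example invocation (sketch; owner, repo, tag and file names are placeholders):
#   GITHUB_API_TOKEN=... ./release.py my-org my-repo v1.2.3 -m @CHANGELOG.md -f dist/app.tar.gz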
| 2.90625
| 3
|
build/lib/jet_django/__init__.py
|
lukejamison/jet-dasboard
| 193
|
12781388
|
VERSION = '0.0.1'
default_app_config = 'jet_django.apps.JetDjangoConfig'
| 1.046875
| 1
|
pages.py
|
mjwalker19/oTree-Retainage-Game
| 0
|
12781389
|
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
import random
class SellerBidding(Page):
form_model = 'player'
form_fields = ['seller_bid', 'seller_exit']
def is_displayed(self):
return self.player.role() == 'seller'
def seller_bid_max(self):
return self.session.vars['values'][1]
def seller_bid_min(self):
return self.session.vars['costs'][0]
def error_message(self, values):
if values["seller_exit"] is not True and values["seller_bid"] is None:
return 'Please enter a bid or exit the round.'
def vars_for_template(self):
return self.player.vars_for_template()
class BuyerTrade(Page):
form_model = 'group'
form_fields = ['buyer_accept']
def is_displayed(self):
if self.player.role() == 'buyer' and not self.group.no_bid:
return True
else:
return False
def before_next_page(self):
print('@@@ stage 2 before next page ')
self.player.show_bid = True
print('@@@ buyer show price')
self.player.show_trade = True
print('@@@ buyer show trade')
self.player.show_IP = True
print('@@@ buyer show IP')
self.group.set_buyer_transfer_default()
print('@@@ transfer default set')
def vars_for_template(self):
return self.player.vars_for_template()
class SellerQuality(Page):
form_model = 'group'
form_fields = ['seller_quality']
def is_displayed(self):
if self.group.buyer_accept and self.player.is_winner:
return True
else:
return False
def before_next_page(self):
print('@@@ stage 3 before next page ')
self.player.show_trade = True
print('@@@ winning seller show trade')
self.player.show_IP = True
print('@@@ winning seller show IP')
self.player.show_quality = True
print('@@@ winning seller show quality')
def vars_for_template(self):
return self.player.vars_for_template()
class BuyerTransfer(Page):
form_model = 'group'
form_fields = ['buyer_transfer']
def is_displayed(self):
if self.session.vars['treatment'] > 0 and self.group.buyer_accept:
return self.player.role() == 'buyer'
else:
return False
def buyer_transfer_max(self):
return self.group.retention_money
def before_next_page(self):
print('@@@ stage 4 before next page ')
self.player.show_quality = True
print('@@@ buyer show quality')
self.player.show_DP = True
print('@@@ buyer show DP')
def vars_for_template(self):
context = self.player.vars_for_template()
context.update({'quality': self.group.get_seller_quality_display(),
'value': self.session.vars['values'][Constants.qualities.index(self.group.seller_quality)],
'cost': self.session.vars['costs'][Constants.qualities.index(self.group.seller_quality)]})
return context
class ResultsControl(Page):
timeout_seconds = 25
def is_displayed(self):
if self.session.vars['treatment'] == 0:
return True
else:
return False
def vars_for_template(self):
context = self.player.vars_for_template()
if self.group.buyer_accept:
context.update({'quality': self.group.get_seller_quality_display()})
return context
class ResultsTreatments(Page):
timeout_seconds = 25
def is_displayed(self):
if self.session.vars['treatment'] > 0:
return True
else:
return False
def vars_for_template(self):
context = self.player.vars_for_template()
if self.group.buyer_accept:
context.update({'quality': self.group.get_seller_quality_display()})
return context
class WaitBids(WaitPage):
template_name = '_game_trust_burden/WaitOther.html'
wait_for_all_groups = True
def after_all_players_arrive(self):
print('@@@ wait bids')
for player in self.subsession.get_players():
player.check_exit()
print('@@@ exit bid set')
for group in self.subsession.get_groups():
group.set_winner()
print('@@@ winner set')
group.set_retention_money()
print('@@@ retention money set')
for player in self.subsession.get_players():
print('@@@ for player in after_all_players_arrive')
player.show_bid = True
print('@@@ all show auction outcome')
def vars_for_template(self):
context = self.player.vars_for_template()
return context
class WaitTrade(WaitPage):
template_name = '_game_trust_burden/WaitTrade.html'
wait_for_all_groups = True
def is_displayed(self):
if self.player.is_winner:
return True
elif self.player.role() == 'buyer' and self.group.no_bid is not True:
return True
else:
return False
def vars_for_template(self):
context = self.player.vars_for_template()
return context
class WaitQuality(WaitPage):
template_name = '_game_trust_burden/WaitOther.html'
wait_for_all_groups = True
def is_displayed(self):
if self.session.vars['treatment'] > 0:
if self.player.is_winner and self.group.buyer_accept:
return True
elif self.player.role() == 'buyer' and self.group.buyer_accept:
return True
else:
return False
else:
return False
def vars_for_template(self):
context = self.player.vars_for_template()
return context
class WaitPayment(WaitPage):
template_name = '_game_trust_burden/WaitOther.html'
wait_for_all_groups = True
def is_displayed(self):
if self.session.vars['treatment'] > 2:
if self.player.role() == 'seller' and self.player.is_winner is not False and self.player.seller_exit is not True:
return True
elif self.player.role() == 'buyer' and self.group.buyer_accept:
return True
else:
return False
else:
return False
def vars_for_template(self):
context = self.player.vars_for_template()
return context
class WaitResult(WaitPage):
template_name = '_game_trust_burden/WaitResult.html'
wait_for_all_groups = True
def after_all_players_arrive(self):
for group in self.subsession.get_groups():
print('@@@ for group in after_all_players_arrive')
group.set_payoffs()
print('@@@ profit for round set')
group.set_buyer_transfer_pct()
print('@@@ transfer pct set')
for player in self.subsession.get_players():
print('@@@ for player in after_all_players_arrive')
player.show_trade = True
print('@@@ all show bid')
player.show_IP = True
print('@@@ all show IP')
player.show_quality = True
print('@@@ all show quality')
player.show_DP = True
print('@@@ all show DP')
player.show_response = True
print('@@@ all show response')
player.show_payoff = True
print('@@@ all show payoffs')
player.cumulative_payoff_set()
print('@@@ cumulative payoff set')
def vars_for_template(self):
context = self.player.vars_for_template()
return context
page_sequence = [
SellerBidding,
WaitBids,
BuyerTrade,
WaitTrade,
SellerQuality,
WaitQuality,
BuyerTransfer,
WaitPayment,
WaitResult,
ResultsControl,
ResultsTreatments,
]
| 2.578125
| 3
|
src/spaceone/inventory/model/collection_state_model.py
|
whdalsrnt/inventory
| 9
|
12781390
|
from mongoengine import *
from spaceone.core.model.mongo_model import MongoModel
class CollectionState(MongoModel):
collector_id = StringField(max_length=40)
job_task_id = StringField(max_length=40)
secret_id = StringField(max_length=40)
resource_id = StringField(max_length=40)
resource_type = StringField(max_length=255)
disconnected_count = IntField(default=0)
domain_id = StringField(max_length=40)
updated_at = DateTimeField(auto_now=True)
meta = {
'updatable_fields': [
'job_task_id',
'disconnected_count',
'updated_at'
],
'indexes': [
'collector_id',
'job_task_id',
'secret_id',
'resource_id',
'resource_type',
'disconnected_count',
'domain_id'
]
}
| 2.078125
| 2
|
setup.py
|
MartinThoma/vin_decoder
| 11
|
12781391
|
<filename>setup.py
# core modules
import os
import io
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(file_name):
"""Read a text file and return the content as a string."""
with io.open(os.path.join(os.path.dirname(__file__), file_name),
encoding='utf-8') as f:
return f.read()
config = {
'name': 'vin_decoder',
'version': '0.1.1',
'author': '<NAME>',
'author_email': '<EMAIL>',
'maintainer': '<NAME>',
'maintainer_email': '<EMAIL>',
'packages': ['vin_decoder'],
'scripts': ['bin/vin_decoder'],
'package_data': {'vin_decoder': ['templates/*', 'misc/*']},
'platforms': ['Linux', 'MacOS X', 'Windows'],
'url': 'https://github.com/MartinThoma/vin_decoder',
'license': 'MIT',
'description': 'VIN decoder (Vehicle identification number)',
'long_description': read('README.md'),
'long_description_content_type': 'text/markdown',
'install_requires': [
"argparse",
"nose"
],
'keywords': ['VIN', 'WMI', 'OBD'],
'download_url': 'https://github.com/MartinThoma/vin_c',
'classifiers': ['Development Status :: 7 - Inactive',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
'Topic :: Utilities'],
'zip_safe': False,
'test_suite': 'nose.collector'
}
setup(**config)
| 2.203125
| 2
|
abiflows/fireworks/integration_tests/itest_phonon.py
|
gmatteo/abiflows
| 4
|
12781392
|
<gh_stars>1-10
import pytest
import os
import glob
import unittest
import tempfile
import filecmp
import numpy.testing.utils as nptu
import abipy.data as abidata
from abipy.abio.factories import phonons_from_gsinput, PhononsFromGsFactory
from abipy.flowtk.tasks import TaskManager
from fireworks.core.rocket_launcher import rapidfire
from abiflows.fireworks.workflows.abinit_workflows import PhononFullFWWorkflow, PhononFWWorkflow
from abiflows.fireworks.utils.fw_utils import get_fw_by_task_index, load_abitask
from abiflows.core.testing import AbiflowsIntegrationTest, check_restart_task_type
#ABINIT_VERSION = "8.6.1"
# pytestmark = [pytest.mark.skipif(not has_abinit(ABINIT_VERSION), reason="Abinit version {} is not in PATH".format(ABINIT_VERSION)),
# pytest.mark.skipif(not has_fireworks(), reason="fireworks package is missing"),
# pytest.mark.skipif(not has_mongodb(), reason="no connection to mongodb")]
# pytestmark = pytest.mark.usefixtures("cleandb")
class ItestPhonon(AbiflowsIntegrationTest):
def itest_phonon_wf(self, lp, fworker, tmpdir, input_scf_phonon_si_low, use_autoparal, db_data):
"""
Tests the complete running of PhononFullFWWorkflow and PhononFWWorkflow
"""
# test at gamma.
ph_fac = PhononsFromGsFactory(qpoints=[[0,0,0]], ph_tol = {"tolvrs": 1.0e-7}, ddk_tol = {"tolwfr": 1.0e-16},
dde_tol = {"tolvrs": 1.0e-7}, wfq_tol = {"tolwfr": 1.0e-16})
# first run the phonon workflow with generation task
wf_gen = PhononFWWorkflow(input_scf_phonon_si_low, ph_fac, autoparal=use_autoparal,
initialization_info={"ngqpt": [1,1,1], "kppa": 100})
wf_gen.add_anaddb_ph_bs_fw(input_scf_phonon_si_low.structure, ph_ngqpt=[1,1,1], ndivsm=2, nqsmall=2)
wf_gen.add_mongoengine_db_insertion(db_data)
wf_gen.add_final_cleanup(["WFK"])
scf_id = wf_gen.scf_fw.fw_id
ph_generation_fw_id = wf_gen.ph_generation_fw.fw_id
old_new = wf_gen.add_to_db(lpad=lp)
scf_id = old_new[scf_id]
ph_generation_fw_id = old_new[ph_generation_fw_id]
# run all the workflow
rapidfire(lp, fworker, m_dir=str(tmpdir))
wf_gen = lp.get_wf_by_fw_id(scf_id)
assert wf_gen.state == "COMPLETED"
ph_task = load_abitask(get_fw_by_task_index(wf_gen, "phonon_0", index=-1))
# check the effect of the final cleanup
assert len(glob.glob(os.path.join(ph_task.outdir.path, "*_WFK"))) == 0
assert len(glob.glob(os.path.join(ph_task.outdir.path, "*_DEN1"))) > 0
assert len(glob.glob(os.path.join(ph_task.tmpdir.path, "*"))) == 0
assert len(glob.glob(os.path.join(ph_task.indir.path, "*"))) == 0
# check the save in the DB
from abiflows.database.mongoengine.abinit_results import PhononResult
with db_data.switch_collection(PhononResult) as PhononResult:
results = PhononResult.objects()
assert len(results) == 1
r = results[0]
assert r.abinit_input.structure.to_mgobj() == input_scf_phonon_si_low.structure
assert r.abinit_output.structure.to_mgobj() == input_scf_phonon_si_low.structure
assert r.abinit_input.ecut == input_scf_phonon_si_low['ecut']
assert r.abinit_input.kppa == 100
nptu.assert_array_equal(r.abinit_input.gs_input.to_mgobj()['ngkpt'], input_scf_phonon_si_low['ngkpt'])
nptu.assert_array_equal(r.abinit_input.ngqpt, [1,1,1])
ana_task = load_abitask(get_fw_by_task_index(wf_gen, "anaddb", index=None))
with tempfile.NamedTemporaryFile(mode="wb") as db_file:
db_file.write(r.abinit_output.phonon_bs.read())
db_file.seek(0)
assert filecmp.cmp(ana_task.phbst_path, db_file.name)
mrgddb_task = load_abitask(get_fw_by_task_index(wf_gen, "mrgddb", index=None))
# read/write in binary for py3k compatibility with mongoengine
with tempfile.NamedTemporaryFile(mode="wb") as db_file:
db_file.write(r.abinit_output.ddb.read())
db_file.seek(0)
assert filecmp.cmp(mrgddb_task.merged_ddb_path, db_file.name)
# then rerun a similar workflow, but completely generated at its creation
wf_full = PhononFullFWWorkflow(input_scf_phonon_si_low, ph_fac, autoparal=use_autoparal)
wf_full.add_anaddb_ph_bs_fw(input_scf_phonon_si_low.structure, ph_ngqpt=[1,1,1], ndivsm=2, nqsmall=2)
wf_full.add_mongoengine_db_insertion(db_data)
scf_id = wf_full.scf_fw.fw_id
old_new = wf_full.add_to_db(lpad=lp)
scf_id = old_new[scf_id]
# run all the workflow
rapidfire(lp, fworker, m_dir=str(tmpdir))
wf_full = lp.get_wf_by_fw_id(scf_id)
assert wf_full.state == "COMPLETED"
# the full workflow doesn't contain the generation FW and the cleanup tasks, but should have the same
# amount of perturbations.
if use_autoparal:
diff = 1
else:
diff = 2
assert len(wf_full.id_fw) + diff == len(wf_gen.id_fw)
if self.check_numerical_values:
gen_scf_task = load_abitask(get_fw_by_task_index(wf_gen, "scf", index=-1))
with gen_scf_task.open_gsr() as gen_gsr:
gen_energy = gen_gsr.energy
assert gen_energy == pytest.approx(-240.264972012, rel=0.01)
gen_ana_task = load_abitask(get_fw_by_task_index(wf_gen, "anaddb", index=None))
with gen_ana_task.open_phbst() as gen_phbst:
gen_phfreq = gen_phbst.phbands.phfreqs[0, 3]
assert gen_phfreq == pytest.approx(0.06029885, rel=0.1)
full_scf_task = load_abitask(get_fw_by_task_index(wf_gen, "scf", index=-1))
with full_scf_task.open_gsr() as full_gsr:
full_energy = full_gsr.energy
assert full_energy == pytest.approx(-240.264972012, rel=0.01)
full_ana_task = load_abitask(get_fw_by_task_index(wf_gen, "anaddb", index=None))
with full_ana_task.open_phbst() as full_phbst:
full_phfreqs = full_phbst.phbands.phfreqs[0, 3]
assert full_phfreqs == pytest.approx(0.06029885, rel=0.1)
assert gen_energy == pytest.approx(full_energy, rel=1e-6)
assert gen_phfreq == pytest.approx(full_phfreqs, rel=1e-6)
def itest_not_converged(self, lp, fworker, tmpdir, input_scf_phonon_si_low):
"""
Tests the missed convergence and restart for all the different kinds of tasks
"""
# set a point not belonging to the grid so to trigger the calculation of WFQ and gamma for the DDE and DDK
ph_inp = phonons_from_gsinput(input_scf_phonon_si_low, qpoints=[[0, 0, 0], [0.11111, 0.22222, 0.33333]],
ph_tol = {"tolvrs": 1.0e-7}, ddk_tol = {"tolwfr": 1.0e-16},
dde_tol = {"tolvrs": 1.0e-7}, wfq_tol = {"tolwfr": 1.0e-16})
ph_inp.set_vars(nstep=3)
wf_full = PhononFullFWWorkflow(input_scf_phonon_si_low, ph_inp, autoparal=False)
scf_id = wf_full.scf_fw.fw_id
old_new = wf_full.add_to_db(lpad=lp)
scf_id = old_new[scf_id]
# run the scf
rapidfire(lp, fworker, m_dir=str(tmpdir), nlaunches=1)
# pause all the remaining workflow and reignite the task types one by one to check the restart
lp.pause_wf(scf_id)
# DDK
check_restart_task_type(lp, fworker, tmpdir, scf_id, "ddk_0")
# reignite and run the other DDK to get to the DDE
wf = lp.get_wf_by_fw_id(scf_id)
lp.resume_fw(get_fw_by_task_index(wf, "ddk_1", index=1).fw_id)
lp.resume_fw(get_fw_by_task_index(wf, "ddk_2", index=1).fw_id)
rapidfire(lp, fworker, m_dir=str(tmpdir))
# DDE
check_restart_task_type(lp, fworker, tmpdir, scf_id, "dde_0")
# NSCF
check_restart_task_type(lp, fworker, tmpdir, scf_id, "nscf_wfq_0")
# phonon
check_restart_task_type(lp, fworker, tmpdir, scf_id, "phonon_0")
# don't run the wf until the end to save time. Other tests covered that.
wf = lp.get_wf_by_fw_id(scf_id)
assert wf.state == "PAUSED"
def itest_phonon_wfq_wf(self, lp, fworker, tmpdir, input_scf_phonon_si_low, db_data):
"""
Tests the PhononFullFWWorkflow and PhononFWWorkflow
"""
qpt = [[0.1111,0.2222,0.3333]]
# test at gamma.
ph_fac = PhononsFromGsFactory(qpoints=qpt, ph_tol = {"tolvrs": 1.0e-7},
wfq_tol = {"tolwfr": 1.0e-16}, with_ddk=False, with_dde=False)
# first run the phonon workflow with generation task
wf_gen = PhononFWWorkflow(input_scf_phonon_si_low, ph_fac, autoparal=False,
initialization_info={"qpoints": qpt, "kppa": 100})
wf_gen.add_mongoengine_db_insertion(db_data)
scf_id = wf_gen.scf_fw.fw_id
old_new = wf_gen.add_to_db(lpad=lp)
scf_id = old_new[scf_id]
# run all the workflow
rapidfire(lp, fworker, m_dir=str(tmpdir))
wf_gen = lp.get_wf_by_fw_id(scf_id)
assert wf_gen.state == "COMPLETED"
# then rerun a similar workflow, but completely generated at its creation
wf_full = PhononFullFWWorkflow(input_scf_phonon_si_low, ph_fac, autoparal=False)
scf_id = wf_full.scf_fw.fw_id
old_new = wf_full.add_to_db(lpad=lp)
scf_id = old_new[scf_id]
# run all the workflow
rapidfire(lp, fworker, m_dir=str(tmpdir))
wf_full = lp.get_wf_by_fw_id(scf_id)
assert wf_full.state == "COMPLETED"
| 1.804688
| 2
|
src/playlist.py
|
jnsougata/Ditch-YouTube-API
| 14
|
12781393
|
<reponame>jnsougata/Ditch-YouTube-API<gh_stars>10-100
import re
from .videobulk import _VideoBulk
from .auxiliary import _src, _filter
class Playlist:
def __init__(self, playlist_id: str):
"""
:param str playlist_id: the _id of the playlist
"""
if 'youtube.com' in playlist_id:
self.id = re.findall(r'=(.*)', playlist_id)[0]
else:
self.id = playlist_id
@property
def name(self):
"""
:return: the name of the playlist
"""
raw = _src(f'https://www.youtube.com/playlist?list={self.id}')
name_data = re.findall(r"{\"title\":\"(.*?)\"", raw)
return name_data[0] if name_data else None
@property
def url(self):
"""
:return: url of the playlist
"""
return f'https://www.youtube.com/playlist?list={self.id}'
@property
def video_count(self):
"""
:return: total number of videos in that playlist
"""
raw = _src(f'https://www.youtube.com/playlist?list={self.id}')
video_count = re.findall(r"stats\":\[{\"runs\":\[{\"text\":\"(.*?)\"", raw)
return video_count[0] if video_count else None
def videos(self, limit: int):
"""
:param int limit: number of videos the user want from the playlist
:return: list of < video objects > for each video in the playlist (consider limit)
"""
raw = _src(f'https://www.youtube.com/playlist?list={self.id}')
videos = re.findall(r"videoId\":\"(.*?)\"", raw)
pure = _filter(limit=limit, iterable=videos)
return _VideoBulk(pure)
@property
def thumbnail(self):
"""
:return: url of the thumbnail of the playlist
"""
raw = _src(f'https://www.youtube.com/playlist?list={self.id}')
thumbnails = re.findall(r"og:image\" content=\"(.*?)\?", raw)
return thumbnails[0] if thumbnails else None
@property
def info(self):
"""
:return: a dict containing playlist info
dict = {
'name': -> str,
'url': -> str,
'video_count': -> int,
'videos': -> bulk,
'thumbnail': -> str,
}
"""
raw = _src(f'https://www.youtube.com/playlist?list={self.id}')
name_data = re.findall(r"{\"title\":\"(.*?)\"", raw)
video_count_data = re.findall(r"stats\":\[{\"runs\":\[{\"text\":\"(.*?)\"", raw)
thumbnails = re.findall(r"og:image\" content=\"(.*?)\?", raw)
return {
'name': name_data[0] if len(name_data) != 0 else None,
'video_count': video_count_data[0] if video_count_data else None,
'videos': _filter(re.findall(r"videoId\":\"(.*?)\"", raw)),
'thumbnail': thumbnails[0] if len(thumbnails) != 0 else None,
'url': f'https://www.youtube.com/playlist?list={self.id}'
}
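# Example usage (sketch; the playlist id below is a placeholder):
# pl = Playlist("PLxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
# print(pl.name, pl.video_count)
# first_five = pl.videos(limit=5)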
| 3.140625
| 3
|
OOSqrt.py
|
sidhu177/pythonprog
| 2
|
12781394
|
<reponame>sidhu177/pythonprog
"""
Created on Sun Jun 10 17:33:07 2018
Taken from Python 3 Object Oriented Programming by <NAME>, Apress
"""
import math
class Point:
def move(self, x,y):
self.x = x
self.y = y
def reset(self):
self.move(0,0)
def calculate_distance(self,other_point):
return math.sqrt((self.x-other_point.x)**2+(self.y - other_point.y)**2)
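# Example usage (sketch): the distance from the origin to (3, 4) is 5.0.
# p1, p2 = Point(), Point()
# p1.reset()
# p2.move(3, 4)
# print(p1.calculate_distance(p2))  # 5.0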
| 3.703125
| 4
|
pylib/tools/root_helpers.py
|
gnafit/gna
| 5
|
12781395
|
<filename>pylib/tools/root_helpers.py
import ROOT as R
class TFileContext(object):
"""A context for opening a ROOT file"""
def __init__(self, filename, mode='read'):
self.filename = filename
self.mode = mode
def __enter__(self):
self.file = R.TFile(self.filename, self.mode)
if self.file.IsZombie():
raise Exception('Unable to read ({}) ROOT file: {}'.format(self.mode, self.filename))
return self.file
def __exit__(self, exc_type, exc_value, traceback):
self.file.Close()
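# Example usage (sketch; the file and histogram names are placeholders):
# with TFileContext("hists.root") as f:
#     h = f.Get("h1")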
| 2.703125
| 3
|
src/quantum/azext_quantum/vendored_sdks/azure_quantum/aio/_quantum_client.py
|
haroonf/azure-cli-extensions
| 1
|
12781396
|
<reponame>haroonf/azure-cli-extensions
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import QuantumClientConfiguration
from .operations import JobsOperations, ProvidersOperations, QuotasOperations, StorageOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class QuantumClient:
"""Azure Quantum REST API client.
:ivar jobs: JobsOperations operations
:vartype jobs: azure.quantum._client.aio.operations.JobsOperations
:ivar providers: ProvidersOperations operations
:vartype providers: azure.quantum._client.aio.operations.ProvidersOperations
:ivar storage: StorageOperations operations
:vartype storage: azure.quantum._client.aio.operations.StorageOperations
:ivar quotas: QuotasOperations operations
:vartype quotas: azure.quantum._client.aio.operations.QuotasOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g.
00000000-0000-0000-0000-000000000000).
:type subscription_id: str
:param resource_group_name: Name of an Azure resource group.
:type resource_group_name: str
:param workspace_name: Name of the workspace.
:type workspace_name: str
:param base_url: Service URL. Default value is 'https://quantum.azure.com'.
:type base_url: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
resource_group_name: str,
workspace_name: str,
base_url: str = "https://quantum.azure.com",
**kwargs: Any
) -> None:
self._config = QuantumClientConfiguration(credential=credential, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.jobs = JobsOperations(self._client, self._config, self._serialize, self._deserialize)
self.providers = ProvidersOperations(self._client, self._config, self._serialize, self._deserialize)
self.storage = StorageOperations(self._client, self._config, self._serialize, self._deserialize)
self.quotas = QuotasOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "QuantumClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
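# Example construction (sketch; the ids and names below are placeholders):
# from azure.identity.aio import DefaultAzureCredential
# client = QuantumClient(DefaultAzureCredential(), "<subscription-id>",
#                        "<resource-group>", "<workspace-name>")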
| 1.375
| 1
|
app/view_models/articleclass.py
|
huzhipeng2388/Flask_Blog
| 1
|
12781397
|
class ArticleClassModel:
def __init__(self,data):
self.id = data.acid
self.name = data.name
| 2.1875
| 2
|
algorithm/strings/too-simple/missing-list-elements.py
|
atb00ker/scripts-lab
| 2
|
12781398
|
<reponame>atb00ker/scripts-lab
#!/bin/python3
# Twitter 2019 Internship Coding Interview Round
def missingWords(s, t):
sList = s.split()
tList = t.split()
nList = []
j = 0
tLen = len(tList)
for index, item in enumerate(sList):
if item == tList[j]:
j += 1
if j == tLen:
nList += sList[index+1:]
break
else:
nList.append(item)
return nList
s = "I am using HackerRank to improve programming"
t = "am HackerRank to improve"
missingWords(s, t)
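# Expected result for the sample inputs above (sketch):
# print(missingWords(s, t))  # ['I', 'using', 'programming']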
| 3.640625
| 4
|
data_analysis_byPro/Day09-01.py
|
yunjung-lee/class_python_data
| 0
|
12781399
|
<gh_stars>0
# Connecting to SQLite.
import sqlite3
con = sqlite3.connect('c:/temp/userDB') # specify (or connect to) the database
cur = con.cursor() # create a cursor (the channel used to send queries)
# Create the table
try :
sql = "CREATE TABLE userTable(userID CHAR(10), userName CHAR(5), userAge INT)"
cur.execute(sql)
except :
pass
sql = "INSERT INTO userTable VALUES('GGG','에이', 21);"
cur.execute(sql)
sql = "INSERT INTO userTable VALUES('NNN','삐이', 23);"
cur.execute(sql)
sql = "INSERT INTO userTable VALUES('YYY','씨이', 35);"
cur.execute(sql)
con.commit()
cur.close()
con.close() # close the database connection
print('Ok!')
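# Example follow-up (sketch): reading the inserted rows back.
# con = sqlite3.connect('c:/temp/userDB')
# for row in con.execute("SELECT * FROM userTable"):
#     print(row)
# con.close()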
| 3.71875
| 4
|
Parser-hybrid/nparser/neural/recur_cells/rnn_cell.py
|
sb-b/BOUN-PARSE
| 12
|
12781400
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from nparser.neural.recur_cells.base_cell import BaseCell
from nparser.neural.linalg import linear
#***************************************************************
class RNNCell(BaseCell):
""" """
#=============================================================
def __call__(self, inputs, state, scope=None):
""" """
with tf.variable_scope(scope or type(self).__name__):
inputs_list = [inputs, state]
hidden_act = linear(inputs_list,
self.output_size,
add_bias=True,
moving_params=self.moving_params)
hidden = self.recur_func(hidden_act)
return hidden, hidden
#=============================================================
@property
def state_size(self):
return self.output_size
| 2.453125
| 2
|
tests/suite/test_healthcheck_uri.py
|
snebel29/kubernetes-ingress
| 3,803
|
12781401
|
<reponame>snebel29/kubernetes-ingress
import pytest
import requests
from suite.resources_utils import ensure_connection
@pytest.mark.ingresses
@pytest.mark.parametrize('ingress_controller, expected_responses',
[
pytest.param({"extra_args": ["-health-status=true",
"-health-status-uri=/something-va(l)id/blabla"]},
{"/something-va(l)id/blabla": 200, "/nginx-health": 404},
id="custom-health-status-uri"),
pytest.param({"extra_args": ["-health-status=true"]},
{"/something-va(l)id/blabla": 404, "/nginx-health": 200},
id="default-health-status-uri"),
pytest.param({"extra_args": ["-health-status=false"]},
{"/something-va(l)id/blabla": 404, "/nginx-health": 404},
id="disable-health-status")
],
indirect=["ingress_controller"])
class TestHealthStatusURI:
def test_response_code(self, ingress_controller_endpoint, ingress_controller, expected_responses):
for uri in expected_responses:
req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}{uri}"
ensure_connection(req_url, expected_responses[uri])
resp = requests.get(req_url)
assert resp.status_code == expected_responses[uri],\
f"Expected {expected_responses[uri]} code for {uri} but got {resp.status_code}"
| 2.171875
| 2
|
src/s3.py
|
ytyaru/Python.Dynamic.Create.Type.20191229160958
| 0
|
12781402
|
#!/usr/bin/env python3
# coding: utf8
class MyClass: pass
class MyClass1:
cls_attr = 1
| 1.859375
| 2
|
cocoa_folder/dealornodeal/neural/rl_trainer.py
|
s-akanksha/DialoGraph_ICLR21
| 12
|
12781403
|
<filename>cocoa_folder/dealornodeal/neural/rl_trainer.py
import argparse
import random
import json
import numpy as np
import copy
from collections import defaultdict
import torch
import torch.nn as nn
from torch.autograd import Variable
from cocoa.neural.rl_trainer import Statistics
from core.controller import Controller
from neural.trainer import Trainer
from utterance import UtteranceBuilder
class RLTrainer(Trainer):
def __init__(self, agents, scenarios, train_loss, optim, training_agent=0, reward_func='margin'):
self.agents = agents
self.scenarios = scenarios
self.training_agent = training_agent
self.model = agents[training_agent].env.model
self.train_loss = train_loss
self.optim = optim
self.cuda = False
self.best_valid_reward = None
self.all_rewards = [[], []]
self.reward_func = reward_func
def update(self, batch_iter, reward, model, discount=0.95):
model.train()
model.generator.train()
nll = []
# batch_iter gives a dialogue
dec_state = None
for batch in batch_iter:
if not model.stateful:
dec_state = None
enc_state = dec_state.hidden if dec_state is not None else None
outputs, _, dec_state = self._run_batch(batch, None, enc_state) # (seq_len, batch_size, rnn_size)
loss, _ = self.train_loss.compute_loss(batch.targets, outputs) # (seq_len, batch_size)
nll.append(loss)
# Don't backprop fully.
if dec_state is not None:
dec_state.detach()
nll = torch.cat(nll) # (total_seq_len, batch_size)
rewards = [Variable(torch.zeros(1, 1).fill_(reward))]
for i in xrange(1, nll.size(0)):
rewards.append(rewards[-1] * discount)
rewards = rewards[::-1]
rewards = torch.cat(rewards)
loss = nll.squeeze().dot(rewards.squeeze())
model.zero_grad()
loss.backward()
nn.utils.clip_grad_norm(model.parameters(), 1.)
self.optim.step()
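    # Worked example of the discounting above (editor-added comment): with reward=1.0,
    # discount=0.95 and three decoder steps, the reward vector built here is
    # [1.0, 0.95, 0.9025] before the reversal and [0.9025, 0.95, 1.0] after it,
    # so later utterances carry the largest share of the episode reward.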
def _get_scenario(self, scenario_id=None, split='train'):
scenarios = self.scenarios[split]
if scenario_id is None:
scenario = random.choice(scenarios)
else:
scenario = scenarios[scenario_id % len(scenarios)]
return scenario
def _get_controller(self, scenario, split='train'):
# Randomize
if random.random() < 0.5:
scenario = copy.deepcopy(scenario)
scenario.kbs = (scenario.kbs[1], scenario.kbs[0])
sessions = [self.agents[0].new_session(0, scenario.kbs[0]),
self.agents[1].new_session(1, scenario.kbs[1])]
return Controller(scenario, sessions)
def validate(self, args):
split = 'dev'
self.model.eval()
total_stats = Statistics()
print '='*20, 'VALIDATION', '='*20
for scenario in self.scenarios[split][:200]:
controller = self._get_controller(scenario, split=split)
example = controller.simulate(args.max_turns, verbose=args.verbose)
session = controller.sessions[self.training_agent]
reward = self.get_reward(example, session)
stats = Statistics(reward=reward)
total_stats.update(stats)
print '='*20, 'END VALIDATION', '='*20
self.model.train()
return total_stats
def save_best_checkpoint(self, checkpoint, opt, valid_stats):
if self.best_valid_reward is None or valid_stats.mean_reward() > self.best_valid_reward:
self.best_valid_reward = valid_stats.mean_reward()
path = '{root}/{model}_best.pt'.format(
root=opt.model_path,
model=opt.model_filename)
print 'Save best checkpoint {path}'.format(path=path)
torch.save(checkpoint, path)
def checkpoint_path(self, episode, opt, stats):
path = '{root}/{model}_reward{reward:.2f}_e{episode:d}.pt'.format(
root=opt.model_path,
model=opt.model_filename,
reward=stats.mean_reward(),
episode=episode)
return path
def learn(self, args):
for i in xrange(args.num_dialogues):
# Rollout
scenario = self._get_scenario(scenario_id=i)
controller = self._get_controller(scenario, split='train')
example = controller.simulate(args.max_turns, verbose=args.verbose)
for session_id, session in enumerate(controller.sessions):
# Only train one agent
if session_id != self.training_agent:
continue
# Compute reward
reward = self.get_reward(example, session)
# Standardize the reward
all_rewards = self.all_rewards[session_id]
all_rewards.append(reward)
print 'step:', i
print 'reward:', reward
reward = (reward - np.mean(all_rewards)) / max(1e-4, np.std(all_rewards))
print 'scaled reward:', reward
print 'mean reward:', np.mean(all_rewards)
batch_iter = session.iter_batches()
T = batch_iter.next()
self.update(batch_iter, reward, self.model, discount=args.discount_factor)
if i > 0 and i % 100 == 0:
valid_stats = self.validate(args)
self.drop_checkpoint(args, i, valid_stats, model_opt=self.agents[self.training_agent].env.model_args)
def _is_agreed(self, example):
if not example.outcome['valid_deal']:
return False
return True
def _margin_reward(self, example):
# No agreement
if not self._is_agreed(example):
print 'No agreement'
return {0: -1, 1: -1}
return example.outcome['reward']
def _length_reward(self, example):
# No agreement
if not self._is_agreed(example):
print 'No agreement'
return {0: -1, 1: -1}
# Encourage long dialogue
length = len(example.events) / 10.
rewards = {0: length, 1: length}
return rewards
def _fair_reward(self, example):
# No agreement
if not self._is_agreed(example):
print 'No agreement'
return {0: -1, 1: -1}
rewards = example.outcome['reward'].values()
diff = abs(rewards[0] - rewards[1]) * -0.1
return {0: diff, 1: diff}
def get_reward(self, example, session):
if self.reward_func == 'margin':
rewards = self._margin_reward(example)
elif self.reward_func == 'fair':
rewards = self._fair_reward(example)
elif self.reward_func == 'length':
rewards = self._length_reward(example)
reward = rewards[session.agent]
return reward
| 2.25
| 2
|
kino-webhook/handler.py
|
DongjunLee/kino-bot
| 109
|
12781404
|
import arrow
import json
import requests
def kanban_webhook(event, context):
input_body = json.loads(event['body'])
print(event['body'])
action = input_body["action"]
action_type = action["type"]
if action_type == "createCard":
list_name, card_name = get_create_card(action["data"])
elif action_type == "updateCard":
list_name, card_name = get_update_card(action["data"])
kanban_list = ["DOING", "BREAK", "DONE"]
if list_name in kanban_list:
payload = make_payload(action=list_name, msg=card_name)
r = send_to_kino({"text": payload})
response = {
"statusCode": r.status_code
}
    else:
        response = {
            "statusCode": 400
        }
return response
def get_create_card(action_data):
list_name = action_data["list"]["name"].upper()
card_name = action_data["card"]["name"]
return list_name, card_name
def get_update_card(action_data):
list_name = action_data["listAfter"]["name"].upper()
card_name = action_data["card"]["name"]
return list_name, card_name
def make_payload(action=None, msg=None, time=None):
if time is None:
now = arrow.now()
        time = now.format("MMMM D, YYYY") + " at " + now.format("HH:mmA")  # "D" is arrow's day-of-month token
payload = {
"action": "KANBAN_" + action,
"msg": msg,
"time": time
}
return json.dumps(payload)
def send_to_kino(data):
return requests.post("https://hooks.slack.com/services/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", data=json.dumps(data))
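# Editor-added illustrative note (not part of the original handler): the Trello
# webhook body this function expects contains at least the fields read above, e.g.
#
#   {"action": {"type": "createCard",
#               "data": {"list": {"name": "doing"}, "card": {"name": "write report"}}}}
#
# which make_payload() turns into an action of "KANBAN_DOING" for the
# "write report" card. The field values are invented for illustration only.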
| 2.53125
| 3
|
usp-introducao-python/numerosimparesnaturais.py
|
kbcvcbk/cesar-school
| 2
|
12781405
|
<reponame>kbcvcbk/cesar-school
n=int(input("Digite o valor de n: "))
cont=1
while n!=0:
if cont%2!=0:
print(cont)
cont=cont+1
n=n-1
else:
cont=cont+1
| 3.5625
| 4
|
pytip/__main__.py
|
pybites/pybites-tips
| 9
|
12781406
|
<reponame>pybites/pybites-tips
import argparse
from pytip.tips import PyBitesTips
def main():
parser = argparse.ArgumentParser(description='Search term')
parser.add_argument("-s", "--search", type=str,
help='Search PyBites Python tips')
parser.add_argument("-p", "--pager", action='store_true',
help='Go through the resulting tips one by one')
args = parser.parse_args()
pb_tips = PyBitesTips(use_pager=args.pager)
if args.search:
tips = pb_tips.filter_tips(args.search)
pb_tips.show_tips(tips)
else:
pb_tips()
if __name__ == '__main__':
main()
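# Editor-added usage sketch (assumes the package is installed so that
# `python -m pytip` resolves to this module):
#
#   python -m pytip --search decorators          # print tips matching "decorators"
#   python -m pytip --search decorators --pager  # page through matching tips one by one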
| 2.890625
| 3
|
app/github_client/services/__init__.py
|
TennaGraph/TennaGraph
| 7
|
12781407
|
<gh_stars>1-10
from .git_hub_eip import GitHubEIP
from .git_hub_db import GitHubDB
| 1.039063
| 1
|
apps/home/migrations/0009_auto_20220206_1717.py
|
Aimene-BAHRI/ecole_de_formation
| 1
|
12781408
|
# Generated by Django 3.2.6 on 2022-02-06 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0008_auto_20220202_1858'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='activities',
),
migrations.AddField(
model_name='student',
name='activities',
field=models.ManyToManyField(blank=True, null=True, related_name='fils', to='home.Activity'),
),
]
| 1.59375
| 2
|
test/test_dca.py
|
petbox-dev/dca
| 36
|
12781409
|
<filename>test/test_dca.py
"""
Decline Curve Models
Unit Testing
Copyright © 2020 <NAME>
Author
------
<NAME>
<NAME>
Notes
-----
Created on August 5, 2019
"""
import sys
import warnings
from datetime import timedelta
import pytest # type: ignore
import hypothesis
from hypothesis import assume, given, settings, note, strategies as st
from typing import Any, Type, TypeVar, Union
from math import isnan
import numpy as np
from petbox import dca
# local import
from .data import rate as q_data, time as t_data # noqa
def signif(x: np.ndarray, p: int) -> np.ndarray:
x = np.asarray(x)
x_positive = np.where(np.isfinite(x) & (x != 0), np.abs(x), 10**(p - 1))
mags = 10 ** (p - 1 - np.floor(np.log10(x_positive)))
return np.round(x * mags) / mags
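# Worked example for signif() above (editor-added comment): signif(0.0123456, 3) -> 0.0123,
# since mags = 10**(3 - 1 - floor(log10(0.0123456))) = 10**4 and
# round(0.0123456 * 1e4) / 1e4 = 123.0 / 1e4 = 0.0123.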
def is_float_array_like(arr: Any, like: np.ndarray) -> bool:
return (
isinstance(arr, np.ndarray)
and arr.dtype == np.dtype(np.float64)
and arr.shape == like.shape
)
def is_monotonic_nonincreasing(arr: np.ndarray) -> bool:
# a = np.diff(signif(arr, 6))
a = np.diff(arr, 6)
return np.all(a <= 0.0)
def is_monotonic_increasing(arr: np.ndarray) -> bool:
# a = np.diff(signif(arr, 6))
a = np.diff(arr, 6)
return np.all(a > 0.0)
def is_monotonic_nondecreasing(arr: np.ndarray) -> bool:
# a = np.diff(signif(arr, 6))
a = np.diff(arr, 6)
return np.all(a >= 0.0)
T = TypeVar('T', bound=dca.DeclineCurve)
def model_floats(model_cls: Type[T], param: str) -> st.SearchStrategy[float]:
p = model_cls.get_param_desc(param)
return st.floats(p.lower_bound, p.upper_bound, # type: ignore
exclude_min=p.exclude_lower_bound, exclude_max=p.exclude_upper_bound)
def check_model(model: dca.DeclineCurve, qi: float) -> bool:
t = dca.get_time()
with warnings.catch_warnings(record=True) as w:
if isinstance(model, dca.Duong):
t0 = 1e-3
assert np.isclose(model.rate(np.array(1.0)), qi, atol=1e-10)
assert np.isclose(model.cum(np.array(1.0)), qi / model.a, atol=1e-10)
else:
t0 = 0.0
assert np.isclose(model.rate(np.array(0.0)), qi, atol=1e-10)
assert np.isclose(model.cum(np.array(0.0)), 0.0, atol=1e-10)
rate = model.rate(t)
assert is_float_array_like(rate, t)
# assert is_monotonic_nonincreasing(rate)
assert np.all(np.isfinite(rate))
cum = model.cum(t)
assert is_float_array_like(cum, t)
# if not isinstance(model, dca.PLE):
# exclude PLE as it is numerically integrated
# assert is_monotonic_nondecreasing(cum)
assert np.all(np.isfinite(cum))
mvolume = model.monthly_vol(t)
mavg_rate = np.gradient(mvolume, t)
# assert is_float_array_like(mvolume, t)
# assert is_monotonic_nonincreasing(mavg_rate)
assert np.all(np.isfinite(mvolume))
assert np.all(np.isfinite(mavg_rate))
ivolume = model.interval_vol(t)
iavg_rate = np.gradient(ivolume, t)
# assert is_float_array_like(ivolume, t)
# assert is_monotonic_nonincreasing(iavg_rate)
assert np.all(np.isfinite(ivolume))
assert np.all(np.isfinite(iavg_rate))
evolume = model.monthly_vol_equiv(t)
mavg_rate = np.gradient(evolume, t)
# assert is_float_array_like(evolume, t)
# assert is_monotonic_nonincreasing(mavg_rate)
assert np.all(np.isfinite(evolume))
assert np.all(np.isfinite(mavg_rate))
D = model.D(t)
assert is_float_array_like(D, t)
# assert is_monotonic_nonincreasing(D)
assert np.all(np.isfinite(D))
D2 = model._Dfn2(t)
assert is_float_array_like(D2, t)
# assert is_monotonic_nonincreasing(D2)
assert np.all(np.isfinite(D2))
beta = model.beta(t)
assert is_float_array_like(beta, t)
# TODO: what are the invariants for beta?
D_inferred = beta / t
# assert is_monotonic_nonincreasing(D_inferred)
assert np.all(np.isfinite(beta))
b = model.b(t)
assert is_float_array_like(b, t)
assert np.all(np.isfinite(b))
return True
def check_yield_model(model: Union[dca.SecondaryPhase, dca.WaterPhase],
phase: str, qi: float) -> bool:
t = dca.get_time()
with warnings.catch_warnings(record=True) as w:
t0 = 0.0
assert np.isclose(model.cum(np.array(0.0)), 0.0, atol=1e-10)
if phase == 'secondary' and isinstance(model, dca.SecondaryPhase):
gor = model.gor(t)
assert is_float_array_like(gor, t)
assert np.all(np.isfinite(gor))
cgr = model.cgr(t)
assert is_float_array_like(cgr, t)
assert np.all(np.isfinite(cgr))
with pytest.raises(ValueError) as e:
wor = model.wor(t) # type: ignore
assert is_float_array_like(wor, t)
assert np.all(np.isfinite(wor))
elif phase == 'water' and isinstance(model, dca.WaterPhase):
with pytest.raises(ValueError) as e:
gor = model.gor(t) # type: ignore
assert is_float_array_like(gor, t)
assert np.all(np.isfinite(gor))
cgr = model.cgr(t) # type: ignore
assert is_float_array_like(cgr, t)
assert np.all(np.isfinite(cgr))
wor = model.wor(t)
assert is_float_array_like(wor, t)
assert np.all(np.isfinite(wor))
rate = model.rate(t)
assert is_float_array_like(rate, t)
# assert is_monotonic_nonincreasing(rate)
assert np.all(np.isfinite(rate))
cum = model.cum(t)
assert is_float_array_like(cum, t)
# if not isinstance(model, dca.PLE):
# exclude PLE as it is numerically integrated
# assert is_monotonic_nondecreasing(cum)
assert np.all(np.isfinite(cum))
mvolume = model.monthly_vol(t)
mavg_rate = np.gradient(mvolume, t)
# assert is_float_array_like(mvolume, t)
# assert is_monotonic_nonincreasing(mavg_rate)
assert np.all(np.isfinite(mvolume))
assert np.all(np.isfinite(mavg_rate))
ivolume = model.interval_vol(t, t0=t0)
iavg_rate = np.gradient(ivolume, t)
# assert is_float_array_like(ivolume, t)
# assert is_monotonic_nonincreasing(iavg_rate)
assert np.all(np.isfinite(ivolume))
assert np.all(np.isfinite(iavg_rate))
evolume = model.monthly_vol_equiv(t)
mavg_rate = np.gradient(evolume, t)
# assert is_float_array_like(evolume, t)
# assert is_monotonic_nonincreasing(mavg_rate)
assert np.all(np.isfinite(evolume))
assert np.all(np.isfinite(mavg_rate))
D = model.D(t)
assert is_float_array_like(D, t)
# assert is_monotonic_nonincreasing(D)
# assert np.all(np.isfinite(D))
D2 = model._Dfn2(t)
assert is_float_array_like(D2, t)
# assert is_monotonic_nonincreasing(D2)
# assert np.all(np.isfinite(D2))
beta = model.beta(t)
assert is_float_array_like(beta, t)
# TODO: what are the invariants for beta?
# D_inferred = beta / t
# assert is_monotonic_nonincreasing(D_inferred)
# assert np.all(np.isfinite(beta))
b = model.b(t)
assert is_float_array_like(b, t)
assert np.all(np.isfinite(b))
# der = model._derfn(np.array([0.0]))
# NN = model._NNfn(np.array([0.0]))
return True
def check_transient_model(model: dca.THM) -> bool:
t = dca.get_time()
with warnings.catch_warnings(record=True) as w:
t_D = model.transient_D(t)
assert is_float_array_like(t_D, t)
# assert is_monotonic_nonincreasing(t_D)
assert np.all(np.isfinite(t_D))
t_beta = model.transient_beta(t)
assert is_float_array_like(t_beta, t)
# assert is_monotonic_nonincreasing(t_beta)
assert np.all(np.isfinite(t_beta))
t_b = model.transient_b(t)
assert is_float_array_like(t_b, t)
# assert is_monotonic_nonincreasing(t_b)
assert np.all(np.isfinite(t_b))
return True
def check_transient_model_rate_cum(model: dca.THM) -> bool:
# these are computationally expensive, so check separately
t = dca.get_time()
with warnings.catch_warnings(record=True) as w:
t_N = model.transient_cum(t)
assert is_float_array_like(t_N, t)
# assert is_monotonic_nondecreasing(t_N)
assert np.all(np.isfinite(t_N))
t_q = model.transient_rate(t)
assert is_float_array_like(t_q, t)
# assert is_monotonic_nonincreasing(t_q)
assert np.all(np.isfinite(t_q))
return True
def test_time_arrays() -> None:
t = dca.get_time()
assert is_monotonic_increasing(t)
int_t = dca.get_time_monthly_vol()
thm = dca.THM(1000, 0.5, 2.0, 1.0, 30.0)
def test_nulls() -> None:
t = dca.get_time()
primary = dca.NullPrimaryPhase()
assert np.allclose(primary.rate(t), 0.0)
assert np.allclose(primary.cum(t), 0.0)
assert np.allclose(primary.D(t), 0.0)
assert np.allclose(primary.beta(t), 0.0)
assert np.allclose(primary.b(t), 0.0)
assert np.allclose(primary._Dfn2(t), 0.0)
secondary = dca.NullAssociatedPhase()
assert np.allclose(secondary.gor(t), 0.0)
assert np.allclose(secondary.cgr(t), 0.0)
assert np.allclose(secondary.wor(t), 0.0)
assert np.allclose(secondary.rate(t), 0.0)
assert np.allclose(secondary.cum(t), 0.0)
assert np.allclose(secondary.D(t), 0.0)
assert np.allclose(secondary.beta(t), 0.0)
assert np.allclose(secondary.b(t), 0.0)
assert np.allclose(secondary._Dfn2(t), 0.0)
def test_associated() -> None:
with pytest.raises(TypeError) as e:
sec = dca.AssociatedPhase() # type: ignore
with pytest.raises(TypeError) as e:
sec = dca.SecondaryPhase() # type: ignore
with pytest.raises(TypeError) as e:
wtr = dca.WaterPhase() # type: ignore
with pytest.raises(TypeError) as e:
bth = dca.BothAssociatedPhase() # type: ignore
# TODO: use bounds, after we use testing to set them
@given(
qi=st.floats(1e-10, 1e6),
Di=st.floats(1e-10, 1e10),
Dinf=st.floats(1e-10, 1e10),
n=st.floats(1e-10, 1.0, exclude_max=True)
)
def test_PLE(qi: float, Di: float, Dinf: float, n: float) -> None:
assume(Dinf <= Di)
ple = dca.PLE.from_params((qi, Di, Dinf, n))
ple = dca.PLE(qi, Di, Dinf, n)
check_model(ple, qi)
@given(
qi=st.floats(1e-10, 1e6),
tau=st.floats(1e-10, 1e4),
n=st.floats(1e-10, 1.0, exclude_max=True)
)
def test_SE(qi: float, tau: float, n: float) -> None:
se = dca.SE.from_params((qi, tau, n))
se = dca.SE(qi, tau, n)
check_model(se, qi)
@given(
qi=st.floats(1e-10, 1e6),
a=st.floats(1.0, 10.0),
m=st.floats(1.0, 10.0, exclude_min=True)
)
def test_Duong(qi: float, a: float, m: float) -> None:
duong = dca.Duong.from_params((qi, a, m))
duong = dca.Duong(qi, a, m)
check_model(duong, qi)
@given(
qi=st.floats(1e-10, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
bf=st.floats(0.0, 2.0),
telf=st.floats(0.0, 1e6)
)
def test_THM(qi: float, Di: float, bf: float, telf: float) -> None:
thm = dca.THM.from_params((qi, Di, 2.0, bf, telf, 0.0, 0.0))
thm = dca.THM(qi, Di, 2.0, bf, telf, 0.0, 0.0)
check_model(thm, qi)
check_transient_model(thm)
thm = dca.THM(qi, Di, 2.0, 0.0, telf)
check_model(thm, qi)
check_transient_model(thm)
@given(
qi=st.floats(1e-10, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
bf=st.floats(0.0, 2.0),
telf=st.floats(0.0, 1e4),
bterm=st.floats(0.0, 1.0),
tterm=st.floats(1e-3, 30.0),
)
def test_THM_terminal(qi: float, Di: float, bf: float, telf: float, bterm: float, tterm: float) -> None:
assume(tterm * dca.DAYS_PER_YEAR > telf)
assume(bterm < bf)
thm = dca.THM(qi, Di, 2.0, bf, telf, bterm, tterm)
check_transient_model(thm)
check_model(thm, qi)
@given(
qi=st.floats(1e-10, 1e6),
bf=st.floats(0.0, 2.0),
telf=st.floats(0.0, 1e4),
bterm=st.floats(0.0, 1.0),
tterm=st.floats(5.0, 30.0),
)
def test_THM_zero_Di(qi: float, bf: float, telf: float, bterm: float, tterm: float) -> None:
assume(tterm * dca.DAYS_PER_YEAR > telf)
assume(bterm < bf)
thm = dca.THM(qi, 0.0, 2.0, bf, telf, bterm, tterm)
check_model(thm, qi)
check_transient_model(thm)
@given(
qi=st.floats(1e-10, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
telf=st.floats(0.0, 1e4),
bterm=st.floats(0.0, 0.5),
tterm=st.floats(5, 30),
)
def test_THM_harmonic(qi: float, Di: float, telf: float, bterm: float, tterm: float) -> None:
assume(tterm * dca.DAYS_PER_YEAR > telf)
thm = dca.THM(qi, Di, 2.0, 1.0, telf, bterm, tterm)
check_model(thm, qi)
check_transient_model(thm)
def test_THM_transient_extra() -> None:
thm = dca.THM(1000.0, 0.80, 2.0, 0.8, 30.0, 0.3, 5.0)
check_transient_model(thm)
check_transient_model_rate_cum(thm)
thm = dca.THM(1000.0, 0.80, 2.0, 0.8, 30.0, 0.06, 0.0)
check_transient_model(thm)
check_transient_model_rate_cum(thm)
thm = dca.THM(1000.0, 1e-10, 2.0, 0.8, 1e-5, 0.5, 0.06)
check_transient_model(thm)
check_transient_model_rate_cum(thm)
with pytest.raises(ValueError) as e:
thm = dca.THM(1000.0, 1e-10, 2.0, 0.3, 30.0, .5, 10.0)
@given(
qi=st.floats(1e-10, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
bf=st.floats(0.0, 2.0),
telf=st.floats(0.0, 1e6),
bterm=st.floats(1e-3, 0.3)
)
@settings(suppress_health_check=[hypothesis.HealthCheck.filter_too_much]) # type: ignore
def test_THM_terminal_exp(qi: float, Di: float, bf: float, telf: float, bterm: float) -> None:
assume(dca.THM.nominal_from_secant(Di, 2.0) >= dca.THM.nominal_from_tangent(bterm))
thm = dca.THM(qi, Di, 2.0, bf, telf, bterm, 0.0)
check_model(thm, qi)
check_transient_model(thm)
@given(
qi=st.floats(0.0, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
bi=st.floats(0.0, 2.0),
Dterm=st.floats(0.0, 1.0, exclude_max=True),
)
def test_MH(qi: float, Di: float, bi: float, Dterm: float) -> None:
assume(dca.MH.nominal_from_secant(Di, bi) >= dca.MH.nominal_from_tangent(Dterm))
mh = dca.MH(qi, Di, bi, Dterm)
check_model(mh, qi)
mh = dca.MH(qi, 0.0, bi, 0.0)
check_model(mh, qi)
@given(
qi=st.floats(0.0, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
Dterm=st.floats(0.0, 1.0, exclude_max=True),
)
def test_MH_harmonic(qi: float, Di: float, Dterm: float) -> None:
assume(dca.MH.nominal_from_secant(Di, 1.0) >= dca.MH.nominal_from_tangent(Dterm))
mh = dca.MH(qi, Di, 1.0, Dterm)
check_model(mh, qi)
@given(
qi=st.floats(0.0, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
Dterm=st.floats(0.0, 1.0, exclude_max=True),
)
def test_MH_no_validate(qi: float, Di: float, Dterm: float) -> None:
assume(dca.MH.nominal_from_secant(Di, 1.0) >= dca.MH.nominal_from_tangent(Dterm))
with pytest.raises(ValueError) as e:
mh = dca.MH(qi, Di, 2.5, Dterm)
mh = dca.MH(qi, Di, 2.5, Dterm, validate_params=[True, True, False, True])
@given(
D=st.floats(0.0, 1.0, exclude_max=True),
b=st.floats(0.0, 2.0),
)
def test_decline_conv(D: float, b: float) -> None:
Dnom = dca.MultisegmentHyperbolic.nominal_from_secant(D, b)
_D = dca.MultisegmentHyperbolic.secant_from_nominal(Dnom, b)
def test_bound_errors() -> None:
with pytest.raises(ValueError) as e:
# < lower bound
ple = dca.PLE(-1000, 0.8, 0.0, 0.5)
with pytest.raises(ValueError) as e:
# lower bound excluded
ple = dca.PLE(1000, 0.8, 0.0, 0.0)
with pytest.raises(ValueError) as e:
# > upper bound
thm = dca.THM(1000, 0.5, 2.0, 10.0, 30.0)
with pytest.raises(ValueError) as e:
        # upper bound excluded
thm = dca.THM(1000, 1.5, 2.0, 0.5, 30.0)
with pytest.raises(KeyError) as e:
# invalid parameter
thm = dca.THM(1000, 0.5, 2.0, 0.5, 30.0)
thm.get_param_desc('n')
with pytest.raises(ValueError) as e:
# invalid parameter sequence length
thm = dca.THM.from_params([1000, 0.5, 2.0, 0.5])
def test_terminal_exceeds() -> None:
with pytest.raises(ValueError) as e:
# Dinf > Di
ple = dca.PLE(1000, 0.8, 0.9, 0.5)
with pytest.raises(ValueError) as e:
# Dterm > Di
mh = dca.MH(1000, 0.5, 1.0, 0.9)
with pytest.raises(ValueError) as e:
# bf > bi
thm = dca.THM(1000, 0.8, 1.5, 1.6, 30.0)
with pytest.raises(ValueError) as e:
# tterm < telf
thm = dca.THM(1000, 0.8, 2.0, 1.0, 200.0, 0.3, 100.0 / dca.DAYS_PER_YEAR)
@given(
qi=st.floats(1e-10, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
bf=st.floats(0.0, 2.0),
telf=st.floats(1e-10, 1e4),
bterm=st.floats(1e-3, 0.3, exclude_max=True),
tterm=st.floats(5.0, 30.0),
c=st.floats(1e-10, 1e10),
m0=st.floats(-1.0, 1.0),
m=st.floats(-1.0, 1.0),
t0=st.floats(1e-10, 365.25),
)
@settings(deadline=None) # type: ignore
def test_yield(qi: float, Di: float, bf: float, telf: float, bterm: float, tterm: float,
c: float, m0: float, m: float, t0: float) -> None:
assume(tterm * dca.DAYS_PER_YEAR > telf)
assume(bterm < bf)
thm = dca.THM(qi, Di, 2.0, bf, telf, bterm, tterm)
sec = dca.PLYield(c, m0, m, t0)
thm.add_secondary(sec)
check_yield_model(thm.secondary, 'secondary', qi)
thm = dca.THM(qi, Di, 2.0, bf, telf, bterm, tterm)
wtr = dca.PLYield(c, m0, m, t0)
thm.add_water(wtr)
check_yield_model(thm.water, 'water', qi)
@given(
qi=st.floats(1e-10, 1e6),
Di=st.floats(0.0, 1.0, exclude_max=True),
bf=st.floats(0.0, 2.0),
telf=st.floats(1e-10, 1e4),
bterm=st.floats(1e-3, 0.3, exclude_max=True),
tterm=st.floats(5.0, 30.0),
c=st.floats(1e-10, 1e10),
m0=st.floats(-1.0, 1.0),
m=st.floats(-1.0, 1.0),
t0=st.floats(1e-10, 365.25),
_min=st.floats(0, 100.0),
_max=st.floats(1e4, 5e5)
)
@settings(deadline=None) # type: ignore
def test_yield_min_max(qi: float, Di: float, bf: float, telf: float, bterm: float, tterm: float,
c: float, m0: float, m: float, t0: float, _min: float, _max: float) -> None:
assume(tterm * dca.DAYS_PER_YEAR > telf)
assume(bterm < bf)
thm = dca.THM(qi, Di, 2.0, bf, telf, bterm, tterm)
sec = dca.PLYield(c, m0, m, t0, _min, _max)
thm.add_secondary(sec)
check_yield_model(thm.secondary, 'secondary', qi)
wtr = dca.PLYield(c, m0, m, t0, _min, _max)
thm.add_water(wtr)
check_yield_model(thm.water, 'water', qi)
def test_yield_min_max_invalid() -> None:
with pytest.raises(ValueError) as e:
y = dca.PLYield(1000.0, 0.0, 0.0, 180.0, 10.0, 1.0)
def test_yield_errors() -> None:
with pytest.raises(ValueError) as e:
# < lower bound
ple = dca.PLE(-1000, 0.8, 0.0, 0.5)
with pytest.raises(ValueError) as e:
# lower bound excluded
tplehm = dca.PLE(1000, 0.8, 0.0, 0.0)
with pytest.raises(ValueError) as e:
# > upper bound
thm = dca.THM(1000, 0.5, 2.0, 10.0, 30.0)
with pytest.raises(ValueError) as e:
        # upper bound excluded
thm = dca.THM(1000, 1.5, 2.0, 0.5, 30.0)
with pytest.raises(KeyError) as e:
# invalid parameter
thm = dca.THM(1000, 0.5, 2.0, 0.5, 30.0)
thm.get_param_desc('n')
with pytest.raises(ValueError) as e:
# invalid parameter sequence length
thm = dca.THM.from_params([1000, 0.5, 2.0, 0.5])
@given(
L=st.floats(0.0, 2.0),
xlog=st.booleans(),
ylog=st.booleans()
)
def test_bourdet(L: float, xlog: bool, ylog: bool) -> None:
with warnings.catch_warnings(record=True) as w:
der = dca.bourdet(q_data, t_data, L, xlog, ylog)
| 2.515625
| 3
|
uibcdf_biblio/demo/__init__.py
|
uibcdf/UIBCDF_biblio
| 0
|
12781410
|
import pathlib
bib = pathlib.Path(__file__).parent.absolute() / 'bibliography.bib'
del(pathlib)
| 1.34375
| 1
|
api/server/swagger_server/controllers_impl/catalog_service_controller_impl.py
|
srishtipithadia/mlx
| 0
|
12781411
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import connexion
import json
import traceback
from swagger_server.models import ApiCatalogUploadError
from swagger_server.models.api_catalog_upload import ApiCatalogUpload # noqa: E501
from swagger_server.models.api_catalog_upload_response import ApiCatalogUploadResponse
from swagger_server.models.api_catalog_upload_item import ApiCatalogUploadItem
from swagger_server.models.api_list_catalog_items_response import ApiListCatalogItemsResponse # noqa: E501
from swagger_server.controllers_impl.component_service_controller_impl import list_components, upload_component_from_url
from swagger_server.controllers_impl.dataset_service_controller_impl import list_datasets, upload_dataset_from_url
from swagger_server.controllers_impl.model_service_controller_impl import list_models, upload_model_from_url
from swagger_server.controllers_impl.notebook_service_controller_impl import list_notebooks, upload_notebook_from_url
from swagger_server.controllers_impl.pipeline_service_controller_impl import list_pipelines, upload_pipeline_from_url
from swagger_server.util import ApiError
def list_all_assets(page_token=None, page_size=None, sort_by=None, filter=None): # noqa: E501
"""list_all_assets
:param page_token:
:type page_token: str
:param page_size:
:type page_size: int
:param sort_by: Can be format of \"field_name\", \"field_name asc\" or \"field_name desc\" Ascending by default.
:type sort_by: str
:param filter: A string-serialized JSON dictionary with key-value pairs that correspond to the ApiComponent's attribute names and their respective values to be filtered for.
:type filter: str
:rtype: ApiListCatalogItemsResponse
"""
if page_size == 0:
return {}, 204
# TODO: do not mis-use page_token as MySQL result offset
offset = int(page_token) if page_token and page_token.isdigit() else 0
if page_size or page_token:
print(f"WARNING: page_size and page_token are not implemented on {__file__}#list_all_assets()")
list_methods = {
"components": list_components,
"datasets": list_datasets,
"models": list_models,
"notebooks": list_notebooks,
"pipelines": list_pipelines
}
api_response = ApiListCatalogItemsResponse(
components=[], datasets=[], models=[], notebooks=[], pipelines=[],
total_size=0)
for asset_type, list_method in list_methods.items():
asset_list_response, status = list_method(filter=filter, sort_by=sort_by)
if 200 <= status < 300:
asset_list = asset_list_response.__getattribute__(asset_type)
api_response.__getattribute__(asset_type).extend(asset_list)
# TODO: return filtered size or total number of all assets
# api_response.total_size += asset_list_response.total_size
api_response.total_size += len(asset_list)
return api_response, 200
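# Editor-added illustrative call (not part of the original controller). The `filter`
# argument is a string-serialized JSON dict; the attribute name "name" below is an
# assumption, since the valid keys depend on the asset models:
#
#   response, status = list_all_assets(filter=json.dumps({"name": "my-pipeline"}))
#   print(status, response.total_size)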
def upload_multiple_assets(body: ApiCatalogUpload): # noqa: E501
"""upload_multiple_assets
:param body:
:type body: ApiCatalogUpload
:rtype: ApiCatalogUploadResponse
"""
if connexion.request.is_json:
body = ApiCatalogUpload.from_dict(connexion.request.get_json()) # noqa: E501
def get_access_token_for_url(url: str) -> str:
for api_access_token in body.api_access_tokens or []:
if api_access_token.url_host in url:
return api_access_token.api_token
return None
upload_methods = {
"components": upload_component_from_url,
"datasets": upload_dataset_from_url,
"models": upload_model_from_url,
"notebooks": upload_notebook_from_url,
"pipelines": upload_pipeline_from_url
}
api_response = ApiCatalogUploadResponse(
components=[], datasets=[], models=[], notebooks=[], pipelines=[],
total_created=0, errors=[], total_errors=0)
for asset_type, upload_method in upload_methods.items():
for asset in body.__getattribute__(asset_type) or []:
try:
api_object, status = upload_method(
url=asset.url, name=asset.name,
access_token=get_access_token_for_url(asset.url))
if 200 <= status < 300:
api_response.__getattribute__(asset_type).append(api_object)
api_response.total_created += 1
else:
# TODO: remove this?
api_error = ApiCatalogUploadError(**asset.to_dict(),
error_message=f"THIS SHOULD NOT HAPPEN: {str(api_object).strip()}",
status_code=500)
api_response.errors.append(api_error)
print(f"THIS SHOULD NOT HAPPEN: {api_error}")
print(traceback.format_exc())
except ApiError as e:
api_error = ApiCatalogUploadError(**asset.to_dict(),
error_message=e.message,
status_code=e.http_status_code)
api_response.errors.append(api_error)
except Exception as e:
api_error = ApiCatalogUploadError(**asset.to_dict(),
error_message=str(e),
status_code=500)
api_response.errors.append(api_error)
print(traceback.format_exc())
api_response.total_errors = len(api_response.errors)
response_status = \
201 if api_response.total_created > 0 and api_response.total_errors == 0 else \
207 if api_response.total_created > 0 and api_response.total_errors > 0 else \
max([e.status_code for e in api_response.errors])
return api_response, response_status
| 1.59375
| 2
|
pegleg/engine/util/pegleg_managed_document.py
|
openstack/airship-pegleg
| 9
|
12781412
|
<gh_stars>1-10
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from datetime import datetime
import logging
from pegleg import config
from pegleg.engine.util import git
PEGLEG_MANAGED_SCHEMA = 'pegleg/PeglegManagedDocument/v1'
ENCRYPTED = 'encrypted'
GENERATED = 'generated'
STORAGE_POLICY = 'storagePolicy'
METADATA = 'metadata'
DEFAULT_LAYER = 'site'
LOG = logging.getLogger(__name__)
__all__ = ['PeglegManagedSecretsDocument']
class PeglegManagedSecretsDocument(object):
"""Object representing one Pegleg managed secret document."""
def __init__(self, document, generated=False, catalog=None, author=None):
"""
Parse and wrap an externally generated document in a
pegleg managed document.
:param document: The content of the source document
:type document: dict
:param bool generated: A flag to indicate the documents are
auto-generated by pegleg (True), or manually created (False).
:param catalog: catalog of the generated secret documents. A catalog
must be provided, only if generated is True.
:type catalog: A subclass of the ABC
pegleg.catalogs.base_catalog.BaseCatalog
"""
self._catalog = catalog
self._author = author
self._generated = generated
if self.is_pegleg_managed_secret(document):
self._pegleg_document = document
else:
self._pegleg_document = self.__wrap(
document, generated, catalog, author)
self._embedded_document = \
self._pegleg_document['data']['managedDocument']
@staticmethod
def __wrap(secrets_document, generated=False, catalog=None, author=None):
"""
Embeds a valid deckhand document in a pegleg managed document.
:param secrets_document: secrets document to be embedded in a
pegleg managed document.
:type secrets_document: dict
:param bool generated: A flag to indicate the documents are
auto-generated by pegleg (True), or manually created (False).
        :return: pegleg managed document with the wrapped original secrets
document.
:rtype: dict
"""
layer = secrets_document.get('metadata',
{}).get('layeringDefinition',
{}).get('layer', DEFAULT_LAYER)
layering_definition = OrderedDict(
[('abstract', False), ('layer', layer)])
metadata = OrderedDict(
[
(
'name', '{}/{}'.format(
secrets_document['schema'],
secrets_document['metadata']['name'])),
('schema', 'metadata/Document/v1'),
('labels', secrets_document['metadata'].get('labels', {})),
('layeringDefinition', layering_definition),
('storagePolicy', 'cleartext')
])
data = OrderedDict(
[
(
'managedDocument',
OrderedDict(
[
('schema', secrets_document['schema']),
('metadata', secrets_document['metadata']),
('data', secrets_document['data'])
]))
])
doc = OrderedDict(
[
('schema', PEGLEG_MANAGED_SCHEMA), ('metadata', metadata),
('data', data)
])
if generated:
doc['data'][GENERATED] = {
'at': datetime.utcnow().isoformat(),
'by': author,
'specifiedBy': {
'repo': git.repo_url(config.get_site_repo()),
'reference': config.get_site_rev() or 'master',
'path': catalog.catalog_path,
},
}
return doc
@staticmethod
def is_pegleg_managed_secret(secrets_document):
""""
Verify if the document is already a pegleg managed secrets document.
:return: True if the document is a pegleg managed secrets document,
False otherwise.
:rtype: bool
"""
return PEGLEG_MANAGED_SCHEMA in secrets_document.get('schema')
@property
def embedded_document(self):
"""
parse the pegleg managed document, and return the embedded document
:return: The original secrets document unwrapped from the pegleg
managed document.
:rtype: dict
"""
return self._embedded_document
@property
def name(self):
return self._pegleg_document.get('metadata', {}).get('name')
@property
def data(self):
return self._pegleg_document.get('data')
@property
def pegleg_document(self):
return self._pegleg_document
def is_encrypted(self):
"""If the document is already encrypted return True. False
otherwise."""
return ENCRYPTED in self.data
def is_generated(self):
"""If the document is already marked auto-generated return True. False
otherwise."""
return GENERATED in self.data
def is_storage_policy_encrypted(self):
"""If the document's storagePolicy is set to encrypted return True.
False otherwise."""
return STORAGE_POLICY in self._embedded_document[METADATA] \
and ENCRYPTED in self._embedded_document[METADATA][STORAGE_POLICY]
def set_encrypted(self, author=None):
"""Mark the pegleg managed document as encrypted."""
self.data[ENCRYPTED] = {'at': datetime.utcnow().isoformat()}
if author:
self.data[ENCRYPTED]['by'] = author
def set_decrypted(self):
"""Mark the pegleg managed document as un-encrypted."""
self.data.pop(ENCRYPTED)
def set_secret(self, secret):
self._embedded_document['data'] = secret
def get_secret(self):
return self._embedded_document.get('data')
def get_layer(self):
return self._embedded_document[METADATA]['layeringDefinition']['layer']
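# Editor-added usage sketch (not part of the original module); the document values
# below are invented for illustration:
#
#   doc = {
#       'schema': 'deckhand/Passphrase/v1',
#       'metadata': {'name': 'my-passphrase',
#                    'layeringDefinition': {'layer': 'site'}},
#       'data': 's3cr3t',
#   }
#   managed = PeglegManagedSecretsDocument(doc)
#   managed.pegleg_document['schema']   # 'pegleg/PeglegManagedDocument/v1'
#   managed.get_secret()                # 's3cr3t'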
| 1.945313
| 2
|
test_Sony.py
|
starsdeep/15663-project
| 16
|
12781413
|
<reponame>starsdeep/15663-project<gh_stars>10-100
import argparse
import os
import torch
from data import SonyTestDataset
from torch.utils.data import DataLoader
from models import Unet
import scipy.io
import scipy.misc  # needed for scipy.misc.toimage below (available only in older SciPy releases)
from tqdm import tqdm
import numpy as np
def test(args):
# device
device = torch.device("cuda:%d" % args.gpu if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
# data
testset = SonyTestDataset(args.input_dir, args.gt_dir)
test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
# model
model = Unet()
model.load_state_dict(torch.load(args.model))
model.to(device)
model.eval()
# testing
for i, databatch in tqdm(enumerate(test_loader), total=len(test_loader)):
input_full, scale_full, gt_full, test_id, ratio = databatch
scale_full, gt_full = torch.squeeze(scale_full), torch.squeeze(gt_full)
# processing
inputs = input_full.to(device)
outputs = model(inputs)
outputs = outputs.cpu().detach()
outputs = torch.squeeze(outputs)
outputs = outputs.permute(1, 2, 0)
        # scaling and clipping
outputs, scale_full, gt_full = outputs.numpy(), scale_full.numpy(), gt_full.numpy()
scale_full = scale_full * np.mean(gt_full) / np.mean(
scale_full) # scale the low-light image to the same mean of the ground truth
outputs = np.minimum(np.maximum(outputs, 0), 1)
# saving
if not os.path.isdir(os.path.join(args.result_dir, 'eval')):
os.makedirs(os.path.join(args.result_dir, 'eval'))
scipy.misc.toimage(scale_full * 255, high=255, low=0, cmin=0, cmax=255).save(
os.path.join(args.result_dir, 'eval', '%05d_00_train_%d_scale.jpg' % (test_id[0], ratio[0])))
scipy.misc.toimage(outputs * 255, high=255, low=0, cmin=0, cmax=255).save(
os.path.join(args.result_dir, 'eval', '%05d_00_train_%d_out.jpg' % (test_id[0], ratio[0])))
scipy.misc.toimage(gt_full * 255, high=255, low=0, cmin=0, cmax=255).save(
os.path.join(args.result_dir, 'eval', '%05d_00_train_%d_gt.jpg' % (test_id[0], ratio[0])))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="evaluating model")
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--input_dir', type=str, default='./dataset/Sony/short/')
parser.add_argument('--gt_dir', type=str, default='./dataset/Sony/long/')
parser.add_argument('--result_dir', type=str, default='./result_Sony/')
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=8, help='multi-threads for data loading')
parser.add_argument('--model', type=str, required=True)
args = parser.parse_args()
# Create Output Dir
if not os.path.exists(args.result_dir):
os.makedirs(args.result_dir)
test(args)
| 2.046875
| 2
|
simuvex/procedures/libc___so___6/recvfrom.py
|
praetorian-inc/simuvex
| 8
|
12781414
|
<gh_stars>1-10
import simuvex
######################################
# recvfrom
######################################
class recvfrom(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, fd, dst, length, flags): #pylint:disable=unused-argument
data = self.state.posix.read(fd, length)
self.state.memory.store(dst, data)
return length
| 2.140625
| 2
|
tf_rl/examples/DAgger/Atari/DAgger_CartPole.py
|
Rowing0914/TF2_RL
| 8
|
12781415
|
<reponame>Rowing0914/TF2_RL
import gym, argparse
from gym.wrappers import Monitor
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from run_expert import expert_play, OBS_FILE_PATH, ACT_FILE_PATH, ENV_NAME, DQN_Agent_model
from load_policy import DQN_Agent
OBSERVATION_SPACE = (4,) # for CartPole-v0
NB_ACTIONS = 2 # for CartPole-v0
EPOCHS = 5
BATCH_SIZE = 32
NUM_EPISODES = 10
BETA = 1
def create_model():
"""
Using the same architecture as the one of DQN Agent
Return:
Keras Sequential compiled model
"""
model = Sequential()
model.add(Flatten(input_shape=(1,) + OBSERVATION_SPACE))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(NB_ACTIONS, activation='softmax'))
print(model.summary())
# For a classification problem of actions
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def load_dataset():
"""
Loading the dataset which is the demo of the expert in `run_expert.py`.
Returns:
X: Observations of the game
Y: Actions
"""
X = np.load(OBS_FILE_PATH)
Y = np.load(ACT_FILE_PATH)
print(X.shape, Y.shape)
X = np.reshape(X, (X.shape[0], 1, X.shape[1]))
Y = np_utils.to_categorical(Y)
return X, Y
def train(X, Y, model):
"""
Train the model with the dataset,
then save it in h5 format after the training
Returns:
trained model
"""
model.fit(X, Y, epochs=EPOCHS, batch_size=BATCH_SIZE)
model.save_weights("./weights/DAgger_weights.h5")
model.load_weights("./weights/DAgger_weights.h5")
return model
def prep_dataset(observations, actions):
"""
Reshape and format the training dataset
Args:
observations: a list of observations in an episode
actions: a list of actions in an episode
Returns:
X: Observations of the game
Y: Actions
"""
X = np.array(observations)
X = np.reshape(X, (X.shape[0], 1, X.shape[1]))
Y = np.array(actions)
Y = np_utils.to_categorical(Y)
return X, Y
def DAgger(expert, model, env, type_policy="deterministic"):
"""
    This is the implementation of the DAgger algorithm.
    While the agent plays with the environment, it remembers the encountered states and actions in a single episode.
    Then, when an episode ends, it updates the model with the collected data.
Args:
model: Keras trained model
env: Open AI arcade game
type_policy: type of policy
- deterministic => a model chooses the action having the maximum value of its predicting result
- stochastic => a model randomly chooses the action based on its predicting result
"""
rewards = 0
for i_episode in range(NUM_EPISODES):
observation = env.reset()
env.render()
done = False
observations, actions = list(), list()
while not done:
action_agent = model.predict(observation.reshape(1,1,4))[0]
if type_policy == "deterministic":
# deterministic policy
action_agent = np.argmax(action_agent)
elif type_policy == "stochastic":
# stochastic policy
action_agent = np.random.choice(np.arange(NB_ACTIONS), p=action_agent)
# ===== if you want to use an expert for collecting the dataset, open this part! =====
# we assume that beta is 1, so we only rely on the expert for collecting the dataset
action_expert = expert.forward(observation)
action = BETA*action_expert + (1 - BETA)*action_agent
observation, reward, done, info = env.step(action)
rewards += reward
observations.append(observation)
actions.append(action)
if done:
print("Score: ", rewards)
rewards = 0
X, Y = prep_dataset(observations, actions)
model = train(X, Y, model)
break
def DAgger_play(model, env, type_policy="deterministic"):
"""
    Plays the environment with the trained model for evaluation; unlike DAgger(), no
    further data is collected and the model is not updated.
Args:
model: Keras trained model
env: Open AI arcade game
type_policy: type of policy
- deterministic => a model chooses the action having the maximum value of its predicting result
- stochastic => a model randomly chooses the action based on its predicting result
"""
rewards = 0
for i_episode in range(NUM_EPISODES):
observation = env.reset()
env.render()
done = False
while not done:
action = model.predict(observation.reshape(1,1,4))[0]
if type_policy == "deterministic":
# deterministic policy
action = np.argmax(action)
elif type_policy == "stochastic":
# stochastic policy
action = np.random.choice(np.arange(NB_ACTIONS), p=action)
observation, reward, done, info = env.step(action)
rewards += reward
if done:
print("Score: ", rewards)
rewards = 0
break
def random_play(env):
for i_episode in range(NUM_EPISODES):
observation = env.reset()
done = False
t = 0
rewards = 0
while not done:
env.render()
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
rewards += reward
if done:
print("Score: {0}".format(rewards))
break
t += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_weights', type=str, default="./weights/DAgger_weights.h5")
parser.add_argument('--type_policy', type=str, default="deterministic")
parser.add_argument('--test', action="store_true")
parser.add_argument('--random', action="store_true")
args = parser.parse_args()
env = gym.make(ENV_NAME)
expert = DQN_Agent(DQN_Agent_model)
if args.test:
model = create_model()
model.load_weights("./weights/DAgger_weights.h5")
DAgger(expert, model, env, args.type_policy)
DAgger_play(model, env, args.type_policy)
elif args.random:
random_play(env)
else:
model = create_model()
X, Y = load_dataset()
model = train(X, Y, model)
DAgger(expert, model, env, args.type_policy)
DAgger_play(model, env, args.type_policy)
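# Editor-added invocation sketch (the file name is assumed from the repository path):
#
#   python DAgger_CartPole.py                        # train from the expert dataset, then run DAgger
#   python DAgger_CartPole.py --test                 # load ./weights/DAgger_weights.h5, run DAgger and evaluate
#   python DAgger_CartPole.py --random               # baseline: random policy
#   python DAgger_CartPole.py --type_policy stochastic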
| 2.6875
| 3
|
UT330BUI/view/readsave.py
|
duncanbarth/UT330B
| 8
|
12781416
|
<reponame>duncanbarth/UT330B<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on: 15:25:51 05-Jan-2020
Author: <NAME>
This code is licensed under the MIT license
"""
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
from bokeh.models.widgets import (Button, Div, Panel)
from bokeh.layouts import column, row
import pandas as pd
# %%---------------------------------------------------------------------------
# ReadSave
# -----------------------------------------------------------------------------
class ReadSave():
"""Reads temperature and humidity data from the 330B and saves the data to
disk."""
INSTRUCTIONS = ("""To use this tab, press the buttons in the """
"""following order: <br>"""
"""<ol>"""
""" <li>"""
""" Connect to UT330B - this button makes the """
""" connection to the device"""
""" </li>"""
""" <li>"""
""" Read UT330B data - this button reads the """
""" temperature and humidity data from the device"""
""" </li>"""
""" <li>"""
""" Write to disk - this button writes the """
""" temperature and humidity data read from the """
""" device to disk, the file name is the most """
""" recent date and time in the data"""
""" </li>"""
""" <li>"""
""" Erase UT330B data - this button erases the """
""" temperature and humidity data from the device """
""" which makes room for more data."""
""" </li>"""
""" <li>"""
""" Disconnect from UT330B - this button breaks the """
""" connection to the device"""
""" </li>"""
"""</ol>""")
# %%
def __init__(self, controller):
"""Method sets up object. First part of two-part initialization."""
self.controller = controller
# Instructions header
self.instructions_header =\
Div(text="""<span style='font-weight:bold'>"""
"""How to use this tab</span>""")
# Provides instructions on how to use the tab.
self.instructions = Div(text=self.INSTRUCTIONS)
# Widgets header
self.widgets_header =\
Div(text="""<span style='font-weight:bold'>"""
"""Read, save, erase controls</span>""")
# Connects to the UT330B device
self.connect =\
Button(label="""Connect to UT330B""", button_type="""success""")
# Reads in the data from the UT330B device
self.read_ut330b =\
Button(label="""Read UT330B data""", button_type="""success""")
# Writes temperature and humidity data to disk.
self.write_to_disk =\
Button(label="""Write to disk""", button_type="""success""")
        # Removes all UT330B temperature and humidity data from the device.
self.erase_data =\
Button(label="""Erase UT330B data""", button_type="""success""")
# Disconnects from the UT330B device.
self.disconnect =\
Button(label="""Disconnect from UT330B""",
button_type="""success""")
# Status header
self.status_header =\
Div(text="""<span style='font-weight:bold'>"""
"""Connection status</span>""")
# Status information on UT330B.
self.status = Div(text=self.controller.status)
# Layout widget and figures
self.layout =\
column(children=[self.instructions_header,
self.instructions,
self.status_header,
self.status,
self.widgets_header,
row(self.connect, self.read_ut330b,
self.write_to_disk, self.erase_data,
self.disconnect)],
sizing_mode="stretch_both")
self.panel = Panel(child=self.layout,
title='Read & save')
# %%
def setup(self):
"""Method sets up object. Second part of two-part initialization."""
self.connect.on_click(self.callback_connect)
self.read_ut330b.on_click(self.callback_read_ut330b)
self.write_to_disk.on_click(self.callback_write_to_disk)
self.erase_data.on_click(self.callback_erase_data)
self.disconnect.on_click(self.callback_disconnect)
# %%
def update(self):
"""Method updates object."""
self.status.text = self.controller.status
# %%
def callback_connect(self):
"""Callback method for Connect"""
self.controller.connect()
# %%
def callback_read_ut330b(self):
"""Callback method for Read UT330B"""
self.controller.read_data()
# %%
def callback_write_to_disk(self):
"""Callback method for Write to disk"""
df = pd.DataFrame(self.controller.device_data)
if df.empty:
self.status.text = ("Can't write data to UT330B because "
"there's no data to write.")
return
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
time_str = df['Timestamp'].max().strftime("%Y%m%d_%H%M%S")
# Check folder exists, if not, create it
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(dir_path)
folder = os.path.join(dir_path, 'data')
if not os.path.isdir(folder):
os.mkdir(folder)
data_file = \
os.path.join(folder,
'UT330_data_{0}.csv'.format(time_str))
df.to_csv(data_file, index=False)
self.status.text = "Wrote data to file {0}.".format(data_file)
# %%
def callback_erase_data(self):
"""Callback method for Erase data"""
self.controller.erase()
# %%
def callback_disconnect(self):
"""Callback method for Disconnect"""
self.controller.disconnect()
| 2.5625
| 3
|
snowflake_connection/test_connect.py
|
BigMountainTiger/python-excercise-repository
| 0
|
12781417
|
# https://docs.snowflake.com/en/user-guide/python-connector-install.html
# https://pypi.org/project/python-dotenv/
# pip install snowflake-connector-python
import os
import snowflake.connector
from dotenv import load_dotenv
load_dotenv()
user = os.getenv('SNOWUSER')
pwd = os.getenv('<PASSWORD>')
account = os.getenv('ACCOUNT')
def run():
ctx = snowflake.connector.connect(
user = user,
password = <PASSWORD>,
account = f'{account}.us-east-1'
)
cs = ctx.cursor()
try:
cs.execute("SELECT current_version()")
one_row = cs.fetchone()
print(one_row[0])
finally:
cs.close()
ctx.close()
if __name__ == '__main__':
run()
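# Editor-added illustrative extension: the cursor follows the Python DB-API, so
# multi-row results are fetched the same way, e.g.
#
#   cs.execute("SELECT 1 AS a, 2 AS b")
#   print(cs.fetchall())   # [(1, 2)]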
| 2.625
| 3
|
backend/api/pokemons/urls.py
|
jaenia/pokebattle
| 0
|
12781418
|
<gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path("", views.PokemonListEndpoint.as_view(), name="pokemon_list"),
]
| 1.507813
| 2
|
IMC_GAE_NEW/train.py
|
WesleyClode/MBIMC-GAE
| 0
|
12781419
|
"""Training GCMC model on the MovieLens data set.
The script loads the full graph to the training device.
"""
import os, time
import argparse
import logging
import random
import string
import dgl
import scipy.sparse as sp
import pandas as pd
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from data import DataSetLoader
#from data_custom import DataSetLoader
from model import BiDecoder, GCMCLayer, MLPDecoder
from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger
from utils import to_etype_name
from sklearn.metrics import ndcg_score
#f1 = open(os.path.join(DATA_ROOT, 'EHCF.txt'), 'w')
def sample_negative(ratings, sample_rate, item_set):
"""
    Inputs:
    1. ratings: training ratings (pandas DataFrame with user_id / movie_id columns)
    2. sample_rate: number of negative items to sample per user (int)
    3. item_set: set of all item ids (set)
"""
#"""return all negative items & 100 sampled negative items"""
interact_status = ratings.groupby('user_id')['movie_id'].apply(set).reset_index().rename(columns={'itemId': 'interacted_items'})
#print(interact_status)
#item_list = set(item_list)
interact_status['negative_items'] = interact_status['movie_id'].apply(lambda x: item_set - x)
#print(interact_status['negative_items'])
interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, sample_rate))
return interact_status[['user_id', 'negative_items', 'negative_samples']]
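# Worked example for sample_negative() above (editor-added comment): with
# item_set = {0, 1, 2, 3} and a ratings frame in which user 0 has rated movies 1 and 2,
# the returned frame has one row for user 0 with negative_items == {0, 3} and
# negative_samples holding `sample_rate` items drawn at random from that set.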
def generate_pair(user_list, num_movie):
    # inputs: user_list and num_movie
    # num_movie is the total number of movies
rating_pairs = (np.array(np.array([[ele] * num_movie for ele in user_list]).flatten(),
dtype=np.int64),
np.array(np.array([[np.arange(num_movie)] * len(user_list)]).flatten(),
dtype=np.int64))
return rating_pairs
def generate_dec_graph(rating_pairs, num_user, num_movie):
#print(rating_pairs)
#print("***:",len(rating_pairs), num_user, num_movie)
ones = np.ones_like(rating_pairs[0])
user_movie_ratings_coo = sp.coo_matrix(
(ones, rating_pairs),
shape=(num_user, num_movie), dtype=np.float32)
g = dgl.bipartite_from_scipy(user_movie_ratings_coo, utype='_U', etype='_E', vtype='_V')
return dgl.heterograph({('user', 'rate', 'movie'): g.edges()},
num_nodes_dict={'user': num_user, 'movie': num_movie})
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self._act = get_activation(args.model_activation)
self.encoder = nn.ModuleList()
self.encoder.append(GCMCLayer(args.rating_vals,
args.src_in_units,
args.dst_in_units,
args.gcn_agg_units,
args.gcn_out_units,
args.gcn_dropout,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
device=args.device))
self.gcn_agg_accum = args.gcn_agg_accum
self.rating_vals = args.rating_vals
self.device = args.device
self.gcn_agg_units = args.gcn_agg_units
self.src_in_units = args.src_in_units
for i in range(1, args.layers):
if args.gcn_agg_accum == 'stack':
gcn_out_units = args.gcn_out_units * len(args.rating_vals)
else:
gcn_out_units = args.gcn_out_units
self.encoder.append(GCMCLayer(args.rating_vals,
args.gcn_out_units,
args.gcn_out_units,
gcn_out_units,
args.gcn_out_units,
args.gcn_dropout - i*0.1,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
ini = False,
device=args.device))
if args.decoder == "Bi":
self.decoder = BiDecoder(in_units= args.gcn_out_units, #* args.layers,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
self.decoder2 = MLPDecoder(in_units= args.gcn_out_units * 2,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
elif args.decoder == "MLP":
if args.loss_func == "CE":
num_classes = len(args.rating_vals)
else:
num_classes = 1
self.decoder = MLPDecoder(in_units= args.gcn_out_units * args.layers,
num_classes=num_classes,
num_basis=args.gen_r_num_basis_func)
self.rating_vals = args.rating_vals
def forward(self, enc_graph, dec_graph, ufeat, ifeat, Two_Stage = False):
user_out = []
movie_out = []
for i in range(0, args.layers):
user_o, movie_o = self.encoder[i](
enc_graph,
ufeat,
ifeat,
Two_Stage)
if i == 0:
user_out = user_o
movie_out = movie_o
else:
user_out += user_o / float(i + 1)
movie_out += movie_o /float(i + 1)
#user_out.append(user_o)
#movie_out.append(movie_o)
ufeat = user_o
ifeat = movie_o
#pred_ratings = self.decoder2(dec_graph, th.cat([user_out[0], user_out[1]], 1), th.cat([movie_out[1], movie_out[0]], 1))
#user_out = th.cat(user_out, 1)
#movie_out = th.cat(movie_out, 1)
#print("user_out:", user_out[0])
#print("movie_out:", movie_out[0])
pred_ratings = self.decoder(dec_graph, user_out, movie_out)
W_r_last = None
reg_loss = 0.0
'''
for rating in self.rating_vals:
rating = to_etype_name(rating)
if W_r_last is not None:
reg_loss += th.sum((self.encoder[0].W_r[rating] - W_r_last)**2)
W_r_last = self.encoder[0].W_r[rating]
#W_r_last_2 = self.encoder_2.W_r[rating]
'''
W = th.matmul(self.encoder[0].att, self.encoder[0].basis.view(self.encoder[0].basis_units, -1))
W = W.view(len(self.rating_vals), self.src_in_units, -1)
for i, rating in enumerate(self.rating_vals):
rating = to_etype_name(rating)
if i != 0:
reg_loss += -th.sum(th.cosine_similarity(W[i,:,:], W[i-1,:,:], dim=1))
return pred_ratings, reg_loss, user_out, movie_out, W
def train(args):
print(args)
dataset = DataSetLoader(args.data_name, args.device,
use_one_hot_fea=args.use_one_hot_fea,
symm=args.gcn_agg_norm_symm,
test_ratio=args.data_test_ratio,
valid_ratio=args.data_valid_ratio,
sample_rate = args.sample_rate)
print("Loading data finished ...\n")
args.src_in_units = dataset.user_feature_shape[1]
args.dst_in_units = dataset.movie_feature_shape[1]
args.rating_vals = dataset.possible_rating_values
### build the net
net = Net(args=args)
net = net.to(args.device)
nd_possible_rating_values = th.FloatTensor(dataset.possible_rating_values).to(args.device)
rating_loss_net = nn.CrossEntropyLoss()
learning_rate = args.train_lr
optimizer = get_optimizer(args.train_optimizer)(net.parameters(), lr=learning_rate)
print("Loading network finished ...\n")
    ### prepare training data
train_gt_labels = dataset.train_labels
train_gt_ratings = dataset.train_truths
### prepare the logger
NDCG_logger = MetricLogger(['recall50', 'recall100', 'recall200','ndcg50', 'ndcg100', 'ndcg200'], ['%.4f', '%.4f', '%.4f','%.4f', '%.4f', '%.4f'], os.path.join(args.save_dir, 'NDCG.csv'))
### declare the loss information
best_valid_rmse = np.inf
best_valid_ndcg = -np.inf
best_test_ndcg = []
no_better_valid = 0
best_iter = -1
count_rmse = 0
count_num = 0
count_loss = 0
dataset.train_enc_graph = dataset.train_enc_graph.int().to(args.device)
dataset.train_dec_graph = dataset.train_dec_graph.int().to(args.device)
dataset.valid_enc_graph = dataset.train_enc_graph
dataset.valid_dec_graph = dataset.valid_dec_graph.int().to(args.device)
dataset.test_enc_graph = dataset.test_enc_graph.int().to(args.device)
dataset.test_dec_graph = dataset.test_dec_graph.int().to(args.device)
train_m = dataset.train_m
test_m = dataset.test_m
tset = dataset.tset
user_num ,item_num = train_m.shape[0], train_m.shape[1]
#dataset.valid_recall_dec_graph = dataset.valid_recall_dec_graph.to(args.device)
#dataset.test_recall_dec_graph = dataset.test_recall_dec_graph.to(args.device)
print("Start training ...")
train_rating_pairs, train_rating_values = dataset._generate_pair_value(dataset.train_rating_info)
def update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data):
train_rating_pairs_zeros, train_rating_values_zeros = dataset._generate_pair_value_for_zero(dataset.train_rating_info, sampled_data)
train_rating_pairs = (np.append(train_rating_pairs[0], train_rating_pairs_zeros[0]), np.append(train_rating_pairs[1], train_rating_pairs_zeros[1]))
train_rating_values = np.append(train_rating_values, train_rating_values_zeros)
dataset.train_enc_graph = dataset._generate_enc_graph(train_rating_pairs, train_rating_values, add_support = True)
dataset.train_enc_graph = dataset.train_enc_graph.int().to(args.device)
dataset.valid_enc_graph = dataset.train_enc_graph
return dataset.train_enc_graph
def sample_data(interact_status, random_number, sample_rate):
random.seed(random_number)
        interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(list(x), sample_rate))
return interact_status[['user_id', 'negative_items', 'negative_samples']]
seed_list = np.random.randint(0, 10000, (args.train_max_iter,))
Two_Stage = False
#sampled_data = sample_data(negitive_all, random_number = seed_list[iter_idx], sample_rate = 3)
negitive_all = dataset.negative_all(dataset.train_rating_info)
sampled_data = sample_data(negitive_all, random_number = 1, sample_rate = 99)
dataset.train_enc_graph = update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data)
dataset.valid_enc_graph = dataset.train_enc_graph
for iter_idx in range(1, args.train_max_iter):
#sampled_data = sample_data(negitive_all, random_number = 1, sample_rate = 3)
#dataset.train_enc_graph = update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data)
print("iter:",iter_idx)
net.train()
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, dataset.train_dec_graph,
dataset.user_feature, dataset.movie_feature, Two_Stage)
loss = rating_loss_net(pred_ratings, train_gt_labels).mean() + args.ARR * reg_loss
count_loss += loss.item()
optimizer.zero_grad()
loss.backward(retain_graph=True)
nn.utils.clip_grad_norm_(net.parameters(), args.train_grad_clip)
optimizer.step()
real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
#print(real_pred_ratings.shape)
        # evaluate using the predicted ratings
if iter_idx < 100:
if iter_idx % 10 == 0:
recall50_, recall100_, recall200_, ndcg50_, ndcg100_, ndcg200_ = \
dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values)
#dev_cold(u_train,i_train, tset, train_m, test_m)
NDCG_logger.log(recall50 = recall50_, recall100 = recall100_, recall200 = recall200_, ndcg50 = ndcg50_, ndcg100 = ndcg100_, ndcg200 = ndcg200_)
        if iter_idx >= 500:
            recall50_, recall100_, recall200_, ndcg50_, ndcg100_, ndcg200_ = \
                dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values)
            NDCG_logger.log(recall50 = recall50_, recall100 = recall100_, recall200 = recall200_, ndcg50 = ndcg50_, ndcg100 = ndcg100_, ndcg200 = ndcg200_)
#dev_cold(u_train,i_train, tset, train_m, test_m)
NDCG_logger.close()
def dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values):
"""
Evaluates model on a dev set
"""
batch_size = 128
#print("tset:",tset)
user_te = np.array(list(tset.keys()))
#print("user_te:",user_te)
user_te2 = user_te[:, np.newaxis]
#user_te2 = user_te
ll = int(len(user_te) / batch_size) + 1
recall50 = []
recall100 = []
recall200 = []
ndcg50 = []
ndcg100 = []
ndcg200 = []
for batch_num in range(ll):
print(batch_num/ll*100,"%")
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, len(user_te))
        # u_batch is the list of user ids in this batch
u_batch = user_te2[start_index:end_index]
        # batch_users is the number of users in this batch
batch_users = end_index - start_index
        num_user = train_m.shape[0]  # total number of users
        num_movie = train_m.shape[1]  # total number of items
user_list = user_te[start_index:end_index]
batch_rating_pairs = generate_pair(user_list, num_movie)
batch_dec_graph = generate_dec_graph(batch_rating_pairs, num_user, num_movie).to(args.device)
Two_Stage = False
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, batch_dec_graph, dataset.user_feature, dataset.movie_feature, Two_Stage)
real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
u_b = user_te[start_index:end_index]
real_pred_ratings = real_pred_ratings.cpu()
#print("pred_shape:", real_pred_ratings.shape)
pre = real_pred_ratings.reshape(batch_users, -1)
#print("pred_shape:", pre.shape)
#pre = np.reshape(real_pred_ratings, (batch_users, num_movie))
pre = pre.detach().numpy()
idx = np.zeros_like(pre, dtype=bool)
idx[train_m[u_b].nonzero()] = True
pre[idx] = -np.inf
recall = []
for kj in [50, 100, 200]:
idx_topk_part = np.argpartition(-pre, kj, 1)
# print pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
# print idx_topk_part
pre_bin = np.zeros_like(pre, dtype=bool)
pre_bin[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]] = True
# print pre_bin
true_bin = np.zeros_like(pre, dtype=bool)
true_bin[test_m[u_b].nonzero()] = True
tmp = (np.logical_and(true_bin, pre_bin).sum(axis=1)).astype(np.float32)
#print("tmp:",tmp)
recall.append(tmp / np.minimum(kj, true_bin.sum(axis=1)))
#print("recall:",tmp / np.minimum(kj, true_bin.sum(axis=1)))
# print tmp
#print("recall:",recall)
ndcg = []
for kj in [20, 40, 80]:
            # get the (unordered) indices of the top-kj scoring items
idx_topk_part = np.argpartition(-pre, kj, 1)
#print("pre:",pre.shape)
#
#print("idx_topk_part[:, :kj]:",idx_topk_part[:, :kj])
            # gather each user's top-kj predicted scores
topk_part = pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
#print("topk_part:",topk_part[0:2])
idx_part = np.argsort(-topk_part, axis=1)
            # sort the predicted scores in descending order and keep the resulting index order
#print("idx_part:",idx_part[0:2])
idx_topk = idx_topk_part[np.arange(end_index - start_index)[:, np.newaxis], idx_part]
            # map back to the corresponding item indices in the original ordering
#print("idx_topk:",idx_topk[0:2])
tp = np.log(2) / np.log(np.arange(2, kj + 2))
test_batch = test_m[u_b]
#print("test_batch:",test_batch)
DCG = (test_batch[np.arange(batch_users)[:, np.newaxis], idx_topk].toarray() * tp).sum(axis=1)
            # only the ranks at which ground-truth items appear contribute to the DCG
#print("tp:",tp)
#print("DCG:",DCG)
IDCG = np.array([(tp[:min(n, kj)]).sum()
for n in test_batch.getnnz(axis=1)])
#print("IDCG:",np.array([(tp[:min(n, kj)]).sum()
# for n in test_batch.getnnz(axis=1)]))
ndcg.append(DCG / IDCG)
#print("ndcg:",ndcg)
recall50.append(recall[0])
recall100.append(recall[1])
recall200.append(recall[2])
ndcg50.append(ndcg[0])
ndcg100.append(ndcg[1])
ndcg200.append(ndcg[2])
recall50 = np.hstack(recall50)
recall100 = np.hstack(recall100)
recall200 = np.hstack(recall200)
ndcg50 = np.hstack(ndcg50)
ndcg100 = np.hstack(ndcg100)
ndcg200 = np.hstack(ndcg200)
print("recall50:",recall50[0:10])
print("ndcg50:", ndcg50.shape)
print("recall50:", np.mean(recall50), "ndcg50:",np.mean(ndcg50))
print("recall100:",np.mean(recall100),"ndcg100:", np.mean(ndcg100))
print("recall200:",np.mean(recall200), "ndcg200:",np.mean(ndcg200))
#f1.write(str(np.mean(recall100)) + ' ' + str(np.mean(ndcg100)) + '\n')
#f1.flush()
return np.mean(recall50), np.mean(recall100), np.mean(recall200), np.mean(ndcg50), np.mean(ndcg100), np.mean(ndcg200)
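# Minimal sketch added for clarity (never called by the training loop): recall@k and
# NDCG@k on dense toy arrays, mirroring the masking / argpartition logic of dev_step.
def _toy_metric_example():
    pred = np.array([[0.9, 0.1, 0.8, 0.3]])          # scores for 1 user, 4 items
    truth = np.array([[1, 0, 0, 1]])                 # ground-truth interactions
    k = 2
    topk = np.argpartition(-pred, k, 1)[:, :k]       # indices of the k highest scores
    hit = np.zeros_like(pred, dtype=bool)
    hit[np.arange(pred.shape[0])[:, np.newaxis], topk] = True
    recall_k = np.logical_and(hit, truth > 0).sum(axis=1) / np.minimum(k, truth.sum(axis=1))
    tp = np.log(2) / np.log(np.arange(2, k + 2))     # DCG discount 1 / log2(rank + 1)
    order = topk[:, np.argsort(-pred[0, topk[0]])]   # top-k indices sorted by score
    dcg = (truth[np.arange(1)[:, np.newaxis], order] * tp).sum(axis=1)
    idcg = np.array([tp[:min(int(n), k)].sum() for n in truth.sum(axis=1)])
    return recall_k, dcg / idcg                      # -> (array([0.5]), array([0.613...]))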
def config():
parser = argparse.ArgumentParser(description='PGMC')
parser.add_argument('--seed', default=125, type=int) #123
parser.add_argument('--device', default='1', type=int,
help='Running device. E.g `--device 0`, if using cpu, set `--device -1`')
parser.add_argument('--save_dir', type=str, help='The saving directory')
parser.add_argument('--save_id', type=int, help='The saving log id')
parser.add_argument('--silent', action='store_true')
parser.add_argument('--data_name', default='yahoo_music', type=str,
help='The dataset name: ml-100k, ml-1m, ml-10m, flixster, douban, yahoo_music')
parser.add_argument('--data_test_ratio', type=float, default=0.1) ## for ml-100k the test ration is 0.2
parser.add_argument('--data_valid_ratio', type=float, default=0.05)
parser.add_argument('--use_one_hot_fea', action='store_true', default=False)
parser.add_argument('--model_activation', type=str, default="leaky")
parser.add_argument('--sample_rate', type=int, default=1)
parser.add_argument('--gcn_dropout', type=float, default=0.7)
parser.add_argument('--gcn_agg_norm_symm', type=bool, default=True)
parser.add_argument('--gcn_agg_units', type=int, default=1800)
parser.add_argument('--gcn_agg_accum', type=str, default="sum")
parser.add_argument('--gcn_out_units', type=int, default=75)
parser.add_argument('--gen_r_num_basis_func', type=int, default=2)
parser.add_argument('--train_max_iter', type=int, default=50000)
parser.add_argument('--train_log_interval', type=int, default=1)
parser.add_argument('--train_valid_interval', type=int, default=1)
parser.add_argument('--train_optimizer', type=str, default="adam")
parser.add_argument('--decoder', type=str, default="Bi")
parser.add_argument('--train_grad_clip', type=float, default=1.0)
parser.add_argument('--train_lr', type=float, default=0.01)
parser.add_argument('--train_min_lr', type=float, default=0.001)
parser.add_argument('--train_lr_decay_factor', type=float, default=0.5)
parser.add_argument('--train_decay_patience', type=int, default=50)
parser.add_argument('--layers', type=int, default=1)
parser.add_argument('--train_early_stopping_patience', type=int, default=200)
parser.add_argument('--share_param', default=True, action='store_true')
parser.add_argument('--ARR', type=float, default='0.000004')
parser.add_argument('--loss_func', type=str, default='CE')
parser.add_argument('--sparse_ratio', type=float, default=0.0)
args = parser.parse_args()
args.device = th.device(args.device) if args.device >= 0 else th.device('cpu')
### configure save_fir to save all the info
now = int(round(time.time()*1000))
now02 = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(now/1000))
if args.save_dir is None:
args.save_dir = args.data_name+"_" + ''.join(now02)
if args.save_id is None:
args.save_id = np.random.randint(20)
args.save_dir = os.path.join("log", args.save_dir)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
return args
if __name__ == '__main__':
'''
ml_1m : param, ARR = 0.0000004, gcn_agg_units = 1000, gcn_agg_accum = sum, tmse = 0.8322, valid_ratio = 0.05
ml_100k : param, ARR = 0.000001, gcn_agg_units = 500, gcn_agg_accum = sum, tmse = 0.9046, valid_ratio = 0.05
    1layer ml_1m : param, ARR = 0.0000005, gcn_agg_units = 2400, gcn_agg_accum = sum, tmse = 0.8305, valid_ratio = 0.05, gcn_out_units = 75
1layer ml_100k : param, pos_emb, ARR = 0.000005, gcn_agg_units = 750, gcn_agg_accum = sum, tmse = 0.8974, valid_ratio = 0.05, gcn_out_units = 75
2layer ml_100k : param, pos_emb, ARR = 0.000005, gcn_agg_units = 750, gcn_agg_accum = sum, tmse = 0.8969, valid_ratio = 0.05, gcn_out_units = 75
    2layer ml_1m : param, ARR = 0.0000004, gcn_agg_units = 1800, gcn_agg_accum = sum, tmse = 0.8319, valid_ratio = 0.05, gcn_out_units = 75
'''
args = config()
np.random.seed(args.seed)
th.manual_seed(args.seed)
if th.cuda.is_available():
th.cuda.manual_seed_all(args.seed)
train(args)
| 2.703125
| 3
|
sequence_analysis_test_1.py
|
hugodecasta/representron
| 0
|
12781420
|
seq1 = 'defdefdefLLLOOLLLdefdefdef'
seq2 = 'abcabcabcLLLOOLLLaaaaaaaaa'
def create_network(sequence):
net = {'@':sequence[0],None:[]}
for i in range(len(sequence)):
sym = sequence[i]
next_sym = None
if i < len(sequence)-1:
next_sym = sequence[i+1]
if sym not in net:
net[sym] = []
net[sym].append(next_sym)
return net
def is_direct(stack):
f_sym = stack[0]
for sym in stack:
if not sym == f_sym:
return False
return True
def find_episods(net):
episods = []
sym_to_process = [net['@']]
current_episod = {}
while len(sym_to_process) > 0:
sym = sym_to_process[0]
del sym_to_process[0]
stack = net[sym]
if current_episod == {}:
current_episod['@'] = sym
current_episod[sym] = stack
if not is_direct(stack):
episods.append(current_episod)
current_episod = {}
else:
for ssym in stack:
if ssym not in sym_to_process:
sym_to_process.append(ssym)
return episods
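# Worked example (added for clarity): for seq1, net['@'] is 'd' (the first symbol) and
# every other key maps to the list of symbols that follow it in the sequence; every 'd'
# is followed by 'e', so is_direct(net['d']) is True, whereas net['f'] mixes 'd', 'L'
# and the trailing None, so the current episode is closed when 'f' is processed.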
net = create_network(seq1)
epi = find_episods(net)
print(net)
print(epi)
| 2.703125
| 3
|
sample/ch6/ma-janome.py
|
wagase/scraping
| 0
|
12781421
|
<filename>sample/ch6/ma-janome.py
from janome.tokenizer import Tokenizer
t = Tokenizer()
malist = t.tokenize("庭には二羽鶏がいる。")
for n in malist:
print(n)
| 2.46875
| 2
|
website/CMS/models.py
|
bubai666sen/Visitor-Manager
| 0
|
12781422
|
from django.db import models
# Create your models here.
class Page(models.Model):
STATUS_CHOICES = (
(1, 'Active'),
(2, 'Inactive'),
)
PAGE_CHOICES = (
(1, 'Home'),
(2, 'About Us'),
)
page = models.PositiveSmallIntegerField(choices=PAGE_CHOICES,unique=True)
title = models.CharField(max_length = 255, null=True, blank=True)
heading = models.CharField(max_length = 255, null=True, blank=True)
short_description = models.CharField(max_length = 255, null=True, blank=True)
long_description = models.CharField(max_length = 255, null=True, blank=True)
image = models.ImageField(upload_to='static/uploads/cms/', null=True, blank=True)
background_image = models.ImageField(upload_to='static/uploads/cms/', null=True, blank=True)
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, null=True, blank=True)
def __str__(self): # __unicode__ for Python 2
return self.title + " (" + str(self.page) + ")"
| 2.328125
| 2
|
monroe/solver/solve.py
|
cypher-me/HAS-Qualifier-Challenges
| 75
|
12781423
|
<filename>monroe/solver/solve.py<gh_stars>10-100
#!/bin/python
import os,sys,time
from binascii import hexlify, unhexlify
from struct import pack, unpack
from pwnlib.tubes.remote import remote
from pwnlib.elf import ELF
def sendEnableTlm(sock):
msg = unhexlify("1880C0000011079A3132372E302E302E3100000000000000")
sock.send( msg )
print("Enabled TLM")
def addSchEntry(sock):
msg = unhexlify("1895C000000D0591040005000100010000002A00")
sock.send( msg )
print("Added msg-id \"42\" to slot 4, activity 5")
sys.stdout.flush()
if __name__ == "__main__":
Host = os.getenv("HOST", "localhost")
Port = int(os.getenv("PORT", 31360))
sockChal = remote(Host, Port)
print("Initiating challenge by connecting to "+ str(Host)+":"+ str(Port))
sys.stdout.flush()
ticket = os.getenv("TICKET", "")
if len(ticket):
sockChal.recvline()
sockChal.send(ticket + "\n")
sockChal.recvuntil(b'tcp:', drop=True)
line = sockChal.recvline().strip()
print("Connecting to", line)
sys.stdout.flush()
sockChal.recvuntil(b'CFE_ES_Main entering OPERATIONAL state', drop=True)
time.sleep(5)
TlmHost,TlmPort = line.split(b":")
TlmHost = Host
print("Connecting to Tlm server at "+str(TlmHost)+":"+str(TlmPort))
sockTlm = remote(TlmHost, TlmPort)
sendEnableTlm(sockTlm)
time.sleep(3)
addSchEntry(sockTlm)
time.sleep(3)
addSchEntry(sockTlm)
time.sleep(3)
#outs = sockTlm.recv(2000)
#print(outs)
sockTlm.recvuntil(b"flag{",timeout=10)
flag = "flag{" + sockTlm.recvuntil("}").decode('utf-8')
print(flag)
sys.stdout.flush()
| 2.203125
| 2
|
kissim/api/__init__.py
|
volkamerlab/kissim
| 15
|
12781424
|
<filename>kissim/api/__init__.py
"""
Defines easy programmatic access for any entry point.
"""
from .encode import encode
from .normalize import normalize
from .compare import compare
from .weights import weights
from .outliers import outliers
from .subset import subset
| 1.265625
| 1
|
utils/genomes.py
|
AndersenLab/pyPipeline
| 5
|
12781425
|
<filename>utils/genomes.py
import os, sys
import yaml
from utils import *
def fetch_genome(reference_name):
"""
    Downloads a reference genome and prepares it for alignment
"""
from utils import script_dir
    genome_list = yaml.safe_load(open(script_dir + "/utils/genomes.yaml","r"))
makedir("genomes")
if reference_name not in genome_list:
msg("Reference Genome not available", "error")
ftp_loc = genome_list[reference_name]
filename = os.path.split(ftp_loc)[1]
makedir("{script_dir}/genomes/{reference_name}".format(**locals()))
reference_loc = "{script_dir}/genomes/{reference_name}/{filename}".format(**locals())
if not file_exists( reference_loc + ".sa"):
print("Downloading {filename}".format(**locals()))
os.system("curl {ftp_loc} > {script_dir}/genomes/{reference_name}/{filename}".format(**locals()))
# Unzip and rezip with bgzip
if filename.endswith(".gz"):
os.system("gunzip {reference_loc} && bgzip {reference_loc2}".format(reference_loc=reference_loc, reference_loc2=reference_loc.replace(".gz","")))
print("Indexing {script_dir}/genomes/{reference_name}/{filename}".format(**locals()))
os.system("bwa index {script_dir}/genomes/{reference_name}/{filename}".format(**locals()))
else:
msg("Reference Already downloaded and indexed.", "error")
def list_genomes():
"""
Prints a list of available genomes.
"""
    genome_list = yaml.safe_load(open(script_dir + "/utils/genomes.yaml","r"))
print("")
print("\033[1m%-30s\t%-30s\033[0m" % ("Reference Name", "Location"))
for k,v in genome_list.items():
print("%-30s\t%-30s" % (k, v))
print("")
| 2.96875
| 3
|
tests/unittests/load_functions/outside_main_code_in_main/main.py
|
anandagopal6/azure-functions-python-worker
| 277
|
12781426
|
<reponame>anandagopal6/azure-functions-python-worker
# This function app is to ensure the code outside main() function
# should only get loaded once in __init__.py
from .count import invoke, get_invoke_count, reset_count
invoke()
def main(req):
count = get_invoke_count()
reset_count()
return f'executed count = {count}'
| 2.1875
| 2
|
src/call_variants.py
|
NCBI-Hackathons/PSST
| 5
|
12781427
|
<reponame>NCBI-Hackathons/PSST<filename>src/call_variants.py
#!/usr/bin/env python
# Built-in python packages
from __future__ import division # This modifies Python 2.7 so that any expression of the form int/int returns a float
import getopt
import sys
import os
from itertools import combinations
from multiprocessing.dummy import Pool
# Project-specific packages
from queries_with_ref_bases import query_contains_ref_bases
def get_accession_map(fasta_path):
'''
Suppose in the FASTA file used as reference for makeblastdb, there are n sequences.
Magic-BLAST labels these sequences as the order in which they appear when outputting in tabulated format.
For example, suppose that the SNP rs0001 appears second from the top in the FASTA file.
Then Magic-BLAST assigns the label '1' to rs0001 whenever it appears in an alignment.
This function returns a dictionary that serves as a map from integers (in string datatype) to accessions
e.g. accession_map['1'] == rs0001 in our above example.
Inputs
- (str) fasta_path: path to the FASTA file used as reference for makeblastdb
Outputs
- (dict) accession_map: the map from integers to accessions
'''
accession_map = {}
with open(fasta_path,'r') as fasta:
id_number = 0
for line in fasta:
if line[0] == ">":
accession = line[1:].rstrip()
accession_map[str(id_number)] = accession
id_number += 1
return accession_map
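# Example (illustrative only): a FASTA file whose first two header lines are ">rs0001"
# and ">rs0002" yields accession_map == {'0': 'rs0001', '1': 'rs0002'}, matching the
# integer labels Magic-BLAST assigns to reference sequences in tabulated output.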
def get_mbo_paths(directory):
'''
Given a directory, retrieves the paths of all files within the directory whose extension is .mbo
Inputs
- (str) directory: directory to be scoured for mbo files
Outputs
- paths: a dictionary where the keys are accessions and the values are paths
'''
paths = {}
for file in os.listdir(directory):
if file.endswith(".mbo"):
# We only want the accession number, not the extension as well
accession = os.path.basename(file).split('.')[0]
path = os.path.join(directory,file)
paths[accession] = path
return paths
def get_sra_alignments(map_paths_and_partition):
'''
Given a list of paths as described in the function get_mbo_paths, retrieves the BTOP string for each
alignment.
Inputs
- map_paths_and_partition: a dict which contains the following:
        - partition: the list of SRA accessions to process in this worker
        - paths: a dict mapping accessions to the paths of their .mbo files
- (dict) accession_map: the map between integers and accessions
Outputs
- a dictionary where keys are SRA accessions and the values are alignment dictionaries
'''
accession_map = map_paths_and_partition['map']
paths = map_paths_and_partition['paths']
partition = map_paths_and_partition['partition']
sra_alignments = {}
for accession in partition:
path = paths[accession]
alignments = []
with open(path,'r') as mbo:
for line in mbo:
tokens = line.split()
# Skip the line if it is commented, the number of fields isn't equal to 25 or
# the query read was not aligned
if line[0] != "#" and len(tokens) == 25 and tokens[1] != "-":
var_acc = accession_map[ tokens[1] ]
ref_start = int(tokens[8])
ref_stop = int(tokens[9])
if ref_start > ref_stop:
temp = ref_start
ref_start = ref_stop
ref_stop = temp
btop = tokens[16]
alignment = { 'var_acc': var_acc, 'ref_start': ref_start,\
'ref_stop': ref_stop, 'btop': btop }
alignments.append( alignment )
sra_alignments[accession] = alignments
return sra_alignments
def get_var_info(path):
'''
Retrieves the flanking sequence lengths for the SNP sequences
Inputs
- path: path to the file that contains the flanking sequence lengths
Outputs
    - var_info: a dictionary where the keys are SNP accessions and the values are dicts giving the start
                position of the variant, the stop position of the variant and the length of the variant sequence
'''
var_info = {}
with open(path,'r') as input_file:
for line in input_file:
tokens = line.split()
if len(tokens) == 4:
accession = tokens[0]
start = int(tokens[1])
stop = int(tokens[2])
length = int(tokens[3])
var_info[accession] = {'start':start,'stop':stop,'length':length}
return var_info
def call_variants(var_freq):
'''
Determines which variants exist in a given SRA dataset given the number of reads that do and do not contain
    the variant
Inputs
- var_freq: a dict where the keys are SNP accessions and the values are dicts which contain the frequency of
reads that do and reads that do not contain the SNP
Outputs
    - variants: dict with the keys 'homozygous' and 'heterozygous', each mapping to the list of variant
                accessions called at that zygosity from the given read counts
'''
variants = {'heterozygous':[],'homozygous':[]}
for var_acc in var_freq:
frequencies = var_freq[var_acc]
true = frequencies['true']
false = frequencies['false']
try:
percentage = true/(true+false)
if percentage > 0.8: # For now, we use this simple heuristic.
variants['homozygous'].append(var_acc)
elif percentage > 0.3:
variants['heterozygous'].append(var_acc)
except ZeroDivisionError: # We ignore division errors because they correspond to no mapped reads
pass
return variants
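# Worked example of the calling heuristic (added for clarity): with var_freq ==
# {'rs0001': {'true': 9, 'false': 1}, 'rs0002': {'true': 4, 'false': 6}}, rs0001 has
# 9/10 = 0.9 > 0.8 supporting reads and is called homozygous, while rs0002 has
# 4/10 = 0.4, which falls in (0.3, 0.8] and is called heterozygous.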
def call_sra_variants(alignments_and_info):
'''
For all SRA accession, determines which variants exist in the SRA dataset
Inputs
- alignments_and_info: a dict which contains
- sra_alignments: dict where the keys are SRA accessions and the values are lists of alignment dicts
- var_info: dict where the keys are variant accessions and the values are information concerning the variants
- keys: list which contains the keys of the SRA accessions to analyze
Outputs
    - variants: dict where the keys are SRA accessions and the value is another dict that contains the homozygous and
heterozygous variants in separate lists
'''
sra_alignments = alignments_and_info['alignments']
    var_info = alignments_and_info['info']
keys = alignments_and_info['keys']
variants = {}
for sra_acc in keys:
alignments = sra_alignments[sra_acc]
var_freq = {}
for alignment in alignments:
var_acc = alignment['var_acc']
# Get the flank information
info = var_info[var_acc]
if var_acc not in var_freq:
var_freq[var_acc] = {'true':0,'false':0}
# Determine whether the variant exists in the particular SRA dataset
var_called = query_contains_ref_bases(alignment,info)
if var_called == True:
var_freq[var_acc]['true'] += 1
elif var_called == False:
var_freq[var_acc]['false'] += 1
sra_variants = call_variants(var_freq)
variants[sra_acc] = sra_variants
return variants
def create_tsv(variants,output_path):
'''
Creates a TSV file containing the set of variants each SRA dataset contains.
Inputs
    - variants: dict where the keys are SRA accessions and the value is another dict that contains the homozygous and
heterozygous variants in separate lists
- output_path: path to where to construct the output file
'''
with open(output_path,'w') as tsv:
header = "SRA\tHeterozygous SNPs\tHomozygous SNPs\n"
tsv.write(header)
for sra_acc in variants:
line = "%s" % (sra_acc)
sra_variants = variants[sra_acc]
line = line + "\t"
for var_acc in sra_variants['heterozygous']:
line = line + var_acc + ","
line = line + "\t"
for var_acc in sra_variants['homozygous']:
line = line + var_acc + ","
tsv.write( line.rstrip() )
tsv.write('\n')
def create_variant_matrix(variants):
'''
Returns an adjacency matrix that represents the graph constructed out of the variants such that:
1. Every vertex represents a variant and every variant is represented by a vertex
2. An edge (line) connects two vertices if and only if there exists an SRA dataset that contains both of the
corresponding variants
The matrix is represented by a dictionary of dictionaries. To save memory, we do not store 0 entries or variants
without any incident edges.
Inputs
    - variants: dict where the keys are SRA accessions and the value is another dict that contains the homozygous and
heterozygous variants in separate lists
Outputs
- matrix: a dict which, for any two keys (variants) variant_1 and variant_2, satisfies the following -
1. type(matrix[variant_1]) is DictType
2. type(matrix[variant_1][variant_2]) is IntType and matrix[variant_1][variant_2] >= 1
3. matrix[variant_1][variant_2] == matrix[variant_2][variant_1]
all of which holds if and only if variant_1 and variant_2 exist in the dictionaries as keys
'''
matrix = {}
for sra_acc in variants:
sra_variants = variants[sra_acc]
all_variants = []
if 'homozygous' in sra_variants:
all_variants += sra_variants['homozygous']
if 'heterozygous' in sra_variants:
all_variants += sra_variants['heterozygous']
# Get all of the unique 2-combinations of the variants
two_combinations = list( combinations(all_variants,2) )
for pair in two_combinations:
variant_1 = pair[0]
variant_2 = pair[1]
if variant_1 not in matrix:
matrix[variant_1] = {}
if variant_2 not in matrix:
matrix[variant_2] = {}
if variant_2 not in matrix[variant_1]:
matrix[variant_1][variant_2] = 0
if variant_1 not in matrix[variant_2]:
matrix[variant_2][variant_1] = 0
matrix[variant_1][variant_2] += 1
matrix[variant_2][variant_1] = matrix[variant_1][variant_2]
return matrix
def partition(lst,n):
'''
Partitions a list into n lists
Inputs
- (list) lst
Outputs
- partitioned_lists: a list of lists
'''
division = len(lst)/float(n)
    return [ lst[int(round(division * i)): int(round(division * (i + 1)))] for i in range(n) ]
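# Example (illustrative only): partition([1, 2, 3, 4, 5, 6], 3) returns
# [[1, 2], [3, 4], [5, 6]], i.e. n contiguous, roughly equal-sized slices of lst.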
def combine_list_of_dicts(list_of_dicts):
'''
Given a list of dicts, returns a single dict such that (key,value) pairs from each original dict exists in the
new single dict
Inputs
- list_of_dicts: a list of dicts
Outputs
- combined_dict
'''
combined_dict = list_of_dicts[0]
for i in range(1,len(list_of_dicts)):
combined_dict.update( list_of_dicts[i] )
return combined_dict
def unit_tests():
variants = {}
variants['sra_1'] = {'homozygous':['a','b'],'heterozygous':['c','e']}
variants['sra_2'] = {'homozygous':['a','c','d']}
variants['sra_3'] = {'heterozygous':['b','d','e']}
matrix = create_variant_matrix(variants)
print(matrix)
for variant_1 in matrix:
for variant_2 in matrix[variant_1]:
left_hand_side = matrix[variant_1][variant_2]
right_hand_side = matrix[variant_2][variant_1]
assert( left_hand_side >= 1 )
assert( right_hand_side >= 1 )
assert( left_hand_side == right_hand_side )
print("All unit tests passed!")
if __name__ == "__main__":
help_message = "Description: Given a directory with Magic-BLAST output files where each output file\n" \
+ " contains the alignment between an SRA dataset and known variants in a human\n" \
+ " genome, this script determines which variants each SRA dataset contains\n" \
+ " using a heuristic."
usage_message = "Usage: %s\n[-h (help and usage)]\n[-m <directory containing .mbo files>]\n" % (sys.argv[0]) \
+ "[-v <path to variant info file>]\n[-f <path to the reference FASTA file>]\n"\
+ "[-o <output path for TSV file>]\n[-p <num of threads>]\n[-t <unit tests>]"
options = "htm:v:f:o:p:"
try:
opts,args = getopt.getopt(sys.argv[1:],options)
except getopt.GetoptError:
print("Error: unable to read command line arguments.")
sys.exit(1)
if len(sys.argv) == 1:
print(help_message)
print(usage_message)
sys.exit()
mbo_directory = None
var_info_path = None
output_path = None
fasta_path = None
threads = 1
for opt, arg in opts:
if opt == '-h':
print(help_message)
print(usage_message)
sys.exit(0)
elif opt == '-m':
mbo_directory = arg
elif opt == '-v':
var_info_path = arg
elif opt == '-o':
output_path = arg
elif opt == '-f':
fasta_path = arg
elif opt == '-p':
            threads = int(arg)
elif opt == '-t':
unit_tests()
sys.exit(0)
opts_incomplete = False
if mbo_directory == None:
print("Error: please provide the directory containing your Magic-BLAST output files.")
opts_incomplete = True
if var_info_path == None:
print("Error: please provide the path to the file containing flanking sequence information.")
opts_incomplete = True
if output_path == None:
print("Error: please provide an output path for the TSV file.")
opts_incomplete = True
if fasta_path == None:
print("Error: please provide the path to the FASTA file used as reference for makeblastdb")
opts_incomplete = True
if opts_incomplete:
print(usage_message)
sys.exit(1)
var_info = get_var_info(var_info_path)
accession_map = get_accession_map(fasta_path)
paths = get_mbo_paths(mbo_directory)
# Retrieve the alignments concurrently
get_alignments_threads = min(threads,len(paths.keys()))
    paths_partitions = partition( list(paths.keys()), get_alignments_threads )
map_paths_and_partitions = [{'map':accession_map,'paths':paths,'partition':path_partition} \
for path_partition in paths_partitions]
pool = Pool(processes=get_alignments_threads)
sra_alignments_pool = pool.map(get_sra_alignments,map_paths_and_partitions)
pool.close()
pool.join()
sra_alignments = combine_list_of_dicts(sra_alignments_pool)
# Call variants concurrently
    sra_keys = list(sra_alignments.keys())
variant_call_threads = min( threads, len(sra_keys) )
keys_partitions = partition(sra_keys, variant_call_threads)
# alignments, info and key partitions
alignments_and_info_part = [{'alignments':sra_alignments,'keys':keys,'info':var_info} for keys in keys_partitions]
pool = Pool(processes=variant_call_threads)
variants_pool = pool.map(call_sra_variants,alignments_and_info_part)
pool.close()
pool.join()
called_variants = combine_list_of_dicts(variants_pool)
create_tsv(called_variants,output_path)
matrix = create_variant_matrix(called_variants)
| 2.921875
| 3
|
scheme/tests/16.py
|
sfailsthy/CS-61A
| 0
|
12781428
|
test = {
'name': 'Problem 16',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
scm> (define y 1)
7f8ce7b9b26d2b5922718b99265fafdc
# locked
scm> (define f (mu (x) (+ x y)))
17c5c9131eea78a6dfb3175a8e97e160
# locked
scm> (define g (lambda (x y) (f (+ x x))))
280bf507069c60ae8be75781ba444342
# locked
scm> (g 3 7)
824c8a87e1a23e1693a36ab2b26e2ceb
# locked
""",
'hidden': False,
'locked': True
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'scheme'
}
]
}
| 1.757813
| 2
|
Torch_rl/model/Network.py
|
afei339/Torch-rl
| 1
|
12781429
|
<reponame>afei339/Torch-rl<gh_stars>1-10
import torch
import numpy as np
from torch import nn
from collections import OrderedDict
from torch.distributions import Normal, Categorical
from torch.autograd import Variable
from copy import deepcopy
# from graphviz import Digraph
#
# def make_dot(var, params=None):
# """ Produces Graphviz representation of PyTorch autograd graph
# Blue nodes are the Variables that require grad, orange are Tensors
# saved for backward in torch.autograd.Function
# Args:
# var: output Variable
# params: dict of (name, Variable) to add names to node that
# require grad (TODO: make optional)
# """
# if params is not None:
# assert isinstance(params.values()[0], Variable)
# param_map = {id(v): k for k, v in params.items()}
#
# node_attr = dict(style='filled',
# shape='box',
# align='left',
# fontsize='12',
# ranksep='0.1',
# height='0.2')
# dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
# seen = set()
#
# def size_to_str(size):
# return '(' + (', ').join(['%d' % v for v in size]) + ')'
#
# def add_nodes(var):
# if var not in seen:
# if torch.is_tensor(var):
# dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
# elif hasattr(var, 'variable'):
# u = var.variable
# name = param_map[id(u)] if params is not None else ''
# node_name = '%s\n %s' % (name, size_to_str(u.size()))
# dot.node(str(id(var)), node_name, fillcolor='lightblue')
# else:
# dot.node(str(id(var)), str(type(var).__name__))
# seen.add(var)
# if hasattr(var, 'next_functions'):
# for u in var.next_functions:
# if u[0] is not None:
# dot.edge(str(id(u[0])), str(id(var)))
# add_nodes(u[0])
# if hasattr(var, 'saved_tensors'):
# for t in var.saved_tensors:
# dot.edge(str(id(t)), str(id(var)))
# add_nodes(t)
#
# add_nodes(var.grad_fn)
# return dot
#
# def show(net):
# x = Variable(torch.randn(net.layer_infor[0]))
# y = net(x)
# g = make_dot(y)
# # g.view()
# return g
class DenseNet(nn.Module):
def __init__(self, input_size, output_size, hidden_layer=[64, 64],
hidden_activate=nn.ReLU(), output_activate=None,
BatchNorm = False):
super(DenseNet, self).__init__()
first_placeholder = np.insert(np.array(hidden_layer), 0, input_size)
second_placeholder = np.array(hidden_layer)
self._layer_num = np.append(first_placeholder, output_size)
self.layer = []
for i in range(len(second_placeholder)):
layer = []
layer.append(('linear'+str(i), nn.Linear(first_placeholder[i], second_placeholder[i], bias=True)))
layer.append(('activation'+str(i),hidden_activate))
if BatchNorm:
layer.append(('BatchNormalization'+str(i), nn.BatchNorm1d(second_placeholder[i], eps=1e-05, momentum=0.1, affine=True,
track_running_stats=True)))
self.layer.append(nn.Sequential(OrderedDict(layer)))
output_layerlayer = [("output_layer",nn.Linear(first_placeholder[-1], output_size, bias=True))]
if output_activate is not None:
output_layerlayer.append(("output_activation",output_activate))
self.layer.append(nn.Sequential(OrderedDict(output_layerlayer)))
self.linears = nn.ModuleList(self.layer)
def forward(self, x):
for layer in self.linears:
x = layer(x)
return x
@property
def layer_infor(self):
return list(self._layer_num)
def to_gpu(self, device=None):
self.linears.cuda(device=device)
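# Minimal usage sketch (illustrative only, not part of the original module):
#   net = DenseNet(input_size=10, output_size=2, hidden_layer=[64, 64])
#   y = net(torch.rand(32, 10))  # -> tensor of shape (32, 2)
# builds a 10 -> 64 -> 64 -> 2 MLP with the chosen hidden activation between layers.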
class LSTM_Dense(nn.Module):
def __init__(self, input_size, output_size, lstm_unit=64, lstm_layer=1, dense_layer=[64, 64],
hidden_activate=nn.ReLU(), output_activate=None,
BatchNorm = False):
super(LSTM_Dense, self).__init__()
self.lstm_unit = lstm_unit
self.hidden_activate = nn.ReLU()
self.Dense = DenseNet(lstm_unit, output_size, hidden_layer=dense_layer,
hidden_activate=hidden_activate, output_activate=output_activate, BatchNorm=BatchNorm)
self._layer_num = [input_size] + [lstm_unit] * lstm_layer + dense_layer + [output_size]
self.LSTM = nn.LSTM(input_size=input_size,
hidden_size=lstm_unit,
num_layers=lstm_layer)
def init_H_C(self,batch_size):
return (Variable(torch.zeros(1, batch_size, self.lstm_unit)),
Variable(torch.zeros(1, batch_size, self.lstm_unit)))
def forward(self, x, h_state=None):
if h_state is None:
h_state = self.init_H_C(x.size()[1])
x, h_state = self.LSTM(x, h_state)
x = self.hidden_activate(x)
x = self.Dense(x)
return x, h_state
def to_gpu(self, device=None):
self.LSTM.cuda(device=device)
self.Dense.cuda(device=device)
class CNN_2D_Dense(nn.Module):
def __init__(self, input_size, output_size,
# CNN_layer
kernal_size=[(32, 7), (64, 5), (128, 3)],
stride=1, padding=0, padding_mode='zeros',
# pooling
pooling_way = "Max", pooling_kernal= 2, pooling_stride = 2,
# Dense
dense_layer = [64, 64], hidden_activate=nn.ReLU(), output_activate=None,
BatchNorm = False):
super(CNN_2D_Dense, self).__init__()
first = [input_size[0]]+[kernal[0] for kernal in kernal_size ]
cnnlayer=[]
for flag, kernal in enumerate(kernal_size):
cnnlayer.append(("cnn" + str(flag), nn.Conv2d(first[flag], kernal[0], kernel_size=kernal[1],
stride=stride, padding=padding, padding_mode=padding_mode)))
cnnlayer.append(("cnn_activate" + str(flag), deepcopy(hidden_activate)))
if pooling_way == "Max":
cnnlayer.append(("pooling" + str(flag),torch.nn.MaxPool2d(kernel_size=pooling_kernal, stride=pooling_stride)))
elif pooling_way == "Ave":
cnnlayer.append(("pooling" + str(flag),torch.nn.AvgPool2d(kernel_size=pooling_kernal, stride=pooling_stride)))
self.CNN = nn.Sequential(OrderedDict(cnnlayer))
self.input_dense = self.size_cal(input_size)
self.Dendse = DenseNet(self.input_dense, output_size, hidden_layer=dense_layer,
hidden_activate=hidden_activate, output_activate=output_activate,BatchNorm=BatchNorm)
def size_cal(self, input_size):
test_input = torch.rand((1,)+input_size)
test_out = self.CNN(test_input)
return test_out.size(1)*test_out.size(2)*test_out.size(3)
def forward(self, x):
x = self.CNN(x)
x = x.view(x.size(0), -1)
x = self.Dendse(x)
return x
def to_gpu(self, device=None):
self.CNN.cuda(device=device)
        self.Dendse.cuda(device=device)  # the decoder attribute is named 'Dendse' in this class
class CNN_2D_LSTM_Dense(nn.Module):
def __init__(self, input_size, output_size,
# CNN_layer
kernal_size=[(32, 7), (64, 5), (128, 3)],
stride=1, padding=0, padding_mode='zeros',
# pooling
pooling_way = "Max", pooling_kernal= 2, pooling_stride = 2,
# LSTM
lstm_unit=64, lstm_layer=1,
# Dense
dense_layer = [64, 64], hidden_activate=nn.ReLU(), output_activate=None,
BatchNorm = False):
super(CNN_2D_LSTM_Dense, self).__init__()
first = [input_size[0]]+[kernal[0] for kernal in kernal_size ]
if pooling_way == "Max":
poollayer = torch.nn.MaxPool2d(kernel_size=pooling_kernal, stride=pooling_stride)
elif pooling_way == "Max":
poollayer = torch.nn.AvgPool2d(kernel_size=pooling_kernal, stride=pooling_stride)
cnnlayer=[]
for flag, kernal in enumerate(kernal_size):
cnnlayer.append(("cnn" + str(flag), nn.Conv2d(first[flag], kernal[0], kernel_size=kernal[1],
stride=stride,padding=padding,padding_mode=padding_mode)))
cnnlayer.append(("cnn_activate" + str(flag), deepcopy(hidden_activate)))
cnnlayer.append(("pooling" + str(flag), deepcopy(poollayer)))
self.CNN = nn.Sequential(OrderedDict(cnnlayer))
self.input_lstm = self.size_cal(input_size)
self.lstm_unit = lstm_unit
self.LSTM = nn.LSTM(input_size=self.input_lstm,
hidden_size=lstm_unit,
num_layers=lstm_layer)
self.Dendse = DenseNet(lstm_unit, output_size, hidden_layer=dense_layer,
hidden_activate=hidden_activate, output_activate=output_activate,BatchNorm=BatchNorm)
def init_H_C(self,batch_size):
return (Variable(torch.zeros(1, batch_size, self.lstm_unit)),
Variable(torch.zeros(1, batch_size, self.lstm_unit)))
def size_cal(self, input_size):
test_input = torch.rand((1,)+input_size)
test_out = self.CNN(test_input)
return test_out.size(1)*test_out.size(2)*test_out.size(3)
def forward(self, x, h = None):
batch_size = x.size(1)
squence_size = x.size(0)
conjection = ()
for time in range(squence_size):
cnnout = self.CNN(x[time])
cnnout = cnnout.view(batch_size, -1)
cnnout = cnnout.unsqueeze(0)
conjection = conjection + (cnnout,)
x = torch.cat(conjection, dim=0)
if h is None:
h = self.init_H_C(batch_size)
x, h = self.LSTM(x, h)
x = self.Dendse(x)
return x
def to_gpu(self, device=None):
self.CNN.cuda(device=device)
self.LSTM.cuda(device=device)
        self.Dendse.cuda(device=device)  # the decoder attribute is named 'Dendse' in this class
| 2.390625
| 2
|
layers.py
|
gemilepus/ShadeSketch
| 313
|
12781430
|
"""
ShadeSketch
https://github.com/qyzdao/ShadeSketch
Learning to Shadow Hand-drawn Sketches
<NAME>, <NAME>, <NAME>
Copyright (C) 2020 The respective authors and Project HAT. All rights reserved.
Licensed under MIT license.
"""
import tensorflow as tf
# import keras
keras = tf.keras
K = keras.backend
Layer = keras.layers.Layer
Conv2D = keras.layers.Conv2D
InputSpec = keras.layers.InputSpec
image_data_format = K.image_data_format
activations = keras.activations
initializers = keras.initializers
regularizers = keras.regularizers
constraints = keras.constraints
class Composite(Layer):
def __init__(self,
data_format='channels_last',
**kwargs):
self.data_format = data_format
super(Composite, self).__init__(**kwargs)
def call(self, inputs):
line_inputs, shade_inputs = inputs
return line_inputs + (shade_inputs + 1) * 0.25
def compute_output_shape(self, input_shape):
return input_shape[0]
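# Note (added for clarity): assuming the predicted shade map lies in [-1, 1] (e.g. a
# tanh output), (shade_inputs + 1) * 0.25 rescales it to [0, 0.5] before it is added
# onto the line drawing to form the composited preview.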
class PixelwiseConcat(Layer):
def __init__(self,
data_format='channels_last',
**kwargs):
self.data_format = data_format
super(PixelwiseConcat, self).__init__(**kwargs)
def call(self, inputs):
pixel_inputs, unit_inputs = inputs
if self.data_format == 'channels_first':
repeated_unit_inputs = tf.tile(
K.expand_dims(K.expand_dims(unit_inputs, 2), 2),
[1, K.shape(pixel_inputs)[2], K.shape(pixel_inputs)[3], 1]
)
elif self.data_format == 'channels_last':
repeated_unit_inputs = tf.tile(
K.expand_dims(K.expand_dims(unit_inputs, 1), 1),
[1, K.shape(pixel_inputs)[1], K.shape(pixel_inputs)[2], 1]
)
return K.concatenate([pixel_inputs, repeated_unit_inputs])
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
return (input_shape[0][0], input_shape[0][1] + input_shape[1][1], input_shape[0][2], input_shape[0][3])
elif self.data_format == 'channels_last':
return (input_shape[0][0], input_shape[0][1], input_shape[0][2], input_shape[0][3] + input_shape[1][1])
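# Shape sketch (illustrative): with channels_last, pixel_inputs of shape (batch, H, W, C1)
# and unit_inputs of shape (batch, C2) produce an output of shape (batch, H, W, C1 + C2);
# the per-image feature vector is tiled to every spatial location before concatenation.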
class SubPixelConv2D(Conv2D):
def __init__(self,
filters,
kernel_size,
r,
padding='same',
data_format=None,
strides=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SubPixelConv2D, self).__init__(
filters=r * r * filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.r = r
if hasattr(tf.nn, 'depth_to_space'):
self.depth_to_space = tf.nn.depth_to_space
else:
self.depth_to_space = tf.depth_to_space
def phase_shift(self, I):
if self.data_format == 'channels_first':
return self.depth_to_space(I, self.r, data_format="NCHW")
elif self.data_format == 'channels_last':
return self.depth_to_space(I, self.r, data_format="NHWC")
def call(self, inputs):
return self.phase_shift(super(SubPixelConv2D, self).call(inputs))
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
n, c, h, w = super(SubPixelConv2D, self).compute_output_shape(input_shape)
elif self.data_format == 'channels_last':
n, h, w, c = super(SubPixelConv2D, self).compute_output_shape(input_shape)
if h is not None:
h = int(self.r * h)
if w is not None:
w = int(self.r * w)
c = int(c / (self.r * self.r))
if self.data_format == 'channels_first':
return (n, c, h, w)
elif self.data_format == 'channels_last':
return (n, h, w, c)
def get_config(self):
config = super(Conv2D, self).get_config()
config.pop('rank')
config.pop('dilation_rate')
config['filters'] /= self.r * self.r
config['r'] = self.r
return config
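# Shape sketch (illustrative): SubPixelConv2D(filters=f, kernel_size=3, r=2) first emits
# 4*f feature maps and then pixel-shuffles them with depth_to_space, so a channels_last
# input of shape (batch, H, W, C) becomes an output of shape (batch, 2*H, 2*W, f).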
class SelfAttention(Layer):
def __init__(self,
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SelfAttention, self).__init__(**kwargs)
self.data_format = data_format
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
kernel_size = (1, 1)
self.filters = int(input_shape[channel_axis])
self.kernel_f = self.add_weight(shape=kernel_size + (self.filters, self.filters // 8),
initializer=self.kernel_initializer,
name='kernel_f',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_g = self.add_weight(shape=kernel_size + (self.filters, self.filters // 8),
initializer=self.kernel_initializer,
name='kernel_g',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_h = self.add_weight(shape=kernel_size + (self.filters, self.filters),
initializer=self.kernel_initializer,
name='kernel_h',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias_f = self.add_weight(shape=(self.filters // 8,),
initializer=self.bias_initializer,
name='bias_f',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.bias_g = self.add_weight(shape=(self.filters // 8,),
initializer=self.bias_initializer,
name='bias_g',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.bias_h = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias_h',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias_f = None
self.bias_g = None
self.bias_h = None
self.gamma = self.add_weight(
name='gamma',
shape=(1,),
initializer=initializers.Constant(0)
)
super(SelfAttention, self).build(input_shape)
def call(self, inputs):
f = K.conv2d(inputs,
self.kernel_f,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c']
g = K.conv2d(inputs,
self.kernel_g,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c']
h = K.conv2d(inputs,
self.kernel_h,
data_format=self.data_format,
strides=(1, 1),
dilation_rate=(1, 1)) # [bs, h, w, c]
if self.use_bias:
f = K.bias_add(f, self.bias_f, data_format=self.data_format) # [bs, h, w, c']
g = K.bias_add(g, self.bias_g, data_format=self.data_format) # [bs, h, w, c']
h = K.bias_add(h, self.bias_h, data_format=self.data_format) # [bs, h, w, c]
# N = h * w
        s = K.dot(K.batch_flatten(g), K.transpose(K.batch_flatten(f)))  # [bs, N, N]
beta = K.softmax(s) # attention map
o = K.dot(beta, K.batch_flatten(h)) # [bs, N, C]
o = K.reshape(o, K.shape(inputs)) # [bs, h, w, C]
return self.activation(self.gamma * o + inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'activation': activations.serialize(self.activation),
'data_format': self.data_format,
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(SelfAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
Implementation of Coordinate Channel
keras-coordconv
MIT License
Copyright (c) 2018 <NAME>
https://github.com/titu1994/keras-coordconv/blob/master/coord.py
"""
class _CoordinateChannel(Layer):
""" Adds Coordinate Channels to the input tensor.
# Arguments
rank: An integer, the rank of the input data-uniform,
e.g. "2" for 2D convolution.
use_radius: Boolean flag to determine whether the
radius coordinate should be added for 2D rank
inputs or not.
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
ND tensor with shape:
`(samples, channels, *)`
if `data_format` is `"channels_first"`
or ND tensor with shape:
`(samples, *, channels)`
if `data_format` is `"channels_last"`.
# Output shape
ND tensor with shape:
`(samples, channels + 2, *)`
if `data_format` is `"channels_first"`
or 5D tensor with shape:
`(samples, *, channels + 2)`
if `data_format` is `"channels_last"`.
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, rank,
use_radius=False,
data_format='channels_last',
**kwargs):
super(_CoordinateChannel, self).__init__(**kwargs)
if data_format not in [None, 'channels_first', 'channels_last']:
raise ValueError('`data_format` must be either "channels_last", "channels_first" '
'or None.')
self.rank = rank
self.use_radius = use_radius
self.data_format = data_format
self.axis = 1 if image_data_format() == 'channels_first' else -1
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[self.axis]
self.input_spec = InputSpec(min_ndim=self.rank + 2,
axes={self.axis: input_dim})
self.built = True
def call(self, inputs, training=None, mask=None):
input_shape = K.shape(inputs)
if self.rank == 1:
input_shape = [input_shape[i] for i in range(3)]
batch_shape, dim, channels = input_shape
xx_range = tf.tile(K.expand_dims(K.arange(0, dim), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=-1)
xx_channels = K.cast(xx_range, K.floatx())
xx_channels = xx_channels / K.cast(dim - 1, K.floatx())
xx_channels = (xx_channels * 2) - 1.
outputs = K.concatenate([inputs, xx_channels], axis=-1)
if self.rank == 2:
if self.data_format == 'channels_first':
inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])
input_shape = K.shape(inputs)
input_shape = [input_shape[i] for i in range(4)]
batch_shape, dim1, dim2, channels = input_shape
xx_ones = tf.ones(K.stack([batch_shape, dim2]), dtype='int32')
xx_ones = K.expand_dims(xx_ones, axis=-1)
xx_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=1)
xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
xx_channels = K.expand_dims(xx_channels, axis=-1)
xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])
yy_ones = tf.ones(K.stack([batch_shape, dim1]), dtype='int32')
yy_ones = K.expand_dims(yy_ones, axis=1)
yy_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),
K.stack([batch_shape, 1]))
yy_range = K.expand_dims(yy_range, axis=-1)
yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
yy_channels = K.expand_dims(yy_channels, axis=-1)
yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])
xx_channels = K.cast(xx_channels, K.floatx())
xx_channels = xx_channels / K.cast(dim1 - 1, K.floatx())
xx_channels = (xx_channels * 2) - 1.
yy_channels = K.cast(yy_channels, K.floatx())
yy_channels = yy_channels / K.cast(dim2 - 1, K.floatx())
yy_channels = (yy_channels * 2) - 1.
outputs = K.concatenate([inputs, xx_channels, yy_channels], axis=-1)
if self.use_radius:
rr = K.sqrt(K.square(xx_channels - 0.5) +
K.square(yy_channels - 0.5))
outputs = K.concatenate([outputs, rr], axis=-1)
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
if self.rank == 3:
if self.data_format == 'channels_first':
inputs = K.permute_dimensions(inputs, [0, 2, 3, 4, 1])
input_shape = K.shape(inputs)
input_shape = [input_shape[i] for i in range(5)]
batch_shape, dim1, dim2, dim3, channels = input_shape
xx_ones = tf.ones(K.stack([batch_shape, dim3]), dtype='int32')
xx_ones = K.expand_dims(xx_ones, axis=-1)
xx_range = tf.tile(K.expand_dims(K.arange(0, dim2), axis=0),
K.stack([batch_shape, 1]))
xx_range = K.expand_dims(xx_range, axis=1)
xx_channels = K.batch_dot(xx_ones, xx_range, axes=[2, 1])
xx_channels = K.expand_dims(xx_channels, axis=-1)
xx_channels = K.permute_dimensions(xx_channels, [0, 2, 1, 3])
xx_channels = K.expand_dims(xx_channels, axis=1)
xx_channels = tf.tile(xx_channels,
[1, dim1, 1, 1, 1])
yy_ones = tf.ones(K.stack([batch_shape, dim2]), dtype='int32')
yy_ones = K.expand_dims(yy_ones, axis=1)
yy_range = tf.tile(K.expand_dims(K.arange(0, dim3), axis=0),
K.stack([batch_shape, 1]))
yy_range = K.expand_dims(yy_range, axis=-1)
yy_channels = K.batch_dot(yy_range, yy_ones, axes=[2, 1])
yy_channels = K.expand_dims(yy_channels, axis=-1)
yy_channels = K.permute_dimensions(yy_channels, [0, 2, 1, 3])
yy_channels = K.expand_dims(yy_channels, axis=1)
yy_channels = tf.tile(yy_channels,
[1, dim1, 1, 1, 1])
zz_range = tf.tile(K.expand_dims(K.arange(0, dim1), axis=0),
K.stack([batch_shape, 1]))
zz_range = K.expand_dims(zz_range, axis=-1)
zz_range = K.expand_dims(zz_range, axis=-1)
zz_channels = tf.tile(zz_range,
[1, 1, dim2, dim3])
zz_channels = K.expand_dims(zz_channels, axis=-1)
xx_channels = K.cast(xx_channels, K.floatx())
xx_channels = xx_channels / K.cast(dim2 - 1, K.floatx())
xx_channels = xx_channels * 2 - 1.
yy_channels = K.cast(yy_channels, K.floatx())
yy_channels = yy_channels / K.cast(dim3 - 1, K.floatx())
yy_channels = yy_channels * 2 - 1.
zz_channels = K.cast(zz_channels, K.floatx())
zz_channels = zz_channels / K.cast(dim1 - 1, K.floatx())
zz_channels = zz_channels * 2 - 1.
outputs = K.concatenate([inputs, zz_channels, xx_channels, yy_channels],
axis=-1)
if self.data_format == 'channels_first':
outputs = K.permute_dimensions(outputs, [0, 4, 1, 2, 3])
return outputs
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[self.axis]
if self.use_radius and self.rank == 2:
channel_count = 3
else:
channel_count = self.rank
output_shape = list(input_shape)
output_shape[self.axis] = input_shape[self.axis] + channel_count
return tuple(output_shape)
def get_config(self):
config = {
'rank': self.rank,
'use_radius': self.use_radius,
'data_format': self.data_format
}
base_config = super(_CoordinateChannel, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class CoordinateChannel1D(_CoordinateChannel):
""" Adds Coordinate Channels to the input tensor of rank 1.
# Arguments
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
3D tensor with shape: `(batch_size, steps, input_dim)`
# Output shape
        3D tensor with shape: `(batch_size, steps, input_dim + 1)`
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, data_format=None, **kwargs):
super(CoordinateChannel1D, self).__init__(
rank=1,
use_radius=False,
data_format=data_format,
**kwargs
)
def get_config(self):
config = super(CoordinateChannel1D, self).get_config()
config.pop('rank')
config.pop('use_radius')
return config
class CoordinateChannel2D(_CoordinateChannel):
""" Adds Coordinate Channels to the input tensor.
# Arguments
use_radius: Boolean flag to determine whether the
radius coordinate should be added for 2D rank
inputs or not.
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, rows, cols, channels)`
if `data_format` is `"channels_last"`.
# Output shape
4D tensor with shape:
`(samples, channels + 2/3, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, rows, cols, channels + 2/3)`
if `data_format` is `"channels_last"`.
        If `use_radius` is set, then 3 additional filters will be added,
        else only 2 additional filters will be added.
# References:
- [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
"""
def __init__(self, use_radius=False,
data_format=None,
**kwargs):
super(CoordinateChannel2D, self).__init__(
rank=2,
use_radius=use_radius,
data_format=data_format,
**kwargs
)
def get_config(self):
config = super(CoordinateChannel2D, self).get_config()
config.pop('rank')
return config
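# Illustrative usage sketch, not part of the reference implementation above: it
# prepends coordinate channels to a feature map before an ordinary convolution.
# The import paths assume the same `keras` package used by this module (swap in
# `tensorflow.keras` if that is the backend in use), and the 64x64x3 input
# shape is an arbitrary example.
if __name__ == '__main__':
    from keras.layers import Input, Conv2D
    from keras.models import Model
    inp = Input(shape=(64, 64, 3))             # channels_last image batch
    x = CoordinateChannel2D()(inp)             # appends xx and yy channels -> 5 channels
    x = Conv2D(16, (3, 3), padding='same')(x)  # plain convolution on the augmented tensor
    model = Model(inp, x)
    model.summary()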
| 2.515625
| 3
|
generator.py
|
ugis22/creatingDCGAN
| 0
|
12781431
|
<gh_stars>0
from typing import Tuple, Union
import numpy as np
from keras.layers import (
Conv2DTranspose,
Reshape,
BatchNormalization,
Dense,
Activation
)
from keras.models import Sequential
from keras.optimizers import Adam
class Generator:
def __init__(self,
initial_dimensions: Tuple,
reshape_dimensions: Tuple,
kernel_size: Tuple,
stride_size: Tuple,
output_channels: Union[int, float],
lr: float = 0.00015,
beta: float = 0.5,
loss: str = 'binary_crossentropy'):
"""
        The parameters that need to be passed are:
            initial_dimensions: A tuple giving the shape of the input noise vector
            reshape_dimensions: The shape into which the projected noise vector is reshaped
            kernel_size: The size of the convolution kernel
            stride_size: The stride with which the window slides
            output_channels: 3 for an RGB colour image, 1 for greyscale
            lr: the learning rate
            beta: the beta_1 factor of the Adam optimizer
            loss: The name of the loss function used to train the generator.
"""
self.initial_dimensions = initial_dimensions
self.reshape_dimensions = reshape_dimensions
self.kernel_size = kernel_size
self.stride_size = stride_size
self.first_layer_filter = int(self.reshape_dimensions[2]/2)
self.second_layer_filter = int(self.first_layer_filter/2)
self.third_layer_filter = int(self.second_layer_filter/2)
self.output_channels = output_channels
self.lr = lr
self.beta = beta
self.loss = loss
def generator(self):
"""
The function generates an training_images through a deconvolution neural net.
"""
# Define type of neural network. Would be sequential.
generator = Sequential()
# Layer1. Now we project and reshape.
generator.add(Dense(units=np.prod(self.reshape_dimensions),
kernel_initializer='glorot_normal',
input_shape=self.initial_dimensions))
# Reshape
generator.add(Reshape(target_shape=self.reshape_dimensions))
# Normalize
generator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))
# Activation
generator.add(Activation('relu'))
        # Now we add the upsampling blocks: convolution, batch normalization, activation.
# Convolution
generator.add(Conv2DTranspose(filters=self.first_layer_filter,
kernel_size=self.kernel_size,
strides=self.stride_size,
padding='same',
data_format='channels_last',
kernel_initializer='glorot_normal'))
        # Normalize
generator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))
# Activate
generator.add(Activation('relu'))
# Convolution
generator.add(Conv2DTranspose(filters=self.second_layer_filter,
kernel_size=self.kernel_size,
strides=self.stride_size,
padding='same',
data_format='channels_last',
kernel_initializer='glorot_normal'))
        # Normalize
generator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))
# Activate
generator.add(Activation("relu"))
# Convolution
generator.add(Conv2DTranspose(filters=self.third_layer_filter,
kernel_size=self.kernel_size,
strides=self.stride_size,
padding='same',
data_format='channels_last',
kernel_initializer='glorot_normal'))
        # Normalize
generator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))
# Activate
generator.add(Activation('relu'))
# Last layer. Convolution
generator.add(Conv2DTranspose(filters=self.output_channels,
kernel_size=self.kernel_size,
strides=self.stride_size,
padding='same',
data_format='channels_last',
kernel_initializer='glorot_normal'))
# Activate
generator.add(Activation('tanh'))
optimizer = Adam(lr=self.lr, beta_1=self.beta)
generator.compile(loss=self.loss,
optimizer=optimizer,
metrics=None)
return generator
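# Minimal construction sketch; every dimension below is an assumption chosen so
# that four stride-2 transposed convolutions grow a 4x4 projection into a
# 64x64 RGB image (4 -> 8 -> 16 -> 32 -> 64). Nothing here comes from the
# original training script.
if __name__ == '__main__':
    gen = Generator(initial_dimensions=(100,),       # length of the noise vector
                    reshape_dimensions=(4, 4, 512),  # projected noise shape
                    kernel_size=(5, 5),
                    stride_size=(2, 2),
                    output_channels=3)               # RGB output
    model = gen.generator()
    model.summary()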
| 3.3125
| 3
|
src/tests/kafka_test.py
|
dixonwhitmire/lib-kafka
| 0
|
12781432
|
<reponame>dixonwhitmire/lib-kafka<gh_stars>0
import importlib
import os
from unittest import mock
from unittest.mock import MagicMock
from lib_kafka import logging_codes
import unittest
import concurrent.futures
from confluent_kafka.admin import ClusterMetadata, TopicMetadata, PartitionMetadata
from confluent_kafka import KafkaError, KafkaException
from tests import resources_directory
def get_sample_config_path(file_name):
return os.path.join(resources_directory, file_name)
class TestKafkaApiMethods(unittest.TestCase):
def setUp(self) -> None:
os.environ["KAFKA_BROKER_CONFIG_FILE"] = get_sample_config_path('kafka.env')
os.environ["KAFKA_TOPIC_CONFIG_FILE"] = get_sample_config_path('kafka-topic.json')
self.kafka = importlib.import_module('lib_kafka.kafka')
@mock.patch("sys.exit")
@mock.patch("confluent_kafka.admin.AdminClient.create_topics")
def test_create_topics(self, mock_create_topics, mock_sys_exit):
self.kafka.create_topics()
self.assertTrue(mock_create_topics.called)
self.assertEqual(mock_create_topics.call_count, 1)
mock_create_topics.reset_mock()
f = concurrent.futures.Future()
f.set_running_or_notify_cancel()
f.set_result(None)
mock_create_topics.return_value = {'testTopic1': None, 'testTopic2': f}
with self.assertLogs('lib_kafka.kafka', level='ERROR') as cm:
self.kafka.create_topics()
self.assertTrue(mock_create_topics.called)
self.assertEqual(mock_create_topics.call_count, 1)
self.assertTrue(mock_sys_exit.called)
self.assertTrue(len(cm.output) > 0)
mock_create_topics.reset_mock()
kafka_error = MagicMock()
kafka_error.code.return_value = KafkaError.TOPIC_ALREADY_EXISTS
f.result = MagicMock(side_effect=KafkaException(kafka_error))
mock_create_topics.return_value = {'testTopic1': f}
with self.assertLogs('lib_kafka.kafka', level='INFO') as cm:
self.kafka.create_topics()
self.assertTrue(mock_create_topics.called)
self.assertEqual(mock_create_topics.call_count, 1)
self.assertTrue('INFO:lib_kafka.kafka:' + logging_codes.TOPIC_EXISTS % 'testTopic1' in cm.output)
mock_create_topics.reset_mock()
kafka_error = MagicMock()
kafka_error.code.return_value = KafkaError._TIMED_OUT
f.result = MagicMock(side_effect=KafkaException(kafka_error))
mock_create_topics.return_value = {'testTopic1': f}
with self.assertLogs('lib_kafka.kafka', level='ERROR') as cm:
self.kafka.create_topics()
self.assertTrue(mock_create_topics.called)
self.assertEqual(mock_create_topics.call_count, 1)
self.assertTrue(mock_sys_exit.called)
self.assertTrue(len(cm.output) > 0)
mock_create_topics.reset_mock()
kafka_error = MagicMock()
kafka_error.code.return_value = KafkaError.CLUSTER_AUTHORIZATION_FAILED
f.result = MagicMock(side_effect=KafkaException(kafka_error))
mock_create_topics.return_value = {'testTopic1': f}
with self.assertLogs('lib_kafka.kafka', level='ERROR') as cm:
self.kafka.create_topics()
self.assertTrue(mock_create_topics.called)
self.assertEqual(mock_create_topics.call_count, 1)
self.assertTrue(mock_sys_exit.called)
self.assertTrue(len(cm.output) > 0)
mock_create_topics.reset_mock()
f.result = MagicMock(side_effect=Exception)
mock_create_topics.return_value = {'testTopic1': f}
with self.assertLogs('lib_kafka.kafka', level='ERROR') as cm:
self.kafka.create_topics()
self.assertTrue(mock_create_topics.called)
self.assertEqual(mock_create_topics.call_count, 1)
self.assertTrue(mock_sys_exit.called)
self.assertTrue(len(cm.output) > 0)
mock_create_topics.reset_mock()
@mock.patch("sys.exit")
@mock.patch("confluent_kafka.admin.AdminClient.delete_topics")
def test_delete_topic(self, mock_delete_topics, mock_sys_exit):
# Topic delete successful
mock_delete_topics.reset_mock()
f = concurrent.futures.Future()
f.set_running_or_notify_cancel()
f.set_result(None)
mock_delete_topics.return_value = {'testTopic': f}
with self.assertLogs('lib_kafka.kafka', level='INFO') as cm:
self.kafka.delete_topics()
self.assertTrue(mock_delete_topics.called)
self.assertEqual(mock_delete_topics.call_count, 1)
self.assertTrue(
'INFO:lib_kafka.kafka:' + logging_codes.DELETE_TOPIC_SUCCESS % 'testTopic' in cm.output)
mock_delete_topics.reset_mock()
f.result = MagicMock(side_effect=Exception)
mock_delete_topics.return_value = {'testTopic': f}
with self.assertLogs('lib_kafka.kafka', level='ERROR') as cm:
self.kafka.delete_topics()
self.assertTrue(mock_delete_topics.called)
self.assertEqual(mock_delete_topics.call_count, 1)
self.assertTrue(mock_delete_topics.called)
self.assertTrue(mock_sys_exit.called)
self.assertTrue(len(cm.output) > 0)
mock_delete_topics.reset_mock()
@mock.patch("confluent_kafka.admin.AdminClient.create_topics")
@mock.patch("confluent_kafka.admin.AdminClient.delete_topics")
@mock.patch("confluent_kafka.admin.AdminClient.create_partitions")
@mock.patch("confluent_kafka.admin.AdminClient.list_topics")
def test_update_partitions(self, mock_list_topics, mock_create_partitions,
mock_delete_topics, mock_create_topics):
cluster_data1 = ClusterMetadata()
topic_data1 = TopicMetadata()
partition_data1 = PartitionMetadata()
partition_data1.replicas = [0]
topic_data1.topic = 'topic1'
topic_data1.partitions = {0: partition_data1}
cluster_data1.topics = {'topic1': topic_data1}
mock_list_topics.return_value = cluster_data1
self.kafka.update_topic_list = [
{'name': 'topic1', 'partitions': 1, 'replication_factors': 1, 'recreate_topic': False}]
# Same number of partitions as existing
with self.assertLogs('lib_kafka.kafka', level='INFO') as cm:
self.kafka.update_topics()
self.assertTrue(mock_list_topics.called)
self.assertEqual(mock_list_topics.call_count, 1)
self.assertTrue(
'INFO:lib_kafka.kafka:' + logging_codes.PARTITION_NUM_EQUAL % (1, 1, 'topic1') in cm.output)
mock_list_topics.reset_mock()
self.kafka.update_topic_list = [
{'name': 'topic1', 'partitions': 2, 'replication_factors': 1, 'recreate_topic': False}]
f = concurrent.futures.Future()
f.set_running_or_notify_cancel()
f.set_result(None)
mock_create_partitions.return_value = {'topic1': f}
with self.assertLogs('lib_kafka.kafka', level='INFO') as cm:
self.kafka.update_topics()
self.assertTrue(mock_list_topics.called)
self.assertEqual(mock_list_topics.call_count, 1)
self.assertTrue(mock_create_partitions.called)
self.assertEqual(mock_create_partitions.call_count, 1)
self.assertTrue(
'INFO:lib_kafka.kafka:' + logging_codes.ADD_PARTITION_SUCCESS % ('topic1', 2) in cm.output)
# Increase number of partitions - success
mock_create_partitions.reset_mock()
mock_create_partitions.return_value = {'testTopic1': None, 'testTopic2': f}
with self.assertLogs('lib_kafka.kafka', level='INFO') as cm:
self.kafka.update_topics()
self.assertTrue(
'INFO:lib_kafka.kafka:' + logging_codes.ADD_PARTITION_SUCCESS % ('testTopic2', 2) in cm.output)
self.assertTrue(
any('ERROR:lib_kafka.kafka:' in s for s in cm.output))
# Decrease number of partitions with recreate topic true
self.kafka.update_topic_list = [
{'name': 'topic1', 'partitions': 0, 'replication_factors': 1, 'recreate_topic': True}]
self.kafka.update_topics()
self.assertTrue(mock_delete_topics.called)
self.assertTrue(mock_create_topics.called)
# Decrease number of partitions with recreate topic False
self.kafka.update_topic_list = [
{'name': 'topic1', 'partitions': 0, 'replication_factors': 1, 'recreate_topic': False}]
mock_delete_topics.reset_mock()
mock_create_topics.reset_mock()
self.kafka.update_topics()
self.assertFalse(mock_delete_topics.called)
self.assertFalse(mock_create_topics.called)
# Topic does not exist
self.kafka.update_topic_list = [
{'name': 'topic2', 'partitions': 0, 'replication_factors': 1, 'recreate_topic': False}]
with self.assertLogs('lib_kafka.kafka', level='INFO') as cm:
self.kafka.update_topics()
self.assertTrue(
'INFO:lib_kafka.kafka:' + logging_codes.TOPIC_NOT_FOUND % 'topic2' in cm.output)
def test_convert_to_bool(self):
self.assertFalse(self.kafka._convert_to_bool(None))
self.assertFalse(self.kafka._convert_to_bool(''))
self.assertFalse(self.kafka._convert_to_bool(' '))
self.assertFalse(self.kafka._convert_to_bool('False'))
self.assertFalse(self.kafka._convert_to_bool('None'))
self.assertTrue(self.kafka._convert_to_bool('True'))
self.assertTrue(self.kafka._convert_to_bool('TRUE'))
if __name__ == '__main__':
unittest.main()
| 2.078125
| 2
|
tests/app_test/actions.py
|
marcosschroh/django-history-actions
| 1
|
12781433
|
<reponame>marcosschroh/django-history-actions<filename>tests/app_test/actions.py
from django.utils.translation import ugettext_lazy as _
PROFILE_SAVE_ACTION = 'PROFILE_SAVE_ACTION'
ACTIONS = {
'PROFILE_SAVE_ACTION': _('profile save action')
}
| 1.515625
| 2
|
education/HADDOCK24/shape-small-molecule/scripts/lig2shape.py
|
amjjbonvin/haddocking.github.io
| 12
|
12781434
|
<filename>education/HADDOCK24/shape-small-molecule/scripts/lig2shape.py
import sys
def format_shape_pharm(lig):
ligFile = open(lig, 'r')
for line in ligFile:
if 'HETATM' in line or 'ATOM' in line:
resi = int(line.split( )[1])
x = float(line.split( )[6])
y = float(line.split( )[7])
z = float(line.split( )[8])
pharm_info = float(line.split( )[9])
print("ATOM {: >4d} SHA SHA S{: >4d} {: 7.3f} {: 7.3f} {: 7.3f} {:5.2f} 1.00 ".format(resi, resi, x, y, z, pharm_info))
print("END")
def format_shape(lig):
ligFile = open(lig, 'r')
for line in ligFile:
if 'HETATM' in line or 'ATOM' in line:
resi = int(line.split( )[1])
x = float(line.split( )[6])
y = float(line.split( )[7])
z = float(line.split( )[8])
print("ATOM {: >4d} SHA SHA S{: >4d} {: 7.3f} {: 7.3f} {: 7.3f} 1.00 1.00 ".format(resi, resi, x, y, z))
print("END")
if __name__ == "__main__":
mode = sys.argv[1]
lig = sys.argv[2]
if mode == "shape":
format_shape(lig)
if mode == "pharm":
format_shape_pharm(lig)
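# Typical invocations (file names are placeholders, not from the tutorial data);
# both modes read a ligand PDB file and write shape beads to stdout:
#   python lig2shape.py shape ligand.pdb > ligand-shape.pdb
#   python lig2shape.py pharm ligand-pharm.pdb > ligand-shape-pharm.pdb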
| 2.65625
| 3
|
src/MQTT/Message/Formatters/JsonFormatter.py
|
dashford/sentinel-client
| 0
|
12781435
|
import json
class JsonFormatter:
def __init__(self):
pass
def format(self, message):
return json.dumps(message)
| 2.5
| 2
|
plotCDF.py
|
faithcomesbyhearing/verse-timing
| 0
|
12781436
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
file="/Users/spanta/Documents/batch_aeneas_scripts/batch_directory/QC_data/BMQBSMN2DA_epo_eng_plot_cdf.csv"
data_req = pd.read_table(file, sep=",")
arr = data_req.values
arr.sort(axis=0)
data_req = pd.DataFrame(arr, index=data_req.index, columns=data_req.columns)
#sort values per column
sorted_values = data_req.apply(lambda x: x.sort_values())
fig, ax = plt.subplots()
for col in sorted_values.columns:
y = np.linspace(0.,1., len(sorted_values[col].dropna()))
ax.plot(sorted_values[col].dropna(), y,label=col)
legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
plt.xlim([0, 5])
filename=(file.split('/')[-1]).split('_')[0]
plt.savefig('/Users/spanta/Documents/batch_aeneas_scripts/batch_directory/QC_data/'+filename+'.png')
#plt.show()
| 2.625
| 3
|
hawkweed/classes/future.py
|
hellerve/hawkweed
| 20
|
12781437
|
"""A Future class"""
from hawkweed.functional.primitives import reduce
from hawkweed.classes.repr import Repr
class Future(Repr):
"""A Future class"""
def __init__(self, value):
"""
Takes a binary function (taking success and error, respectively)
and builds a Future from it.
Complexity: O(1)
params:
value: the function to encase
returns:
a Future
"""
self.value = value
self.transforms = []
@staticmethod
def of(value):
"""
        Creates a Future from a static value, immediately resolving it.
Complexity: O(1)
params:
value: the value to encase
returns:
a Future
"""
return Future(lambda res, rej: res(value))
@staticmethod
def reject(value):
"""
Creates a Future from a static value, immediately rejecting it.
Complexity: O(1)
params:
value: the value to encase
returns:
a Future
"""
return Future(lambda res, rej: rej(value))
@staticmethod
def encase(fun, args=None):
"""
Encases an ordinary function in a Future. If the function runs
as expected the return value will be returned to the success
callback. If an exception occurs it will be returned to the
error callback.
Special behaviour:
You need to specify args. If the function does not have any,
            add args=[]. If you do not, a function that takes arguments
will be returned.
Complexity: O(1)
params:
fun: the function to encase
args: the arguments to pass to the function (defaults to None,
override to an empty sequence if no arguments are needed)
returns:
a Future
"""
if args is None:
return lambda *args: Future.encase(fun, args=args)
def res(res, rej):
"""Internal encase function"""
try:
return res(fun(*args))
except Exception as e:
return rej(e)
return Future(res)
def __repr__(self):
return "Future({})".format(self.value)
def apply(self, fun):
"""
Apply a transformation function fun to the future value.
Complexity: Application O(1), Execution O(fun)
params:
fun: the function to apply
returns:
a Future
"""
self.transforms.append(fun)
return self
def chain(self, future):
"""
Chains a future to this one. This will intercept
any calls to fork insofar as both Futures are chained
        before any call to the callbacks. An error in either
        Future will result in a call to the error callback.
Complexity: O(1)
params:
            future: a function that takes the resolved value and returns the Future to chain
returns:
a Future
"""
def chained(res, rej):
"""Internal chain function"""
self.value(lambda x: future(x).fork(res, rej), rej)
return Future(chained)
def fork(self, res, err):
"""
Registers resolvers for this Future.
Complexity: O(1)
params:
res: the resolver function
err: the error function
returns:
whatever the functions return
"""
def resolver(trans):
"""Internal fork function that applies transformations"""
try:
return res(reduce(lambda acc, x: x(acc), trans, self.transforms))
except Exception as e:
if err:
return err(e)
raise
return self.value(resolver, err)
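# Minimal usage sketch (Python 3 assumed so that print can be used as a
# callback); the success and error handlers below are placeholders.
if __name__ == "__main__":
    (Future.of(21)
        .apply(lambda x: x * 2)
        .fork(lambda value: print("resolved:", value),
              lambda error: print("rejected:", error)))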
| 3.5
| 4
|
boatsandjoy_api/bookings/payment_gateways.py
|
bertini36/boatsandjoy-api
| 0
|
12781438
|
from abc import ABC, abstractmethod
from decimal import Decimal
import stripe
from django.conf import settings
class PaymentGateway(ABC):
@classmethod
@abstractmethod
def generate_checkout_session_id(
cls,
name: str,
description: str,
        price: Decimal,
) -> str:
pass
@classmethod
@abstractmethod
def get_session_id_from_event(cls, event: dict) -> str:
pass
@classmethod
@abstractmethod
def get_customer_email_from_event(cls, event: dict) -> str:
pass
class StripePaymentGateway(PaymentGateway):
@classmethod
def generate_checkout_session_id(
cls,
name: str,
description: str,
price: Decimal
) -> str:
stripe.api_key = settings.STRIPE_SECRET_KEY
session = stripe.checkout.Session.create(
payment_method_types=['card'],
line_items=[
{
'name': name,
'description': description,
'amount': cls._format_price(price),
'currency': 'eur',
'quantity': 1,
}
],
success_url=(
settings.STRIPE_REDIRECT_URL
+ '?session_id={CHECKOUT_SESSION_ID}'
),
cancel_url=settings.STRIPE_REDIRECT_URL,
)
return session.id
@classmethod
def get_session_id_from_event(cls, event: dict) -> str:
return event['data']['object']['id']
@classmethod
def get_customer_email_from_event(cls, event: dict) -> str:
customer_id = event['data']['object']['customer']
stripe.api_key = settings.STRIPE_SECRET_KEY
customer = stripe.Customer.retrieve(customer_id)
return customer['email']
@staticmethod
def _format_price(price):
return int(float(price) * 100)
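# Offline sanity sketch for the price-formatting helper only; no Stripe request
# is made and no Django settings are read (though the stripe and Django
# packages imported above must be installed). The amount is an arbitrary example.
if __name__ == '__main__':
    amount = Decimal('12.50')
    print(amount, 'EUR ->', StripePaymentGateway._format_price(amount), 'cents')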
| 2.09375
| 2
|
chromium/tools/telemetry/telemetry/internal/backends/chrome_inspector/tracing_backend_unittest.py
|
wedataintelligence/vivaldi-source
| 27
|
12781439
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import unittest
from telemetry import decorators
from telemetry.internal.backends.chrome_inspector import tracing_backend
from telemetry.internal.backends.chrome_inspector.tracing_backend import _DevToolsStreamReader
from telemetry.testing import fakes
from telemetry.testing import simple_mock
from telemetry.testing import tab_test_case
from telemetry.timeline import model as model_module
from telemetry.timeline import tracing_config
class TracingBackendTest(tab_test_case.TabTestCase):
# Number of consecutively requested memory dumps.
_REQUESTED_DUMP_COUNT = 3
@classmethod
def CustomizeBrowserOptions(cls, options):
options.AppendExtraBrowserArgs([
# Memory maps currently cannot be retrieved on sandboxed processes.
# See crbug.com/461788.
'--no-sandbox',
# Workaround to disable periodic memory dumps. See crbug.com/513692.
'--enable-memory-benchmarking'
])
def setUp(self):
super(TracingBackendTest, self).setUp()
self._tracing_controller = self._browser.platform.tracing_controller
if not self._tracing_controller.IsChromeTracingSupported():
self.skipTest('Browser does not support tracing, skipping test.')
if not self._browser.supports_memory_dumping:
self.skipTest('Browser does not support memory dumping, skipping test.')
@decorators.Disabled('win') # crbug.com/570955
def testDumpMemorySuccess(self):
# Check that dumping memory before tracing starts raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Start tracing with memory dumps enabled.
config = tracing_config.TracingConfig()
config.tracing_category_filter.AddDisabledByDefault(
'disabled-by-default-memory-infra')
config.enable_chrome_trace = True
self._tracing_controller.StartTracing(config)
# Request several memory dumps in a row and test that they were all
# successfully created with unique IDs.
expected_dump_ids = []
for _ in xrange(self._REQUESTED_DUMP_COUNT):
dump_id = self._browser.DumpMemory()
self.assertIsNotNone(dump_id)
self.assertNotIn(dump_id, expected_dump_ids)
expected_dump_ids.append(dump_id)
trace_data = self._tracing_controller.StopTracing()
# Check that dumping memory after tracing stopped raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Test that trace data is parsable.
model = model_module.TimelineModel(trace_data)
self.assertGreater(len(model.processes), 0)
# Test that the resulting model contains the requested memory dumps in the
# correct order (and nothing more).
actual_dump_ids = [d.dump_id for d in model.IterGlobalMemoryDumps()]
self.assertEqual(actual_dump_ids, expected_dump_ids)
@decorators.Disabled('win') # crbug.com/570955
def testDumpMemoryFailure(self):
# Check that dumping memory before tracing starts raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Start tracing with memory dumps disabled.
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
self._tracing_controller.StartTracing(config)
# Check that the method returns None if the dump was not successful.
self.assertIsNone(self._browser.DumpMemory())
trace_data = self._tracing_controller.StopTracing()
# Check that dumping memory after tracing stopped raises an exception.
self.assertRaises(Exception, self._browser.DumpMemory)
# Test that trace data is parsable.
model = model_module.TimelineModel(trace_data)
self.assertGreater(len(model.processes), 0)
# Test that the resulting model contains no memory dumps.
self.assertEqual(len(list(model.IterGlobalMemoryDumps())), 0)
class TracingBackendUnitTest(unittest.TestCase):
def setUp(self):
self._mock_timer = simple_mock.MockTimer(tracing_backend)
self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)
def tearDown(self):
self._mock_timer.Restore()
def testCollectTracingDataTimeout(self):
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 19)
self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 35)
backend = tracing_backend.TracingBackend(self._inspector_socket)
# The third response is 16 seconds after the second response, so we expect
# a TracingTimeoutException.
with self.assertRaises(tracing_backend.TracingTimeoutException):
backend._CollectTracingData(10)
self.assertEqual(2, len(backend._trace_events))
self.assertFalse(backend._has_received_all_tracing_data)
def testCollectTracingDataNoTimeout(self):
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
self._inspector_socket.AddEvent(
'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 14)
self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 19)
backend = tracing_backend.TracingBackend(self._inspector_socket)
backend._CollectTracingData(10)
self.assertEqual(2, len(backend._trace_events))
self.assertTrue(backend._has_received_all_tracing_data)
def testCollectTracingDataFromStream(self):
self._inspector_socket.AddEvent(
'Tracing.tracingComplete', {'stream': '42'}, 1)
self._inspector_socket.AddAsyncResponse(
'IO.read', {'data': '[{},{},{'}, 2)
self._inspector_socket.AddAsyncResponse(
'IO.read', {'data': '},{},{}]', 'eof': True}, 3)
backend = tracing_backend.TracingBackend(self._inspector_socket)
backend._CollectTracingData(10)
self.assertEqual(5, len(backend._trace_events))
self.assertTrue(backend._has_received_all_tracing_data)
def testDumpMemorySuccess(self):
self._inspector_socket.AddResponseHandler(
'Tracing.requestMemoryDump',
lambda req: {'result': {'success': True, 'dumpGuid': '42abc'}})
backend = tracing_backend.TracingBackend(self._inspector_socket)
self.assertEqual(backend.DumpMemory(), '42abc')
def testDumpMemoryFailure(self):
self._inspector_socket.AddResponseHandler(
'Tracing.requestMemoryDump',
lambda req: {'result': {'success': False, 'dumpGuid': '42abc'}})
backend = tracing_backend.TracingBackend(self._inspector_socket)
self.assertIsNone(backend.DumpMemory())
class DevToolsStreamPerformanceTest(unittest.TestCase):
def setUp(self):
self._mock_timer = simple_mock.MockTimer(tracing_backend)
self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)
def _MeasureReadTime(self, count):
mock_time = self._mock_timer.time() + 1
payload = ','.join(['{}'] * 5000)
self._inspector_socket.AddAsyncResponse('IO.read', {'data': '[' + payload},
mock_time)
startClock = time.clock()
done = {'done': False}
def mark_done(data):
del data # unused
done['done'] = True
reader = _DevToolsStreamReader(self._inspector_socket, 'dummy')
reader.Read(mark_done)
while not done['done']:
mock_time += 1
if count > 0:
self._inspector_socket.AddAsyncResponse('IO.read', {'data': payload},
mock_time)
elif count == 0:
self._inspector_socket.AddAsyncResponse('IO.read',
{'data': payload + ']', 'eof': True}, mock_time)
count -= 1
self._inspector_socket.DispatchNotifications(10)
return time.clock() - startClock
def testReadTime(self):
t1k = self._MeasureReadTime(1000)
t10k = self._MeasureReadTime(10000)
# Time is an illusion, CPU time is doubly so, allow great deal of tolerance.
toleranceFactor = 5
self.assertLess(t10k / t1k, 10000 / 1000 * toleranceFactor)
| 2.046875
| 2
|
examples/engines/common.py
|
sergunya17/catalyst
| 4
|
12781440
|
from functools import partial
from catalyst import dl, SETTINGS
E2E = {
"de": dl.DeviceEngine,
"dp": dl.DataParallelEngine,
"ddp": dl.DistributedDataParallelEngine,
}
if SETTINGS.amp_required:
E2E.update(
{"amp-dp": dl.DataParallelAMPEngine, "amp-ddp": dl.DistributedDataParallelAMPEngine}
)
if SETTINGS.apex_required:
E2E.update(
{"apex-dp": dl.DataParallelAPEXEngine, "apex-ddp": dl.DistributedDataParallelAPEXEngine}
)
if SETTINGS.deepspeed_required:
E2E.update({"ds-ddp": dl.DistributedDataParallelDeepSpeedEngine})
if SETTINGS.fairscale_required:
E2E.update(
{
"fs-pp": dl.PipelineParallelFairScaleEngine,
"fs-ddp": dl.SharedDataParallelFairScaleEngine,
"fs-ddp-amp": dl.SharedDataParallelFairScaleAMPEngine,
# for some reason we could catch a bug with FairScale flatten wrapper here, so...
"fs-fddp": partial(
dl.FullySharedDataParallelFairScaleEngine, ddp_kwargs={"flatten_parameters": False}
),
}
)
if SETTINGS.xla_required:
E2E.update({"xla": dl.XLAEngine, "xla-ddp": dl.DistributedXLAEngine})
| 1.992188
| 2
|
game-server/src/server/models.py
|
joniumGit/distributed-minesweeper
| 0
|
12781441
|
<gh_stars>0
import os
from typing import List, Optional, Iterable
from minesweeper.game import Minesweeper, Status
from pydantic import BaseModel, conint, Field, root_validator, validator
MAX_WIDTH = int(os.getenv('DS_MAX_WIDTH', '256'))
MAX_HEIGHT = int(os.getenv('DS_MAX_HEIGHT', '256'))
class Square(BaseModel):
x: conint(ge=0)
y: conint(ge=0)
open: Optional[bool]
flag: Optional[bool]
mine: Optional[bool]
value: Optional[conint(ge=0, lt=9)]
class Squares(BaseModel):
status: Status
items: Iterable[Square]
class Start(BaseModel):
width: conint(gt=0, lt=MAX_WIDTH)
height: conint(gt=0, lt=MAX_HEIGHT)
mines: conint(gt=0) = Field(..., description='Maximum is calculated via (width - 1)(height - 1)')
@validator("width")
def validate_width(cls, value):
assert value < MAX_WIDTH, "Field too wide"
return value
@validator("height")
def validate_height(cls, value):
assert value < MAX_HEIGHT, "Field too tall"
return value
@root_validator(pre=False, skip_on_failure=True)
def validate_combination(cls, value):
from minesweeper.logic import check_dims
width = value.get('width')
height = value.get('height')
mines = value.get('mines')
check_dims(width, height, mines)
return value
class Result(BaseModel):
status: Optional[Status]
mine: Optional[bool]
items: List[Square] = Field(default_factory=list)
class Config:
arbitrary_types_allowed = True
class Move(BaseModel):
x: conint(ge=0)
y: conint(ge=0)
@classmethod
def adapt(cls, game: Minesweeper):
"""
Modify model constraints to reflect current game
"""
from pydantic import BaseConfig
from pydantic.fields import ModelField
cls.__fields__['x'] = ModelField(
name='x',
type_=conint(ge=0, lt=game.width),
model_config=BaseConfig,
class_validators=None
)
cls.__fields__['y'] = ModelField(
name='y',
type_=conint(ge=0, lt=game.height),
model_config=BaseConfig,
class_validators=None
)
cls.__schema_cache__.clear()
def to_url(self):
return f'x={self.x}&y={self.y}'
__all__ = [
'Square',
'Squares',
'Start',
'Result',
'Move'
]
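# Tiny illustration of the request/response models (assumes the minesweeper
# package imported above is installed; the coordinates are arbitrary).
if __name__ == "__main__":
    print(Square(x=1, y=2, flag=True).json())
    print(Move(x=3, y=4).to_url())  # -> "x=3&y=4"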
| 2.703125
| 3
|
example/avr/mk-avr-kalman-sim.e.py
|
martinmoene/kalman-estimator
| 20
|
12781442
|
#!/usr/bin/env python
# Copyright 2018 by <NAME>
#
# https://github.com/martinmoene/kalman-estimator
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import os
nt = 'double'
nt = 'fp32_t'
std = 'c++17'
opt = '-O2'
mcu = 'atmega328p'
fcpu = '16000000L'
filename = 'avr-kalman-sim.cpp'
verbose = '-vv'
include = '../../include'
tpl = 'python ../../script/avr-gcc.py {verbose} -std={std} {opt} -mmcu={mcu} -fcpu={fcpu} -DKE_NUMERIC_TYPE={nt} -I{include} {filename}'
cmd = tpl.format(nt=nt, verbose=verbose, std=std, opt=opt, mcu=mcu, fcpu=fcpu, include=include, filename=filename)
print( cmd )
os.system( cmd )
| 2
| 2
|
back-end/authorizer/authorizer.py
|
jjanczyszyn/serverless-instagram
| 18
|
12781443
|
<gh_stars>10-100
"""
based on https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints/
blob/master/blueprints/python/api-gateway-authorizer-python.py
"""
import jwt
import os
import re
SECRET = os.environ['AUTH0_SECRET']
AUTH0_CLIENT_ID = os.environ['AUTH0_CLIENT_ID']
def handler(event, context):
auth_token = event['authorizationToken']
if not auth_token:
raise Exception('Unauthorized')
token = auth_token.split(' ')[1]
try:
payload = jwt.decode(token, SECRET, algorithms=['HS256'], audience=AUTH0_CLIENT_ID)
except jwt.InvalidTokenError:
raise Exception('Unauthorized')
else:
return get_auth_response(payload['email'], event)
def get_auth_response(principal_id, event):
"""
Builds auth response allowing all methods
"""
aws_region, aws_account_id, api_gateway_info = event['methodArn'].split(':')[3:]
api_gateway_id, stage = api_gateway_info.split('/')[:2]
# tmp = event['methodArn'].split(':')
# api_gateway_arn_tmp = tmp[5].split('/')
# aws_account_id = tmp[4]
# rest_api_id = api_gateway_arn_tmp[0]
# aws_region = tmp[3]
# stage = api_gateway_arn_tmp[1]
policy = AuthPolicy(principal_id, aws_account_id, api_gateway_id, aws_region, stage)
policy.allow_all_methods()
auth_response = policy.build()
context = {
'user_id': principal_id
}
auth_response['context'] = context
return auth_response
class HttpVerb:
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
HEAD = "HEAD"
DELETE = "DELETE"
OPTIONS = "OPTIONS"
ALL = "*"
class AuthPolicy(object):
# The policy version used for the evaluation. This should always be '2012-10-17'
version = "2012-10-17"
# The regular expression used to validate resource paths for the policy
    path_regex = r"^[/.a-zA-Z0-9-\*]+$"
def __init__(self, principal_id, aws_account_id, rest_api_id='*', region='*', stage='*'):
self.aws_account_id = aws_account_id
self.principal_id = principal_id
self.allow_methods = []
self.deny_methods = []
self.rest_api_id = rest_api_id
self.region = region
self.stage = stage
def _add_method(self, effect, verb, resource, conditions):
"""
Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null.
"""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resource_pattern = re.compile(self.path_regex)
if not resource_pattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.path_regex)
if resource[:1] == "/":
resource = resource[1:]
resource_arn = (
"arn:aws:execute-api:" +
self.region + ":" +
self.aws_account_id + ":" +
self.rest_api_id + "/" +
self.stage + "/" +
verb + "/" +
resource
)
if effect.lower() == "allow":
self.allow_methods.append({
'resourceArn': resource_arn,
'conditions': conditions
})
elif effect.lower() == "deny":
self.deny_methods.append({
'resourceArn': resource_arn,
'conditions': conditions
})
@staticmethod
def _get_empty_statement(effect):
"""
Returns an empty statement object prepopulated with the correct action and the
desired effect.
"""
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _get_statement_for_effect(self, effect, methods):
"""
This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy.
"""
statements = []
if len(methods) > 0:
statement = self._get_empty_statement(effect)
for method in methods:
if not method['conditions']:
statement['Resource'].append(method['resourceArn'])
else:
conditional_statement = self._get_empty_statement(effect)
conditional_statement['Resource'].append(method['resourceArn'])
conditional_statement['Condition'] = method['conditions']
statements.append(conditional_statement)
statements.append(statement)
return statements
def allow_all_methods(self):
"""
Adds a '*' allow to the policy to authorize access to all methods of an API
"""
self._add_method("Allow", HttpVerb.ALL, "*", [])
def build(self):
"""
Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy.
"""
if not (self.allow_methods or self.deny_methods):
raise NameError("No statements defined for the policy")
policy = {
'principalId': self.principal_id,
'policyDocument': {
'Version': self.version,
'Statement': []
}
}
policy['policyDocument']['Statement'].extend(
self._get_statement_for_effect("Allow", self.allow_methods)
)
policy['policyDocument']['Statement'].extend(
self._get_statement_for_effect("Deny", self.deny_methods)
)
return policy
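# Offline sketch of the policy document this module produces; the principal,
# account id, API id and stage below are made-up placeholders, and running the
# module still requires AUTH0_SECRET / AUTH0_CLIENT_ID in the environment
# (they are read at import time above).
if __name__ == '__main__':
    import json
    policy = AuthPolicy('user@example.com', '123456789012',
                        rest_api_id='abc123', region='us-east-1', stage='dev')
    policy.allow_all_methods()
    print(json.dumps(policy.build(), indent=2))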
| 2.203125
| 2
|
lib/improver/wxcode/wxcode_utilities.py
|
TomekTrzeciak/improver
| 0
|
12781444
|
<filename>lib/improver/wxcode/wxcode_utilities.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This module defines the utilities required for wxcode plugin """
from collections import OrderedDict
import iris
import numpy as np
from iris.exceptions import CoordinateNotFoundError
import improver.utilities.solar as solar
_WX_DICT_IN = {0: 'Clear_Night',
1: 'Sunny_Day',
2: 'Partly_Cloudy_Night',
3: 'Partly_Cloudy_Day',
4: 'Dust',
5: 'Mist',
6: 'Fog',
7: 'Cloudy',
8: 'Overcast',
9: 'Light_Shower_Night',
10: 'Light_Shower_Day',
11: 'Drizzle',
12: 'Light_Rain',
13: 'Heavy_Shower_Night',
14: 'Heavy_Shower_Day',
15: 'Heavy_Rain',
16: 'Sleet_Shower_Night',
17: 'Sleet_Shower_Day',
18: 'Sleet',
19: 'Hail_Shower_Night',
20: 'Hail_Shower_Day',
21: 'Hail',
22: 'Light_Snow_Shower_Night',
23: 'Light_Snow_Shower_Day',
24: 'Light_Snow',
25: 'Heavy_Snow_Shower_Night',
26: 'Heavy_Snow_Shower_Day',
27: 'Heavy_Snow',
28: 'Thunder_Shower_Night',
29: 'Thunder_Shower_Day',
30: 'Thunder'}
WX_DICT = OrderedDict(sorted(_WX_DICT_IN.items(), key=lambda t: t[0]))
DAYNIGHT_CODES = [1, 3, 10, 14, 17, 20, 23, 26, 29]
def add_wxcode_metadata(cube):
""" Add weather code metadata to a cube
Args:
cube (iris.cube.Cube):
Cube which needs weather code metadata added.
Returns:
cube (iris.cube.Cube):
Cube with weather code metadata added.
"""
cube.long_name = "weather_code"
cube.standard_name = None
cube.var_name = None
cube.units = "1"
wx_keys = np.array(list(WX_DICT.keys()))
cube.attributes.update({'weather_code': wx_keys})
wxstring = " ".join(WX_DICT.values())
cube.attributes.update({'weather_code_meaning': wxstring})
return cube
def expand_nested_lists(query, key):
"""
Produce flat lists from list and nested lists.
Args:
query (dict):
A single query from the decision tree.
key (str):
A string denoting the field to be taken from the dict.
Returns:
items (list):
A 1D list containing all the values for a given key.
"""
items = []
for item in query[key]:
if isinstance(item, list):
items.extend(item)
else:
items.extend([item])
return items
def update_daynight(cubewx):
""" Update weather cube depending on whether it is day or night
Args:
cubewx(iris.cube.Cube):
Cube containing only daytime weather symbols.
Returns:
cubewx_daynight(iris.cube.Cube):
Cube containing day and night weather symbols
Raises:
CoordinateNotFoundError : cube must have time coordinate.
"""
if not cubewx.coords("time"):
msg = ("cube must have time coordinate ")
raise CoordinateNotFoundError(msg)
time_dim = cubewx.coord_dims('time')
if not time_dim:
cubewx_daynight = iris.util.new_axis(cubewx.copy(), 'time')
else:
cubewx_daynight = cubewx.copy()
daynightplugin = solar.DayNightMask()
daynight_mask = daynightplugin.process(cubewx_daynight)
# Loop over the codes which decrease by 1 if a night time value
# e.g. 1 - sunny day becomes 0 - clear night.
for val in DAYNIGHT_CODES:
index = np.where(cubewx_daynight.data == val)
# Where day leave as is, where night correct weather
# code to value - 1.
cubewx_daynight.data[index] = np.where(
daynight_mask.data[index] == daynightplugin.day,
cubewx_daynight.data[index],
cubewx_daynight.data[index] - 1)
if not time_dim:
cubewx_daynight = iris.util.squeeze(cubewx_daynight)
return cubewx_daynight
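# Tiny illustration of expand_nested_lists; the query dict is a made-up
# example rather than a node of the real IMPROVER decision tree.
if __name__ == '__main__':
    example_query = {'severity': [1, [2, 3], 4]}
    print(expand_nested_lists(example_query, 'severity'))  # -> [1, 2, 3, 4]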
| 1.5
| 2
|
smartmin/perms.py
|
nickhargreaves/smartmin
| 166
|
12781445
|
from django.contrib.auth.models import Permission
def assign_perm(perm, group):
"""
Assigns a permission to a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.add(perm)
return perm
def remove_perm(perm, group):
"""
Removes a permission from a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(perm)
return
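# Usage sketch only: it needs a configured Django project with the auth app
# migrated, and "auth.add_user" / the "editors" group are arbitrary examples.
if __name__ == "__main__":
    from django.contrib.auth.models import Group
    group, _ = Group.objects.get_or_create(name="editors")
    perm = assign_perm("auth.add_user", group)
    print("granted:", perm.codename)
    remove_perm("auth.add_user", group)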
| 2.40625
| 2
|
src/sst/elements/scheduler/simulations/python_scripts_Jan_2017/emberLoad.py
|
feldergast/sst-elements
| 2
|
12781446
|
import sys,getopt
#sys.path.insert(0, 'PATH')
sys.path.insert(0, '/mnt/nokrb/fkaplan3/SST/git/sst/sst-elements/src/sst/elements/ember/test')
import sst
from sst.merlin import *
import loadInfo
from loadInfo import *
import networkConfig
from networkConfig import *
import random
import defaultParams
import defaultSim
import chamaOpenIBParams
import chamaPSMParams
import bgqParams
import exaParams
debug = 0
emberVerbose = 0
embermotifLog = ''
emberrankmapper = ''
embermapFile = ''
networkStatOut = ''
statNodeList = []
jobid = 0
loadFile = ''
workList = []
workFlow = []
numCores = 1
numNodes = 0
platform = 'default'
netFlitSize = ''
netBW = ''
netPktSize = ''
netTopo = ''
netShape = ''
rtrArb = ''
routingAlg = ''
host_bw = ''
group_bw = ''
global_bw = ''
global_link_arrangement = ''
rndmPlacement = False
#rndmPlacement = True
bgPercentage = int(0)
bgMean = 1000
bgStddev = 300
bgMsgSize = 1000
motifDefaults = {
'cmd' : "",
'printStats' : 0,
'api': "HadesMP",
'spyplotmode': 0
}
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["topo=", "shape=", "routingAlg=", "link_arrangement=",
"debug=","platform=","numNodes=",
"numCores=","loadFile=","cmdLine=","printStats=","networkStatOut=","randomPlacement=",
"emberVerbose=","netBW=","netPktSize=","netFlitSize=",
"rtrArb=","embermotifLog=", "rankmapper=", "mapFile=",
"host_bw=","group_bw=","global_bw=",
"bgPercentage=","bgMean=","bgStddev=","bgMsgSize="])
except getopt.GetoptError as err:
print str(err)
sys.exit(2)
for o, a in opts:
if o in ("--shape"):
netShape = a
elif o in ("--platform"):
platform = a
elif o in ("--routingAlg"):
routingAlg = a
elif o in ("--numCores"):
numCores = a
elif o in ("--numNodes"):
numNodes = a
elif o in ("--debug"):
debug = a
elif o in ("--loadFile"):
loadFile = a
elif o in ("--cmdLine"):
motif = dict.copy(motifDefaults)
motif['cmd'] = a
workFlow.append( motif )
elif o in ("--topo"):
netTopo = a
elif o in ("--printStats"):
motifDefaults['printStats'] = a
elif o in ("--networkStatOut"):
networkStatOut = a
elif o in ("--emberVerbose"):
emberVerbose = a
elif o in ("--embermotifLog"):
embermotifLog = a
elif o in ("--rankmapper"):
emberrankmapper = a
elif o in ("--mapFile"):
embermapFile = a
elif o in ("--netBW"):
netBW = a
elif o in ("--host_bw"):
host_bw = a
elif o in ("--group_bw"):
group_bw = a
elif o in ("--global_bw"):
global_bw = a
elif o in ("--link_arrangement"):
global_link_arrangement = a
elif o in ("--netFlitSize"):
netFlitSize = a
elif o in ("--netPktSize"):
netPktSize = a
elif o in ("--rtrArb"):
rtrArb = a
elif o in ("--randomPlacement"):
if a == "True":
rndmPlacement = True
elif o in ("--bgPercentage"):
bgPercentage = int(a)
elif o in ("--bgMean"):
bgMean = int(a)
elif o in ("--bgStddev"):
bgStddev = int(a)
elif o in ("--bgMsgSize"):
bgMsgSize = int(a)
else:
        assert False, "unhandled option"
if 1 == len(sys.argv):
workFlow, numNodes, numCores = defaultSim.getWorkFlow( motifDefaults )
platform, netTopo, netShape = defaultSim.getNetwork( )
if workFlow:
workList.append( [jobid, workFlow] )
jobid += 1
print "EMBER: platform: {0}".format( platform )
platNetConfig = {}
if platform == "default":
nicParams = defaultParams.nicParams
networkParams = defaultParams.networkParams
hermesParams = defaultParams.hermesParams
emberParams = defaultParams.emberParams
elif platform == "chamaPSM":
nicParams = chamaPSMParams.nicParams
networkParams = chamaPSMParams.networkParams
hermesParams = chamaPSMParams.hermesParams
emberParams = chamaPSMParams.emberParams
platNetConfig = chamaPSMParams.netConfig
elif platform == "chamaOpenIB":
nicParams = chamaOpenIBParams.nicParams
networkParams = chamaOpenIBParams.networkParams
hermesParams = chamaOpenIBParams.hermesParams
emberParams = chamaOpenIBParams.emberParams
platNetConfig = chamaOpenIBParams.netConfig
elif platform == "bgq":
nicParams = bgqParams.nicParams
networkParams = bgqParams.networkParams
hermesParams = bgqParams.hermesParams
emberParams = bgqParams.emberParams
platNetConfig = bgqParams.netConfig
elif platform == "exa":
nicParams = exaParams.nicParams
networkParams = exaParams.networkParams
hermesParams = exaParams.hermesParams
emberParams = exaParams.emberParams
platNetConfig = exaParams.netConfig
if netBW:
networkParams['link_bw'] = netBW
#nicParams['link_bw'] = "1GB/s"
nicParams['link_bw'] = netBW
if netFlitSize:
networkParams['flitSize'] = netFlitSize
if netPktSize:
networkParams['packetSize'] = netPktSize
if "" == netTopo:
if platNetConfig['topology']:
netTopo = platNetConfig['topology']
else:
sys.exit("What topo? [torus|fattree|dragonfly]")
if "" == netShape:
if platNetConfig['shape']:
netShape = platNetConfig['shape']
else:
sys.exit("Error: " + netTopo + " needs shape")
if "torus" == netTopo:
topoInfo = TorusInfo(netShape)
topo = topoTorus()
elif "fattree" == netTopo:
topoInfo = FattreeInfo(netShape)
topo = topoFatTree()
elif "dragonfly" == netTopo:
topoInfo = DragonFlyInfo(netShape)
if "" != routingAlg:
topoInfo.params["dragonfly:algorithm"] = routingAlg
if routingAlg == "valiant":
nicParams['module'] = "merlin.reorderlinkcontrol"
if "" != host_bw:
topoInfo.params["link_bw:host"] = host_bw
if "" != group_bw:
topoInfo.params["link_bw:group"] = group_bw
if "" != global_bw:
topoInfo.params["link_bw:global"] = global_bw
topo = topoDragonFly()
elif "dragonfly2" == netTopo:
print "netTopo is dragonfly2"
topoInfo = DragonFly2Info(netShape)
topoInfo.params["dragonfly:intergroup_links"] = 1
topoInfo.params["xbar_bw"] = netBW
#topoInfo.params["xbar_bw"] = "17GB/s"
if "" != routingAlg:
topoInfo.params["dragonfly:algorithm"] = routingAlg
print routingAlg
if routingAlg == "valiant":
nicParams['module'] = "merlin.reorderlinkcontrol"
if "" != host_bw:
topoInfo.params["link_bw:host"] = host_bw
if "" != group_bw:
topoInfo.params["link_bw:group"] = group_bw
if "" != global_bw:
topoInfo.params["link_bw:global"] = global_bw
topo = topoDragonFly2()
print topo
#Set global link arrangements
#print global_link_arrangement
if "" == global_link_arrangement:
global_link_arrangement = "absolute"
if global_link_arrangement == "relative" or global_link_arrangement == "circulant":
topo.setRoutingModeRelative()
if global_link_arrangement == "circulant":
ngrp = int(topoInfo.params["dragonfly:num_groups"])
glm = []
for i in range(int(ngrp/2)):
glm.append(i)
if ngrp - 2 - i != i:
glm.append(ngrp - 2 - i)
topo.setGlobalLinkMap(glm)
else:
sys.exit("how did we get here")
if rtrArb:
print "EMBER: network: topology={0} shape={1} arbitration={2}".format(netTopo,netShape,rtrArb)
else:
print "EMBER: network: topology={0} shape={1}".format(netTopo,netShape)
if int(numNodes) == 0:
numNodes = int(topoInfo.getNumNodes())
if int(numNodes) > int(topoInfo.getNumNodes()):
sys.exit("need more nodes want " + str(numNodes) + ", have " + str(topoInfo.getNumNodes()))
print "EMBER: numNodes={0} numNics={1}".format(numNodes, topoInfo.getNumNodes() )
emptyNids = []
if jobid > 0 and rndmPlacement:
print "EMBER: random placement"
hermesParams["hermesParams.mapType"] = 'random'
random.seed( 0xf00dbeef )
nidList=""
nids = random.sample( xrange( int(topoInfo.getNumNodes())), int(numNodes) )
#nids.sort()
allNids = []
for num in range ( 0, int( topoInfo.getNumNodes()) ):
allNids.append( num )
emptyNids = list( set(allNids).difference( set(nids) ) )
while nids:
nidList += str(nids.pop(0))
if nids:
nidList +=","
tmp = workList[0]
tmp = tmp[1]
for x in tmp:
x['cmd'] = "-nidList=" + nidList + " " + x['cmd']
random.shuffle( emptyNids )
XXX = []
if rndmPlacement and bgPercentage > 0:
if bgPercentage > 100:
sys.exit( "fatal: bgPercentage " + str(bgPercentage) );
count = 0
bgPercentage = float(bgPercentage) / 100.0
avail = int( topoInfo.getNumNodes() * bgPercentage )
bgMotifs, r = divmod( avail - int(numNodes), 2 )
print "EMBER: netAlloced {0}%, bg motifs {1}, mean {2} ns, stddev {3} ns, msgsize {4} bytes".\
format(int(bgPercentage*100),bgMotifs,bgMean,bgStddev,bgMsgSize)
while ( count < bgMotifs ) :
workFlow = []
nidList = "-nidList=" + str(emptyNids[ count * 2 ] ) + "," + str(emptyNids[ count * 2 + 1])
motif = dict.copy(motifDefaults)
motif['cmd'] = nidList + " Init"
workFlow.append( motif )
motif = dict.copy(motifDefaults)
x,y = divmod( count , 60 )
motif['cmd'] = nidList + " TrafficGen mean="+str(bgMean)+ " stddev=" + \
str(bgStddev) + " messageSize="+str(bgMsgSize) + " startDelay=" + str( y * 500 )
workFlow.append( motif )
motif = dict.copy(motifDefaults)
motif['cmd'] = nidList + " Fini"
workFlow.append( motif )
workList.append( [ jobid, workFlow ] )
jobid += 1
count += 1
nicParams['verboseLevel'] = debug
hermesParams['hermesParams.verboseLevel'] = debug
hermesParams['hermesParams.nicParams.verboseLevel'] = debug
hermesParams['hermesParams.functionSM.verboseLevel'] = debug
hermesParams['hermesParams.ctrlMsg.verboseLevel'] = debug
emberParams['verbose'] = emberVerbose
if embermotifLog:
emberParams['motifLog'] = embermotifLog
if emberrankmapper:
emberParams['rankmapper'] = emberrankmapper
if embermapFile:
emberParams['mapFile'] = embermapFile
print "EMBER: network: BW={0} pktSize={1} flitSize={2}".format(
networkParams['link_bw'], networkParams['packetSize'], networkParams['flitSize'])
sst.merlin._params["link_lat"] = networkParams['link_lat']
sst.merlin._params["link_bw"] = networkParams['link_bw']
sst.merlin._params["xbar_bw"] = networkParams['link_bw']
#sst.merlin._params["xbar_bw"] = "17GB/s"
sst.merlin._params["flit_size"] = networkParams['flitSize']
sst.merlin._params["input_latency"] = networkParams['input_latency']
sst.merlin._params["output_latency"] = networkParams['output_latency']
sst.merlin._params["input_buf_size"] = networkParams['buffer_size']
sst.merlin._params["output_buf_size"] = networkParams['buffer_size']
if rtrArb:
sst.merlin._params["xbar_arb"] = "merlin." + rtrArb
sst.merlin._params.update( topoInfo.getNetworkParams() )
epParams = {}
epParams.update(emberParams)
epParams.update(hermesParams)
loadInfo = LoadInfo( nicParams, epParams, numNodes, numCores, topoInfo.getNumNodes() )
if len(loadFile) > 0:
if len(workList) > 0:
sys.exit("Error: can't specify both loadFile and cmdLine");
loadInfo.initFile( motifDefaults, loadFile, statNodeList )
else:
if len(workList) > 0:
if len(loadFile) > 0:
sys.exit("Error: can't specify both loadFile and cmdLine");
loadInfo.initWork( workList, statNodeList )
else:
sys.exit("Error: need a loadFile or cmdLine")
topo.prepParams()
topo.setEndPointFunc( loadInfo.setNode )
topo.build()
'''
sst.setStatisticLoadLevel(8)
sst.setStatisticOutput("sst.statOutputCSV")
sst.setStatisticOutputOptions( {"filepath" : "%s" %(networkStatOut), "separator" : ", " } )
sst.enableAllStatisticsForComponentType("merlin.hr_router")
'''
| 1.515625
| 2
|
tests/test_reply.py
|
LaudateCorpus1/apostello
| 69
|
12781447
|
<reponame>LaudateCorpus1/apostello<filename>tests/test_reply.py<gh_stars>10-100
import pytest
from tests.conftest import twilio_vcr
from apostello.models import Recipient, RecipientGroup
from apostello.reply import InboundSms
from apostello.utils import fetch_default_reply
@pytest.mark.django_db
class TestConstructReply:
"""Tests apostello.reply:InboundSms.construct_reply function."""
def test_no_existing_keyword(self, recipients):
msg = InboundSms({"From": str(recipients["calvin"].number), "Body": "nope"})
reply = msg.construct_reply()
assert reply == fetch_default_reply("keyword_no_match").replace("%name%", "John")
def test_existing_keyword(self, recipients, keywords):
msg = InboundSms({"From": str(recipients["calvin"].number), "Body": "test msg"})
reply = msg.construct_reply()
assert reply == "Test custom response with John"
@twilio_vcr
def test_name(self, recipients):
msg = InboundSms({"From": str(recipients["calvin"].number), "Body": "name <NAME>"})
reply = msg.construct_reply()
assert "John" in str(reply)
def test_name_never_contact(self, recipients):
recipients["beza"].never_contact = True
recipients["beza"].save()
msg = InboundSms({"From": str(recipients["beza"].number), "Body": "name"})
reply = msg.construct_reply()
assert len(reply) == 0
@twilio_vcr
def test_only_one_name(self, recipients):
msg = InboundSms({"From": str(recipients["calvin"].number), "Body": "name JohnCalvin"})
reply = msg.construct_reply()
assert "Something went wrong" in reply
@twilio_vcr
def test_stop_start(self, recipients):
msg = InboundSms({"From": str(recipients["calvin"].number), "Body": "stop "})
reply = msg.construct_reply()
assert len(reply) == 0
assert Recipient.objects.get(pk=recipients["calvin"].pk).is_blocking
msg = InboundSms({"From": str(recipients["calvin"].number), "Body": "start "})
reply = msg.construct_reply()
assert Recipient.objects.get(pk=recipients["calvin"].pk).is_blocking is False
assert "signing up" in reply
@twilio_vcr
def test_existing_keyword_new_contact(self, keywords):
msg = InboundSms({"From": "+447927401749", "Body": "test msg"})
reply = msg.construct_reply()
assert reply == "Thanks new person!"
@twilio_vcr
    def test_no_existing_keyword_new_contact(self, keywords):
msg = InboundSms({"From": "+447927401749", "Body": "2test msg"})
reply = msg.construct_reply()
assert reply == fetch_default_reply("default_no_keyword_auto_reply").replace("%name%", "Unknown")
def test_is_blocking_reply(self, recipients):
msg = InboundSms({"From": str(recipients["wesley"].number), "Body": "test"})
reply = msg.construct_reply()
assert len(reply) == 0
def test_do_not_reply(self, recipients):
msg = InboundSms({"From": str(recipients["beza"].number), "Body": "test"})
reply = msg.construct_reply()
assert len(reply) == 0
def test_never_contact(self, recipients):
recipients["beza"].never_contact = True
recipients["beza"].save()
msg = InboundSms({"From": str(recipients["beza"].number), "Body": "test"})
reply = msg.construct_reply()
assert len(reply) == 0
def test_switch_off_no_keyword_reply(self, recipients):
from site_config.models import DefaultResponses
dr = DefaultResponses.get_solo()
dr.keyword_no_match = ""
dr.clean()
dr.save()
msg = InboundSms({"From": str(recipients["calvin"].number), "Body": "test"})
reply = msg.construct_reply()
assert len(reply) == 0
def test_contact_added_to_group_keyword(self, recipients, groups, keywords):
populated_group = groups["test_group"]
empty_group = groups["empty_group"]
assert empty_group.recipient_set.count() == 0
assert populated_group.recipient_set.count() == 2
test_keyword = keywords["test"]
test_keyword.linked_groups.add(empty_group, populated_group)
test_keyword.save()
msg = InboundSms({"From": str(recipients["beza"].number), "Body": "test"})
reply = msg.construct_reply()
grp1 = RecipientGroup.objects.get(name="Empty Group")
assert grp1.recipient_set.all().count() == 1
grp2 = RecipientGroup.objects.get(name="Test Group")
assert grp2.recipient_set.all().count() == 3
# let's repeat to test case where contact already in group:
reply = msg.construct_reply()
grp = RecipientGroup.objects.get(name="Empty Group")
assert grp.recipient_set.count() == 1
| 2.28125
| 2
|
mockerena/models/schema.py
|
FanThreeSixty/mockerena
| 1
|
12781448
|
"""Definition for mockerena schema
.. codeauthor:: <NAME> <<EMAIL>>
"""
from copy import deepcopy
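# Eve resource definition for the "schema" endpoint: each document describes one mock data
# set (columns, output format, row count, and optional canned responses).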
SCHEMA = {
"item_title": "schema",
"schema": {
"schema": {
"type": "string",
"minlength": 3,
"maxlength": 64,
"unique": True,
"required": True
},
"num_rows": {
"type": "integer",
"min": 1,
"default": 1000
},
"file_format": {
"type": "string",
"required": True
},
"file_name": {
"type": "string",
"minlength": 3,
"maxlength": 64,
"unique": True,
"required": True
},
"include_header": {"type": "boolean"},
"exclude_null": {"type": "boolean"},
"is_nested": {"type": "boolean"},
"delimiter": {"type": "string"},
"key_separator": {"type": "string"},
"quote_character": {"type": "string"},
"template": {"type": "string"},
"root_node": {"type": "string"},
"table_name": {
"type": "string"
},
"columns": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"type": {"type": "string"},
"name": {"type": "string"},
"format": {"type": "string"},
"args": {"type": "dict"},
"percent_empty": {
"type": "float",
"min": 0,
"max": 1
},
"truncate": {"type": "boolean"},
"function": {"type": "string"},
"description": {"type": "string"}
}
}
},
"responses": {
"type": "list",
"items": [
{
"type": "dict",
"schema": {
"status_code": {
"type": "integer",
"min": 100,
"max": 599
},
"headers": {"type": "dict", "allow_unknown": True},
"content_type": {"type": "string"},
"data": {"type": "string"},
"weight": {
"type": "integer",
"min": 1
}
}
}
]
}
},
"additional_lookup": {
"url": 'regex("[\\w]+")',
"field": "schema"
},
}
# Build a relaxed copy of SCHEMA for the custom_schema route: the uniqueness constraints
# on "schema" and "file_name" do not apply to ad-hoc schema definitions.
CUSTOM_SCHEMA = deepcopy(SCHEMA["schema"])
del CUSTOM_SCHEMA["schema"]["unique"]
del CUSTOM_SCHEMA["file_name"]["unique"]
| 2.171875
| 2
|
examples/python/keras/func_cifar10_alexnet.py
|
NodLabs/FlexFlow
| 1
|
12781449
|
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flexflow.keras.models import Model, Sequential
from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate
import flexflow.keras.optimizers
from flexflow.keras.datasets import mnist
from flexflow.keras.datasets import cifar10
from flexflow.keras import losses
from flexflow.keras import metrics
from flexflow.keras.callbacks import Callback, VerifyMetrics, EpochVerifyMetrics
from accuracy import ModelAccuracy
import flexflow.core as ff
import numpy as np
import argparse
import gc
from PIL import Image
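# Builds AlexNet with the FlexFlow Keras functional API: CIFAR-10 images are upscaled from
# 32x32 to 229x229, run through five convolution and three max-pooling stages, flattened
# into three dense layers, and trained with SGD on sparse categorical cross-entropy.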
def top_level_task():
num_samples = 10000
(x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)
full_input_np = np.zeros((num_samples, 3, 229, 229), dtype=np.float32)
for i in range(0, num_samples):
image = x_train[i, :, :, :]
image = image.transpose(1, 2, 0)
pil_image = Image.fromarray(image)
pil_image = pil_image.resize((229,229), Image.NEAREST)
image = np.array(pil_image, dtype=np.float32)
image = image.transpose(2, 0, 1)
full_input_np[i, :, :, :] = image
if (i == 0):
print(image)
full_input_np /= 255
y_train = y_train.astype('int32')
full_label_np = y_train
input_tensor = Input(shape=(3, 229, 229), dtype="float32")
output = Conv2D(filters=64, input_shape=(3,229,229), kernel_size=(11,11), strides=(4,4), padding=(2,2), activation="relu")(input_tensor)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Conv2D(filters=192, kernel_size=(5,5), strides=(1,1), padding=(2,2), activation="relu")(output)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Flatten()(output)
output = Dense(4096, activation="relu")(output)
output = Dense(4096, activation="relu")(output)
output = Dense(10)(output)
output = Activation("softmax")(output)
model = Model(input_tensor, output)
opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])
print(model.summary())
model.fit(full_input_np, full_label_np, epochs=40, callbacks=[VerifyMetrics(ModelAccuracy.CIFAR10_ALEXNET), EpochVerifyMetrics(ModelAccuracy.CIFAR10_ALEXNET)])
if __name__ == "__main__":
print("Functional API, cifar10 alexnet")
top_level_task()
gc.collect()
| 2.328125
| 2
|
intro/app/schemas.py
|
bmugenya/FastAPI
| 0
|
12781450
|
<filename>intro/app/schemas.py
from pydantic import BaseModel,EmailStr
from typing import Optional
from datetime import datetime
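# Pydantic models used as FastAPI request/response schemas for posts, users, and auth tokens.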
class Post(BaseModel):
    title: str
    content: str
class User(BaseModel):
    email: EmailStr
    password: str
class Token(BaseModel):
    access_token: str
    token_type: str
class TokenData(BaseModel):
    username: Optional[str] = None
class PostCreate(Post):
    title: str
    content: str
    email: EmailStr
| 2.53125
| 3
|