max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/rdflib_django/forms.py | publysher/rdflib-django | 3 | 12758051 | """
Base forms for editing the models in this module. You can use or extend these forms in your
project to ensure that all validation is correct.
"""
from django import forms
from rdflib_django import models
from rdflib import namespace
class NamespaceForm(forms.ModelForm):
    """Form for editing RDF namespace prefix/URI pairs.

    Namespaces flagged as *fixed* are rendered read-only and keep their
    stored prefix/URI regardless of what is submitted.
    """

    class Meta:
        model = models.NamespaceModel
        fields = ('prefix', 'uri')

    def __init__(self, *args, **kwargs):
        super(NamespaceForm, self).__init__(*args, **kwargs)
        if self.instance.fixed:
            # Fixed namespaces must not be editable in the UI.
            for field_name in ('prefix', 'uri'):
                self.fields[field_name].widget.attrs['readonly'] = True

    def clean_prefix(self):
        """Validate the prefix: it must be a legal XML NCName."""
        if self.instance.fixed:
            return self.instance.prefix
        prefix = self.cleaned_data['prefix']
        if namespace.is_ncname(prefix):
            return prefix
        raise forms.ValidationError("This is an invalid prefix")

    def clean_uri(self):
        """Validate the URI (fixed namespaces keep their stored value)."""
        if self.instance.fixed:
            return self.instance.uri
        # todo: URI validation
        return self.cleaned_data['uri']
| 2.484375 | 2 |
server/src/oscarbluelight/dashboard/ranges/apps.py | MaximBrewer/sebe | 8 | 12758052 | <filename>server/src/oscarbluelight/dashboard/ranges/apps.py
from django.urls import re_path
from oscar.apps.dashboard.ranges import apps
from oscar.core.loading import get_class
class RangesDashboardConfig(apps.RangesDashboardConfig):
    """Oscar ranges-dashboard app config extended with bluelight views."""

    name = "oscarbluelight.dashboard.ranges"

    def ready(self):
        super().ready()
        # Swap in the overridden list and product views.
        self.list_view = get_class("ranges_dashboard.views", "RangeListView")
        self.products_view = get_class("ranges_dashboard.views", "RangeProductListView")

    def get_urls(self):
        """Return the stock dashboard URLs plus price-list and excluded-products routes."""
        price_list_view = get_class("ranges_dashboard.views", "RangePriceListView")
        excluded_products_view = get_class(
            "ranges_dashboard.views", "RangeExcludedProductsView"
        )
        extra_urls = [
            re_path(
                r"^(?P<pk>\d+)/prices/$",
                price_list_view.as_view(),
                name="range-prices",
            ),
            re_path(
                r"^(?P<pk>\d+)/excluded-products/$",
                excluded_products_view.as_view(),
                name="range-excluded-products",
            ),
        ]
        return super().get_urls() + self.post_process_urls(extra_urls)
| 1.9375 | 2 |
setup.py | ericoporto/agstoolbox | 2 | 12758053 | from codecs import open
from setuptools import setup
import re
# Read the version straight from the package's __init__ so there is a single
# source of truth for the version string.
with open('src/agstoolbox/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

setup(
    name='agstoolbox',
    version=version,
    description='A Toolbox for managing AGS Editor versions.',
    url='https://github.com/ericoporto/agstoolbox',
    download_url='https://github.com/ericoporto/agstoolbox/tarball/' + version,
    author='erico',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License'
    ],
    keywords='AGS Toolbox',
    install_requires=['pyqt6', 'requests', 'defusedxml', 'platformdirs', 'pefile'],
    packages=["agstoolbox"],
    package_dir={"": "src"},
    scripts=["agstoolbox", "atbx"],
    package_data={
        'agstoolbox': ['data/*.png']
    },
)
| 1.53125 | 2 |
tests/core/txs/test_TransactionValidateSlave.py | gcchainlabs/gcchain-core | 1 | 12758054 | <reponame>gcchainlabs/gcchain-core
from unittest import TestCase
from mock import patch, Mock
from gc.core import config
from gc.core.Indexer import Indexer
from gc.core.misc import logger
from gc.core.State import State
from gc.core.StateContainer import StateContainer
from gc.core.OptimizedAddressState import OptimizedAddressState
from gc.core.txs.MessageTransaction import MessageTransaction
from tests.misc.helper import get_alice_xmss, get_bob_xmss, set_gc_dir
logger.initialize_default()
@patch('gc.core.txs.Transaction.logger')
class TestTransactionValidateSlave(TestCase):
    """Tests for Transaction.validate_slave() master/slave address rules."""

    def setUp(self):
        # A fresh State backed by an empty data directory for each test.
        with set_gc_dir('no_data'):
            self.state = State()
        self.alice = get_alice_xmss()
        # Minimal MessageTransaction parameters, signed with Alice's XMSS key.
        self.params = {
            "message_hash": b'Test Message',
            "addr_to": None,
            "fee": 1,
            "xmss_pk": self.alice.pk
        }
        self.m_addr_state = Mock(autospec=OptimizedAddressState, name='addr_state', balance=200)
        self.m_addr_from_pk_state = Mock(autospec=OptimizedAddressState, name='addr_from_pk_state')

    def test_validate_slave_valid(self, m_logger):
        # No master_addr set: validation should succeed.
        tx = MessageTransaction.create(**self.params)
        tx.sign(self.alice)
        result = tx.validate_slave(0)
        self.assertTrue(result)

    def test_validate_slave_master_addr_same_as_signing_addr(self, m_logger):
        # master_addr equal to the signing address is invalid.
        self.params["master_addr"] = self.alice.address
        tx = MessageTransaction.create(**self.params)
        tx.sign(self.alice)
        result = tx.validate_slave(None)
        self.assertFalse(result)

    def test_validate_slave_signing_xmss_state_has_no_slave_permissions_in_state(self, m_logger):
        # Bob signs with Alice as master, but the state has no slave entry
        # granting Bob permission, so validation must fail.
        bob = get_bob_xmss()
        # Let's say Alice is Bob's master.
        self.params["master_addr"] = self.alice.address
        self.params["xmss_pk"] = bob.pk
        # We need to add extra data to the mock AddressState.
        tx = MessageTransaction.create(**self.params)
        tx.sign(self.alice)
        state_container = StateContainer(addresses_state=dict(),
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=1000,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        result = tx.validate_slave(state_container)
        self.assertFalse(result)
| 2.15625 | 2 |
mplhep/alice.py | fmazzasc/mplhep | 0 | 12758055 | <reponame>fmazzasc/mplhep<gh_stars>0
# Log styles
from . import styles_alice as style
from . import label as label_base
from .label import lumitext
import mplhep._deprecate as deprecate
from matplotlib import docstring
__all__ = [style, lumitext]
# Experiment wrappers, full names made private
def _alice_text(text="", **kwargs):
    # ALICE-styled wrapper around the generic experiment text helper.
    return label_base._exp_text(
        "ALICE", text=text, fontsize=28, loc=1, italic=(False, False), **kwargs
    )
def _alice_label(**kwargs):
    # ALICE-styled wrapper around the generic experiment label helper.
    return label_base._exp_label(
        exp="ALICE", fontsize=28, loc=1, italic=(False, False), rlabel="", **kwargs
    )
@docstring.copy(label_base._exp_text)
def text(*args, **kwargs):
    # Public alias; docstring is copied from label_base._exp_text.
    return _alice_text(*args, **kwargs)
@docstring.copy(label_base._exp_label)
def label(**kwargs):
    # Public alias; docstring is copied from label_base._exp_label.
    return _alice_label(**kwargs)
| 1.945313 | 2 |
migrations/versions/064a0de5f947_gpu_info.py | jonkeane/conbench | 48 | 12758056 | <filename>migrations/versions/064a0de5f947_gpu_info.py
"""gpu info
Revision ID: 064a0de5f947
Revises: <KEY>
Create Date: 2021-09-03 11:00:08.205984
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "064a0de5f947"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
    """Add GPU columns to ``machine`` and fold them into the unique index."""
    op.add_column("machine", sa.Column("gpu_count", sa.Integer(), nullable=True))
    op.add_column(
        "machine",
        sa.Column("gpu_product_names", postgresql.ARRAY(sa.Text()), nullable=True),
    )
    index_columns = [
        "name",
        "architecture_name",
        "kernel_name",
        "os_name",
        "os_version",
        "cpu_model_name",
        "cpu_l1d_cache_bytes",
        "cpu_l1i_cache_bytes",
        "cpu_l2_cache_bytes",
        "cpu_l3_cache_bytes",
        "cpu_core_count",
        "cpu_thread_count",
        "cpu_frequency_max_hz",
        "memory_bytes",
        "gpu_count",
        "gpu_product_names",
    ]
    # Rebuild the unique machine index so it covers the new GPU columns.
    op.drop_index("machine_index", table_name="machine")
    op.create_index("machine_index", "machine", index_columns, unique=True)
def downgrade():
    """Drop the GPU columns and restore the previous (non-unique) index."""
    index_columns = [
        "name",
        "architecture_name",
        "kernel_name",
        "os_name",
        "os_version",
        "cpu_model_name",
        "cpu_l1d_cache_bytes",
        "cpu_l1i_cache_bytes",
        "cpu_l2_cache_bytes",
        "cpu_l3_cache_bytes",
        "cpu_core_count",
        "cpu_thread_count",
        "cpu_frequency_max_hz",
        "memory_bytes",
    ]
    # Recreate the index without the GPU columns, then remove the columns.
    op.drop_index("machine_index", table_name="machine")
    op.create_index("machine_index", "machine", index_columns, unique=False)
    op.drop_column("machine", "gpu_product_names")
    op.drop_column("machine", "gpu_count")
| 1.8125 | 2 |
main_quickEnergy_LiH.py | benmoseley/simple-variational-quantum-eigensolver | 3 | 12758057 | <reponame>benmoseley/simple-variational-quantum-eigensolver
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 15:38:02 2018
@author: bmoseley
"""
'''
Main code which searches for the ground state energy of a molecule specified by a Hamiltonian instance
using an Ansatz circuit object, a VQE instance and an optimiser.
Tests both SPSA and Bayesian optimisation optimisers.
Runs each optimiser with a n_initialisations different starting initialisations so that statistics on
convergence can be gathered. Plots the result.
'''
import numpy as np
import pickle
import multiprocessing
from HamiltonianFile import HamiltonianFile
from VQE import VQE
from LiHAnsatz import LiHAnsatz
from optimisers import SPSA, bayesOptimisation, best_value
# get ansatz
Ansatz = LiHAnsatz

# get Hamiltonian
Ham = HamiltonianFile('hamiltonians/LiHat145.txt')
Hamiltonian, hamiltonian = Ham.Hamiltonian, Ham.hamiltonian
LiH = VQE(hamiltonian=hamiltonian, Hamiltonian=Hamiltonian, Ansatz=LiHAnsatz)

# fixed parameters
n_steps = 200
quickEnergy = True
n_repeats = 0
stepSize = 0.05
lookSize = 0.05
acquisition_type = "MPI"

# other parameters
n_initialisations = 8  # number of random initialisations (samples) to use
n_processes = 8  # number of parallel processes to use

## RUN OPTIMISATIONS
# TODO: parallelisation should only be used for quickEnergy=True option (otherwise need to worry about random seed in VQE)
def run(i):
    """Run one SPSA and one Bayesian optimisation from a seeded random start.

    Returns [X, Y, Y_true, Xb, Yb, Yb_true]: sampled parameters/energies for
    the SPSA run followed by the Bayesian-optimisation run.
    """
    print("Running initialisation %i of %i.."%(i+1, n_initialisations))
    np.random.seed(i)
    initial_parameters = np.pi*(np.random.rand(Ansatz.n_parameters))  # randomly vary initialisation
    X,Y,Y_true,_,_ = SPSA(stepSize, lookSize, n_steps, initial_parameters, quickEnergy, LiH, n_repeats, seed=i)
    Xb,Yb,Yb_true = bayesOptimisation(acquisition_type, n_steps, quickEnergy, LiH, n_repeats, seed=i)
    return [X,Y,Y_true,Xb,Yb,Yb_true]

pool = multiprocessing.Pool(processes=n_processes)
results = pool.map(run, range(n_initialisations))  # returns list of results
pool.close()  # clean up file pointers
pool.join()

# SAVE (use a context manager so the file handle is closed deterministically,
# instead of the original `pickle.dump(results, open(...))` which leaked it)
with open("results/results.pickle", "wb") as f:
    pickle.dump(results, f)

# PLOT
import matplotlib.pyplot as plt
plt.figure()
plt.title("SPSA")
for result in results: plt.plot(best_value(result[0],result[1],result[2])[2])
plt.ylabel("Energy (Hartree)")
plt.figure()
plt.title("Bayesian optimisation")
for result in results: plt.plot(best_value(result[3],result[4],result[5])[2])
plt.ylabel("Energy (Hartree)")
| 2.34375 | 2 |
ngram.py | NalinPlad/Google-Ngram-Game | 0 | 12758058 | <reponame>NalinPlad/Google-Ngram-Game<filename>ngram.py
import requests
import json
import csv
import urllib
import random
print("Welcome to the Google Ngram Game! \n")
print("*--------------------------------* \n")
print("HOW TO PLAY----------------------* \n")
print("When prompted, enter [1] or [2] \n based on which of the two \n you think is used more in \n literature.")
print("ABOUT----------------------------* \n")
print("Google Ngram is a place where you \n can check the frequency of \n words used in writing. You \n can use Ngram at \n https://books.google.com/ngrams/")

# Smoothing window forwarded to the Ngram API.
smoothing = 10

# Load the candidate nouns from the bundled CSV (one noun per row).
words = []
with open('nouns.csv', newline='') as csvfile:
    nouns = csv.reader(csvfile, delimiter=',', quotechar='|')
    for row in nouns:
        words.append(''.join(row))
def get_word():
    """Pick a random noun and fetch its latest usage frequency from Google Ngram.

    Returns a ``(frequency, word)`` tuple.  NOTE(review): ``frequency`` is a
    *string* obtained by splitting the raw JSON response text on commas and
    slicing off trailing characters instead of parsing it with the ``json``
    module — fragile; verify against the live API response format.
    """
    word = random.choice(words)
    # print(f"Data for {word}")
    url = f"https://books.google.com/ngrams/json?content={word}&year_start=2018&year_end=2019&corpus=26&smoothing={smoothing}"
    r = urllib.request.urlopen(url)
    data = r.read().decode(r.info().get_param('charset') or 'utf-8')
    # Hand-rolled extraction of one timeseries value from the JSON text.
    data1 = data.split(',')
    data2 = ''.join(data1[4])
    data3 = float(data2[0:-3])
    data4 = format(data3, 'f')
    return data4,word
# Main game loop: show two random words, read the player's guess, and score it.
while True:
    word1 = get_word()
    word2 = get_word()
    print("\n *-----------Google Ngram Game------------* \n")
    user_input = input(f"Which word do think is used more? {word1[1]} [1] or {word2[1]} [2] (Type 1 or 2): ")
    # Membership test instead of ``is not``: identity comparison against
    # string literals is unreliable (and a SyntaxWarning on modern Python).
    if user_input not in ('1', '2'):
        print("Please enter either [1] or [2]!")
        continue
    # Frequencies come back as decimal *strings*; compare them numerically so
    # values of different lengths are not ordered lexicographically.
    first_is_more = float(word1[0]) > float(word2[0])
    # Score both answers symmetrically (the original could never report
    # "Correct!" when the player picked [2]).
    if (user_input == '1') == first_is_more:
        print("Correct!")
    else:
        print("Incorrect!")
| 3.921875 | 4 |
program_chip.py | akashlevy/NI-RRAM-Python | 0 | 12758059 | """Script to program a bitstream to a chip"""
import argparse
from nirram import NIRRAM
# Get arguments
parser = argparse.ArgumentParser(description="Program a bitstream to a chip.")
parser.add_argument("chipname", help="chip name for logging")
parser.add_argument("bitstream", help="bitstream file name")
# Expect to receive two arg numbers when specifying a LRS (or HRS) range
parser.add_argument("--lrs-range", nargs='+', type=float, default=[9e3, 11e3], help="target LRS")
parser.add_argument("--hrs-range", nargs='+', type=float, default=[100e3, 1e9], help="target HRS")
parser.add_argument("--start-addr", type=int, default=0, help="start addr")
parser.add_argument("--end-addr", type=int, default=65536, help="end addr")
parser.add_argument("--step-addr", type=int, default=1, help="addr step")
parser.add_argument("--iterations", type=int, default=3, help="number of programming iterations")
args = parser.parse_args()

# Initialize NI system
nisys = NIRRAM(args.chipname)

# Read bitstream (close the file promptly instead of leaking the handle)
with open(args.bitstream) as bitfile:
    bitstream = bitfile.readlines()

# Program each addressed cell, one bit per line of the bitstream, for the
# requested number of iterations.
for i in range(args.iterations):
    for addr, bit in zip(range(args.start_addr, args.end_addr, args.step_addr), bitstream):
        nisys.set_addr(addr)
        bit = int(bit.strip())
        if bit == 0:  # bit 0: LRS
            target = nisys.target(args.lrs_range[0], args.lrs_range[1])
        elif bit == 1:  # bit 1: HRS
            target = nisys.target(args.hrs_range[0], args.hrs_range[1])
        else:
            # Previously a non-0/1 bit silently reused the previous target
            # (or raised NameError on the first cell); fail loudly instead.
            raise ValueError(f"Invalid bit value {bit} in bitstream")
        print(f"Iteration {i}, Address {addr}: {target}")

# Shutdown
nisys.close()
| 3.28125 | 3 |
setup.py | kumagaimasahito/qbsolv | 0 | 12758060 | """Build the qbsolv package."""
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext
import os
cwd = os.path.abspath(os.path.dirname(__file__))

# Cythonize only when building from a source checkout: an sdist (which ships
# PKG-INFO) already contains the pre-generated .cpp files, so Cython is
# optional there.
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
    try:
        from Cython.Build import cythonize
        USE_CYTHON = True
    except ImportError:
        USE_CYTHON = False
else:
    USE_CYTHON = False

# Per-compiler build flags, keyed by distutils compiler type.
extra_compile_args = {
    'msvc': [],
    'unix': ['-std=c++11', '-Ofast', '-Wall', '-Wextra'],
    # 'unix': ['-std=c++1y','-w','-O0', '-g', '-fipa-pure-const'],
}
extra_link_args = {
    'msvc': [],
    'unix': [],
}
class build_ext_compiler_check(build_ext):
    """``build_ext`` subclass that applies per-compiler build flags.

    Bug fixed: the original looked the link flags up in
    ``extra_compile_args`` and assigned them to ``ext.extra_compile_args``
    a second time, so ``extra_link_args`` was never applied at all.
    """

    def build_extensions(self):
        compiler = self.compiler.compiler_type
        compile_args = extra_compile_args[compiler]
        link_args = extra_link_args[compiler]
        for ext in self.extensions:
            ext.extra_compile_args = compile_args
            ext.extra_link_args = link_args
        build_ext.build_extensions(self)
# Build from .pyx when Cython is available, otherwise from the shipped .cpp.
ext = '.pyx' if USE_CYTHON else '.cpp'
extensions = [Extension('dwave_qbsolv.qbsolv_binding',
                        ['python/dwave_qbsolv/qbsolv_binding' + ext,
                         './python/globals.cc',
                         './src/solver.cc',
                         './src/dwsolv.cc',
                         './src/util.cc'],
                        include_dirs=['./python', './src', './include', './cmd']
                        )]
if USE_CYTHON:
    extensions = cythonize(extensions, language='c++')

packages = ['dwave_qbsolv']
classifiers = [
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
]
python_requires = '>=3.5'

setup(
    name='dwave-qbsolv',
    version='0.3.2',
    packages=packages,
    package_dir={'dwave_qbsolv': 'python/dwave_qbsolv'},
    install_requires=['dimod>=0.8.1,<0.10.0'],
    ext_modules=extensions,
    # Custom build_ext that injects the per-compiler flags defined above.
    cmdclass={'build_ext': build_ext_compiler_check},
    long_description=open('README.rst').read(),
    classifiers=classifiers,
    license='Apache 2.0',
    python_requires=python_requires
)
| 2.09375 | 2 |
vlcp/service/sdn/icmpresponder.py | hubo1016/vlcp | 252 | 12758061 | import itertools
import os
import vlcp.service.sdn.ofpportmanager as ofpportmanager
import vlcp.service.kvdb.objectdb as objectdb
import vlcp.service.sdn.ioprocessing as iop
from vlcp.service.sdn.flowbase import FlowBase
from vlcp.server.module import depend, call_api
from vlcp.config.config import defaultconfig
from vlcp.event.runnable import RoutineContainer
from vlcp.service.sdn.ofpmanager import FlowInitialize
from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes,ip4_icmp_payload,\
ethernet_l7, ip4_packet_l7, ip4_payload,ICMP_ECHOREPLY,icmp_bestparse,icmp_echo,\
ip_frag
from vlcp.utils.flowupdater import FlowUpdater
from vlcp.protocol.openflow.openflow import OpenflowConnectionStateEvent, OpenflowAsyncMessageEvent
from vlcp.utils.networkmodel import SubNet,RouterPort
from namedstruct.stdprim import uint16
from vlcp.event.event import M_
class ICMPResponderUpdater(FlowUpdater):
    """Per-connection flow updater for the ICMP responder.

    Tracks logical ports/networks and their subnets/router ports, and keeps
    the switch's ``l3input`` table populated with flows that answer ICMP
    echo requests addressed to each gateway address — either by punting the
    packet to the controller (``prepush=False``) or with a pre-pushed flow
    that rewrites the request into a reply on the switch (``prepush=True``).
    """

    def __init__(self,connection,parent):
        super(ICMPResponderUpdater,self).__init__(connection,(),('icmpresponderupdate',connection),parent._logger)
        self.parent = parent
        self._lastlognets = ()
        self._lastlogports = ()
        # subnet -> (ip_address, mac_address, network_id, local nid) as of the
        # last successful updateflow() run.
        self._lastsubnetsinfo = dict()
        self._orig_initialkeys = ()

    async def main(self):
        try:
            self.subroutine(self._update_handler(),True,"update_handler_routine")
            # use controller to reply icmp ping ,so start routine handler packet in
            if not self.parent.prepush:
                self.subroutine(self._icmp_packetin_handler(),True,"icmp_packetin_handler_routine")
            await FlowUpdater.main(self)
        finally:
            if hasattr(self,"update_handler_routine"):
                self.update_handler_routine.close()
            if hasattr(self,"icmp_packetin_handler_routine"):
                self.icmp_packetin_handler_routine.close()

    async def _icmp_packetin_handler(self):
        # Controller-side ICMP echo responder: build and send an echo reply
        # for every ICMP PACKET_IN punted from the l3input table.
        conn = self._connection
        ofdef = self._connection.openflowdef
        l3input = self.parent._gettableindex("l3input",self._connection.protocol.vhost)
        transactid = uint16.create(os.urandom(2))
        async def send_packet_out(portid,packet):
            await self.execute_commands(conn,
                    [
                        ofdef.ofp_packet_out(
                            buffer_id = ofdef.OFP_NO_BUFFER,
                            in_port = ofdef.OFPP_CONTROLLER,
                            actions = [
                                ofdef.ofp_action_output(port = portid,
                                                        max_len = ofdef.OFPCML_NO_BUFFER
                                                        )
                            ],
                            data = packet._tobytes()
                        )
                    ])
        icmp_packetin_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN,None,None,l3input,2,
                                                                        self._connection,self._connection.connmark)
        while True:
            ev = await icmp_packetin_matcher
            msg = ev.message
            inport = ofdef.ofp_port_no.create(ofdef.get_oxm(msg.match.oxm_fields,ofdef.OXM_OF_IN_PORT))
            # it must be icmp packet ...
            icmp_packet = ethernet_l7.create(msg.data)
            if ip_frag(icmp_packet) != 0:
                # ignore fragmented packets
                continue
            transactid = (transactid + 1) & 0xffff
            # Echo reply: swap L2/L3 src and dst, keep the ICMP id/seq/data.
            reply_packet = ip4_packet_l7((ip4_payload,ip4_icmp_payload),
                                         (icmp_bestparse, icmp_echo),
                                         dl_src = icmp_packet.dl_dst,
                                         dl_dst = icmp_packet.dl_src,
                                         ip_src = icmp_packet.ip_dst,
                                         ip_dst = icmp_packet.ip_src,
                                         frag_off = 0,
                                         ttl = 128,
                                         identifier = transactid,
                                         icmp_type = ICMP_ECHOREPLY,
                                         icmp_code = icmp_packet.icmp_code,
                                         icmp_id = icmp_packet.icmp_id,
                                         icmp_seq = icmp_packet.icmp_seq,
                                         data = icmp_packet.data
                                         )
            self.subroutine(send_packet_out(inport,reply_packet))

    async def _update_handler(self):
        # when lgport,lgnet,phyport,phynet object change , receive this event from ioprocessing module
        dataobjectchange = iop.DataObjectChanged.createMatcher(None,None,self._connection)
        while True:
            ev = await dataobjectchange
            # save to instance attr , us in other method
            self._lastlogports,_,self._lastlognets,_ = ev.current
            self._update_walk()

    def _walk_lgport(self,key,value,walk,save):
        # Walker: logical port -> its subnet -> that subnet's router.
        if value is not None:
            save(key)
            if hasattr(value,'subnet'):
                try:
                    subnetobj = walk(value.subnet.getkey())
                except KeyError:
                    pass
                else:
                    save(value.subnet.getkey())
                    if subnetobj is not None and hasattr(subnetobj,"router"):
                        try:
                            _ = walk(subnetobj.router.getkey())
                        except KeyError:
                            pass
                        else:
                            save(subnetobj.router.getkey())

    def _walk_lgnet(self,key,value,walk,save):
        save(key)
        # if value is None, also save its key
        # means watch key, when created , we will recv event

    def _update_walk(self):
        # Rebuild the walker dictionary from the current ports/networks and
        # restart the ObjectDB walk.
        lgportkeys = [p.getkey() for p,_ in self._lastlogports]
        lgnetkeys = [p.getkey() for p,_ in self._lastlognets]
        self._initialkeys = lgportkeys + lgnetkeys
        self._orig_initialkeys = lgportkeys + lgnetkeys
        self._walkerdict = dict(itertools.chain(((p,self._walk_lgport) for p in lgportkeys),
                                                ((n,self._walk_lgnet) for n in lgnetkeys)))
        self.subroutine(self.restart_walk(),False)

    def reset_initialkeys(self,keys,values):
        # walk map logicalport --> subnet ---> routerport
        # we get subnet object, add keys to initialkeys,
        # when subnet update, it will restart walk ,, after we will get new routerport
        subnetkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
                      v.isinstance(SubNet)]
        self._initialkeys = tuple(itertools.chain(self._orig_initialkeys,subnetkeys))

    async def updateflow(self, connection, addvalues, removevalues, updatedvalues):
        # Diff the previously-installed gateway set against the current one
        # and add/remove the corresponding proxy-ARP entries and ICMP flows.
        try:
            allobjects = set(o for o in self._savedresult if o is not None and not o.isdeleted())
            lastsubnetsinfo = self._lastsubnetsinfo
            currentlognetsinfo = dict((n,id) for n,id in self._lastlognets if n in allobjects)
            currentrouterportsinfo = dict((o.subnet,o) for o in allobjects
                                          if o.isinstance(RouterPort))
            # subnet -> (gateway ip, router mac, network id, local nid); only
            # internal subnets with a router and a usable gateway address.
            currentsubnetsinfo = dict((o,(getattr(currentrouterportsinfo[o],"ip_address",getattr(o,"gateway",None)),
                                          self.parent.inroutermac,o.network.id,currentlognetsinfo[o.network]))
                                      for o in allobjects if o.isinstance(SubNet)
                                      and hasattr(o,"router") and o in currentrouterportsinfo
                                      and o.network in currentlognetsinfo
                                      and (hasattr(currentrouterportsinfo[o],"ip_address")
                                           or hasattr(o,"gateway"))
                                      and ( not hasattr(o,"isexternal") or o.isexternal == False))
            self._lastsubnetsinfo = currentsubnetsinfo
            ofdef = connection.openflowdef
            vhost = connection.protocol.vhost
            l3input = self.parent._gettableindex("l3input",vhost)
            cmds = []
            if connection.protocol.disablenxext:
                def match_network(nid):
                    return ofdef.create_oxm(ofdef.OXM_OF_METADATA_W, (nid & 0xffff) << 32,
                                            b'\x00\x00\xff\xff\x00\x00\x00\x00')
            else:
                def match_network(nid):
                    return ofdef.create_oxm(ofdef.NXM_NX_REG4, nid)
            # prepush or not ,, it is same , so ..
            def _deleteicmpflows(ipaddress, macaddress, networkid):
                return [
                    ofdef.ofp_flow_mod(
                        cookie = 0x2,
                        cookie_mask = 0xffffffffffffffff,
                        table_id = l3input,
                        command = ofdef.OFPFC_DELETE,
                        priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
                        buffer_id = ofdef.OFP_NO_BUFFER,
                        out_port = ofdef.OFPP_ANY,
                        out_group = ofdef.OFPG_ANY,
                        match = ofdef.ofp_match_oxm(
                            oxm_fields = [
                                ofdef.create_oxm(ofdef.NXM_NX_REG4,networkid),
                                ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
                                ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
                                ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
                                ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
                                ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
                                ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
                            ]
                        )
                    )
                ]
            if not self.parent.prepush:
                # Punt ICMP echo requests for the gateway to the controller.
                def _createicmpflows(ipaddress, macaddress, networkid):
                    return [
                        ofdef.ofp_flow_mod(
                            cookie = 0x2,
                            cookie_mask = 0xffffffffffffffff,
                            table_id = l3input,
                            command = ofdef.OFPFC_ADD,
                            # icmp to router matcher same as ip forward to router
                            # so priority + 1
                            priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
                            buffer_id = ofdef.OFP_NO_BUFFER,
                            out_port = ofdef.OFPP_ANY,
                            out_group = ofdef.OFPG_ANY,
                            match = ofdef.ofp_match_oxm(
                                oxm_fields = [
                                    match_network(networkid),
                                    ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
                                    ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
                                    ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
                                    ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
                                    ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
                                    ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
                                ]
                            ),
                            instructions = [
                                ofdef.ofp_instruction_actions(
                                    actions = [
                                        ofdef.ofp_action_output(
                                            port = ofdef.OFPP_CONTROLLER,
                                            max_len = ofdef.OFPCML_NO_BUFFER
                                        )
                                    ]
                                )
                            ]
                        )
                    ]
            else:
                # Rewrite the request into an echo reply entirely on the
                # switch and bounce it out of the ingress port.
                def _createicmpflows(ipaddress, macaddress, networkid):
                    return [
                        ofdef.ofp_flow_mod(
                            cookie = 0x2,
                            cookie_mask = 0xffffffffffffffff,
                            table_id = l3input,
                            command = ofdef.OFPFC_ADD,
                            # icmp to router matcher same as ip forward to router
                            # so priority + 1
                            priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
                            buffer_id = ofdef.OFP_NO_BUFFER,
                            out_port = ofdef.OFPP_ANY,
                            out_group = ofdef.OFPG_ANY,
                            match = ofdef.ofp_match_oxm(
                                oxm_fields = [
                                    match_network(networkid),
                                    ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
                                    ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
                                    ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
                                    ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
                                    ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
                                    ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
                                ]
                            ),
                            instructions = [
                                ofdef.ofp_instruction_actions(
                                    actions = [
                                        ofdef.nx_action_reg_move(
                                            n_bits = 48,
                                            src = ofdef.OXM_OF_ETH_SRC,
                                            dst = ofdef.OXM_OF_ETH_DST
                                        ),
                                        ofdef.ofp_action_set_field(
                                            field = ofdef.create_oxm(
                                                ofdef.OXM_OF_ETH_SRC,
                                                ofdef.mac_addr(macaddress)
                                            )
                                        ),
                                        ofdef.nx_action_reg_move(
                                            n_bits = 32,
                                            src = ofdef.OXM_OF_IPV4_SRC,
                                            dst = ofdef.OXM_OF_IPV4_DST
                                        ),
                                        ofdef.ofp_action_set_field(
                                            field = ofdef.create_oxm(
                                                ofdef.OXM_OF_IPV4_SRC,
                                                ofdef.ip4_addr(ipaddress)
                                            )
                                        ),
                                        ofdef.ofp_action_set_field(
                                            field = ofdef.create_oxm(
                                                ofdef.OXM_OF_ICMPV4_TYPE,
                                                ICMP_ECHOREPLY
                                            )
                                        ),
                                        ofdef.ofp_action_nw_ttl(
                                            nw_ttl = 128
                                        ),
                                        ofdef.ofp_action_output(
                                            port = ofdef.OFPP_IN_PORT
                                        )
                                    ]
                                )
                            ]
                        )
                    ]
            for subnet in lastsubnetsinfo.keys():
                if subnet not in currentsubnetsinfo\
                        or (subnet in currentsubnetsinfo and lastsubnetsinfo[subnet] != currentsubnetsinfo[subnet]):
                    # subnet remove or subnet info changed , remove flow info
                    ip_address, mac_address, networkid, nid = lastsubnetsinfo[subnet]
                    remove_arp = {(ip_address,mac_address,networkid,True),}
                    await call_api(self, 'arpresponder', 'removeproxyarp', {'connection':connection,
                                                                            'arpentries': remove_arp})
                    cmds.extend(_deleteicmpflows(ip_address,mac_address,nid))
            await self.execute_commands(connection, cmds)
            for subnet in currentsubnetsinfo.keys():
                if subnet not in lastsubnetsinfo\
                        or (subnet in lastsubnetsinfo and lastsubnetsinfo[subnet] != currentsubnetsinfo[subnet]):
                    ip_address, mac_address, networkid, nid = currentsubnetsinfo[subnet]
                    add_arp = {(ip_address,mac_address,networkid,True),}
                    await call_api(self, 'arpresponder', 'createproxyarp', {'connection': connection,
                                                                           'arpentries': add_arp})
                    cmds.extend(_createicmpflows(ip_address,mac_address,nid))
            await self.execute_commands(connection, cmds)
        except Exception:
            self._logger.warning("Unexpected exception in icmp_flow_updater, ignore it! Continue",exc_info=True)
@defaultconfig
@depend(ofpportmanager.OpenflowPortManager,objectdb.ObjectDB)
class ICMPResponder(FlowBase):
    """
    Respond ICMP echo (ping) requests to the gateway
    """
    _tablerequest = (
        ("l3input",("l2input",),""),
        ("l2output",("l3input",),"")
    )
    # True : reply icmp ping with flow
    # False: reply icmp ping with controller PACKET_IN/PACKET_OUT
    #
    # Must use prepush=True with OpenvSwitch 2.5+
    #
    _default_prepush = False
    # "Gateway" responds with this MAC address
    _default_inroutermac = '1a:23:67:59:63:33'

    def __init__(self,server):
        super(ICMPResponder,self).__init__(server)
        self.app_routine = RoutineContainer(self.scheduler)
        self.app_routine.main = self._main
        self.routines.append(self.app_routine)
        # connection -> ICMPResponderUpdater for every managed switch.
        self._flowupdater = dict()

    async def _main(self):
        # Watch for table initialization on new connections and for dropped
        # connections, creating/destroying a flow updater accordingly.
        flowinit = FlowInitialize.createMatcher(_ismatch=lambda x: self.vhostbind is None or
                                                x.vhost in self.vhostbind)
        conndown = OpenflowConnectionStateEvent.createMatcher(state = OpenflowConnectionStateEvent.CONNECTION_DOWN,
                                                              _ismatch=lambda x:self.vhostbind is None or
                                                              x.createby.vhost in self.vhostbind)
        while True:
            ev, m = await M_(flowinit,conndown)
            if m is flowinit:
                c = ev.connection
                self.app_routine.subroutine(self._init_conn(c))
            if m is conndown:
                c = ev.connection
                self.app_routine.subroutine(self._remove_conn(c))

    async def _init_conn(self,conn):
        # Replace any existing updater for this connection before starting
        # a fresh one.
        if conn in self._flowupdater:
            updater = self._flowupdater.pop(conn)
            updater.close()
        updater = ICMPResponderUpdater(conn,self)
        self._flowupdater[conn] = updater
        updater.start()

    async def _remove_conn(self,conn):
        # Stop and drop the updater associated with a closed connection.
        if conn in self._flowupdater:
            updater = self._flowupdater.pop(conn)
            updater.close()
| 1.875 | 2 |
pyeccodes/defs/grib2/local_85_2_def.py | ecmwf/pyeccodes | 7 | 12758062 | <filename>pyeccodes/defs/grib2/local_85_2_def.py
import pyeccodes.accessors as _
def load(h):
    # Register the centre-85 local GRIB2 definitions on handle *h*:
    # FA-related naming concepts (field/level/model names resolved via the
    # concept .def files) followed by the raw header fields.
    h.add(_.Transient('defaultFaFieldName', ""))
    h.add(_.Transient('defaultFaLevelName', ""))
    h.add(_.Transient('defaultFaModelName', ""))
    h.add(_.Concept('faFieldName', 'defaultFaFieldName', 'faFieldName.def', 'conceptsMasterDir', 'conceptsLocalDirAll', False))
    h.add(_.Concept('faLevelName', 'defaultFaLevelName', 'faLevelName.def', 'conceptsMasterDir', 'conceptsLocalDirAll', False))
    h.add(_.Concept('faModelName', 'defaultFaModelName', 'faModelName.def', 'conceptsMasterDir', 'conceptsLocalDirAll', False))
    h.add(_.Transient('LSTCUM', 0))
    h.add(_.Transient('ZLMULT', 1))
    h.add(_.Transient('ZLBASE', 0))
    h.add(_.Ascii('CLNOMA', 16))
    h.add(_.Unsigned('INGRIB', 8))
    h.add(_.Unsigned('LLCOSP', 8))
    h.add(_.Unsigned('INBITS', 8))
    h.add(_.Signed('FMULTM', 8))
    h.add(_.Signed('FMULTE', 8))
    h.add(_.Unsigned('ICPLSIZE', 8))
| 1.921875 | 2 |
core/apps/users/sync/ldap.py | jedia168/KubeOperator | 3 | 12758063 | import json
from django.contrib.auth.models import User
from ldap3 import Server, Connection
from kubeops_api.models.setting import Setting
from message_center.models import UserNotificationConfig, UserReceiver, Message
class LDAPSync:
    """Synchronise users from an LDAP directory into Django's auth tables,
    creating default notification configs/receivers for each imported user."""

    def __init__(self):
        self._conn = None
        settings = Setting.get_settings(tab='ldap')
        self.ldap_enable = settings.get("AUTH_LDAP_ENABLE", False)
        if not self.ldap_enable:
            # LDAP disabled: leave the remaining attributes unset.
            return
        self.bind_dn = settings.get("AUTH_LDAP_BIND_DN")
        self.bind_password = settings.get("AUTH_LDAP_BIND_PASSWORD")
        self.search_ou = settings.get("AUTH_LDAP_SEARCH_OU")
        self.search_filter = settings.get("AUTH_LDAP_SEARCH_FILTER")
        self.server_uri = settings.get("AUTH_LDAP_SERVER_URI")
        self.attr_map = json.loads(settings.get("AUTH_LDAP_USER_ATTR_MAP"))

    @property
    def connection(self):
        # Lazily create and cache a bound LDAP connection.
        if self._conn:
            return self._conn
        server = Server(self.server_uri, use_ssl=False)
        conn = Connection(server, self.bind_dn, self.bind_password)
        conn.bind()
        self._conn = conn
        return self._conn

    def search_users(self):
        """Search every configured OU ('|'-separated) and return all entries."""
        user_entries = list()
        search_ous = str(self.search_ou).split('|')
        for ou in search_ous:
            self.search_user_entries_ou(search_ou=ou)
            user_entries.extend(self.connection.entries)
        return user_entries

    def search_user_entries_ou(self, search_ou):
        # Run the configured filter with a wildcard user; results land on
        # self.connection.entries.
        search_filter = self.search_filter % {'user': '*'}
        attributes = list(self.attr_map.values())
        self.connection.search(
            search_base=search_ou, search_filter=search_filter,
            attributes=attributes)

    def user_entry_to_dict(self, entry):
        # Map an LDAP entry to a plain dict using the configured attr map.
        user = {}
        attr_map = self.attr_map.items()
        for attr, mapping in attr_map:
            if not hasattr(entry, mapping):
                continue
            value = getattr(entry, mapping).value or ''
            user[attr] = value
        return user

    def user_entries_to_dict(self, user_entries):
        users = []
        for user_entry in user_entries:
            user = self.user_entry_to_dict(user_entry)
            users.append(user)
        return users

    def run(self):
        """Import LDAP users and create default notification config/receiver rows."""
        user_entries = self.search_users()
        user_dicts = self.user_entries_to_dict(user_entries)
        for ud in user_dicts:
            defaults = {
                "username": ud.get("username", None).strip(),
                "email": ud.get("email", None)
            }
            if not defaults["username"] or not defaults["email"]:
                continue
            User.objects.get_or_create(defaults, username=defaults.get("username"))
            vars = {
                "LOCAL": "ENABLE",
                "EMAIL": "DISABLE",
                "DINGTALK": "DISABLE",
                "WORKWEIXIN": "DISABLE",
            }
            user = User.objects.get(username=defaults["username"])
            UserNotificationConfig(vars=vars, user=user, type=Message.MESSAGE_TYPE_CLUSTER).save()
            UserNotificationConfig(vars=vars, user=user, type=Message.MESSAGE_TYPE_SYSTEM).save()
            vars2 = {
                "EMAIL": user.email,
                "DINGTALK": "",
                "WORKWEIXIN": "",
            }
            UserReceiver(vars=vars2, user=user).save()
 | 1.820313 | 2 |
Auth/urls.py | michaelzap94/lasantabiblia-platform-django | 0 | 12758064 | <filename>Auth/urls.py
#THESE pre-packaged mthods will need 'username', you can also use your own methods
from rest_framework.authtoken import views as authviews
from rest_framework_simplejwt import views as jwt_views
from django.urls import path
from django.conf.urls import include, url
from . import views
urlpatterns = [
    # --- DRF TokenAuthentication endpoints ---
    path('api/token/signup/', views.registration_view, name='signup_token'),
    # you can use a custom view if you need to return more data apart from the token (this requires 'username')
    path('api/token/login/', authviews.obtain_auth_token, name='login_token'),
    # --- JWT (rest_framework_simplejwt) endpoints ---
    path('api/jwt/verify/', jwt_views.TokenVerifyView.as_view(), name='verify_jwt'),
    path('api/jwt/signup/', views.registration_view_jwt, name='signup_jwt'),
    # path('api/jwt/login/', jwt_views.TokenObtainPairView.as_view(), name='login_jwt'), #built-in login
    # custom login view is used instead of the built-in TokenObtainPairView above
    path('api/jwt/login/', views.MyTokenObtainPairView.as_view(), name='login_jwt'),
    path('api/jwt/refresh/', jwt_views.TokenRefreshView.as_view(), name='refresh_jwt'),
    path('api/jwt/logout/', views.logout_view_jwt, name='logout_jwt'),
    # --- JWT social-auth endpoints ---
    path('api/jwt/signup/social/', views.registration_view_jwt_social, name='signup_jwt_social'),
    path('api/jwt/login/social/', views.login_view_jwt_social, name='login_jwt_social'),
    # --- Extra / test endpoints ---
    path('api/no-token/signup/', views.RegisterUserOnlyView.as_view(), name='signup_no_token'),
    path('test/labels/', views.TestAllLabels.as_view(), name='test'),
]
11_stream/src/push_notification_to_sns.py | dpai/workshop | 2,327 | 12758065 | <reponame>dpai/workshop
from __future__ import print_function
import boto3
import base64
import os
# Target SNS topic for anomaly notifications; must be present in the Lambda
# environment (a missing variable raises KeyError at import/cold-start time).
SNS_TOPIC_ARN = os.environ["SNS_TOPIC_ARN"]
# Module-level client so it is reused across warm Lambda invocations.
sns = boto3.client("sns")
print("Loading function")
def lambda_handler(event, context):
    """Process a batch of base64-encoded anomaly scores from a stream.

    Each ``event["records"]`` item carries base64 text containing a float
    score.  The highest non-zero score in the batch triggers a single SNS
    notification.  Returns the per-record delivery status expected by the
    caller: ``{"records": [{"recordId": ..., "result": "Ok"|"DeliveryFailed"}]}``.

    The previous version kept an unused local ``r`` and printed every raw
    payload; that dead debug code has been removed.
    """
    output = []
    success = 0
    failure = 0
    highest_score = 0
    print("event: {}".format(event))
    for record in event["records"]:
        try:
            payload = base64.b64decode(record["data"])
            score = float(payload.decode("utf-8"))
            # Track the largest non-zero score seen so far in this batch.
            if (score != 0) and (score > highest_score):
                highest_score = score
                print("New highest_score: {}".format(highest_score))
            output.append({"recordId": record["recordId"], "result": "Ok"})
            success += 1
        except Exception as e:
            # A record that fails to decode/parse is reported back as a
            # delivery failure instead of aborting the whole batch.
            print(e)
            output.append({"recordId": record["recordId"], "result": "DeliveryFailed"})
            failure += 1
    # Publish at most one notification per invocation, for the highest score.
    if highest_score != 0:
        sns.publish(
            TopicArn=SNS_TOPIC_ARN,
            Message="New anomaly score: {}".format(str(highest_score)),
            Subject="New Reviews Anomaly Score Detected",
        )
    print("Successfully delivered {0} records, failed to deliver {1} records".format(success, failure))
    return {"records": output}
return {"records": output}
| 2.21875 | 2 |
scripts/docs_helper.py | viktorfreiman/smartthings-rest | 4 | 12758066 | __doc__ = """
docs_helper.py
--------------
| Small script that makes it easier to work with intersphinx it takes config data from docs/conf.py
| :doc:`sphinx:usage/extensions/intersphinx`
:ref:`sphinx:xref-syntax`
| instead of: ``python -m sphinx.ext.intersphinx https://www.sphinx-doc.org/en/master/objects.inv``
| run ``python scripts/docs_helper.py`` and then pick the project you want.
Wraps also over `sphobjinv <https://github.com/bskinn/sphobjinv>`_ to make easy to search
"localhost" need to have a webserver with built html from sphinx,
have ``autobuild-html-docs`` running
.. todo::
have better support for finding port of autobuild-html-docs
use psutil?
"""
import sys
from pathlib import Path
import os
from urllib.parse import urljoin
import signal
def signal_SIGINT_handler(signal, frame):
    """Handle SIGINT (Ctrl-C) with a friendly message instead of a traceback."""
    print("\nGOT SIGINT(Probably KeyboardInterrupt), Quitting")
    raise SystemExit(0)
def main():
    """Interactively pick an intersphinx target and dump/search its objects.inv."""
    project_path = Path(__file__).absolute().parents[1]
    # we need this on sys.path for the `docs.conf` import below to work
    sys.path.insert(0, str(project_path))
    print("Intersphinx objects.inv printout")
    print("intersphinx_mapping is from docs/conf.py")
    from docs.conf import intersphinx_mapping
    # add localhost to make it easy to inspect this project's own inventory
    intersphinx_mapping["localhost"] = ("http://127.0.0.1:8000/", None)
    for i, doc in enumerate(intersphinx_mapping.keys()):
        print(f"{i}) {doc}")
    int_picker = int(input("Pick a number for docs: "))
    picked_name = list(intersphinx_mapping.keys())[int_picker]
    print(f"picked: {picked_name}\n")
    type_picker = int(
        input(
            f"0) Print all from objects.inv from {picked_name}\n"
            f"1) Search and suggest object\n"
            f"Select mode: "
        )
    )
    # the extra slash in case it is missing in the config data;
    # urljoin will normalise the url for us
    obj_inv_url = urljoin(intersphinx_mapping[picked_name][0] + "/", "objects.inv")
    if type_picker:
        print("--- sphobjinv ---")
        search = input("Search: ")
        cli = f"sphobjinv suggest {obj_inv_url} {search} -su"
    else:
        print("--- intersphinx ---")
        cli = f"{sys.executable} -m sphinx.ext.intersphinx {obj_inv_url}"
    # NOTE(review): `search` is interpolated unquoted into a shell command —
    # fine for a local helper, but avoid pasting untrusted input here.
    os.system(cli)
    # todo: Change this printout to use triple quotes instead
    print(
        "--- Note ---\n"
        "Please note the output from this tools\n"
        "need to be changed to work as a cross-references\n"
        "Exemple:\n"
        ":std:label:`thing_to_link` -> :ref:`thing_to_link`\n"
        "or\n"
        ":std:label:`thing_to_link` -> :ref:`project_name:thing_to_link`\n\n"
        ":py:function:`that_func` -> :py:func:`that_func`\n"
        "or\n"
        ":py:function:`that_func` -> :py:func:`project_name:that_func`\n"
        "--- Note ---\n"
    )
    print(
        "--- Links ---\n"
        "Link for intersphinx cross-referencing tags\n"
        "https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#xref-syntax\n"  # noqa b950
        "--- Links ---\n"
    )
    print(f"CLI:\n{cli}")
if __name__ == "__main__":
    # Install the SIGINT handler so Ctrl-C exits cleanly instead of dumping
    # a KeyboardInterrupt traceback.
    signal.signal(signal.SIGINT, signal_SIGINT_handler)
    main()
| 2.859375 | 3 |
onadata/apps/staff/migrations/0009_auto_20180416_1438.py | awemulya/fieldsight-kobocat | 38 | 12758067 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``staff`` app.

    Gives ``Staff.photo`` a default placeholder image and enforces that a
    team has at most one attendance record per date.
    """

    dependencies = [
        ('staff', '0008_auto_20180412_1515'),
    ]

    operations = [
        migrations.AlterField(
            model_name='staff',
            name='photo',
            field=models.ImageField(default=b'/static/images/default_user.png', upload_to=b'staffs'),
        ),
        migrations.AlterUniqueTogether(
            name='attendance',
            unique_together=set([('attendance_date', 'team')]),
        ),
    ]
| 1.515625 | 2 |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/config/settings/test.py | simonbru/django-template | 22 | 12758068 | from .base import * # noqa
# Dummy key: the test settings never serve real traffic, so a hard-coded
# value is acceptable here.
SECRET_KEY = "test"
# Run the suite with DEBUG off so behaviour matches production code paths.
DEBUG = False
# Always use local memory cache, don't bother trying memcached or similar
CACHES = {"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
# use basic password hashing for tests for better performance
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# Disable logging messages
LOGGING = {}
| 1.328125 | 1 |
search/transliterators/erzya.py | LingConLab/Yakut_cs_corpus | 0 | 12758069 | <reponame>LingConLab/Yakut_cs_corpus<gh_stars>0
import re
# Cyrillic -> Latin UPA character map (lower-case; capitals are derived).
cyr2upa = {'я': 'ʼa', 'е': 'ʼe', 'ѣ': 'ʼe', 'и': 'ʼi',
           'ё': 'ʼo', 'ю': 'ʼu', 'ь': 'ʼ', 'і': 'ʼi',
           'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g',
           'д': 'd', 'ж': 'ž', 'з': 'z', 'к': 'k',
           'л': 'l', 'м': 'm', 'н': 'n', 'о': 'o',
           'п': 'p', 'р': 'r', 'с': 's', 'т': 't',
           'у': 'u', 'ф': 'f', 'х': 'x', 'ц': 'c',
           'ч': 'č', 'ш': 'š', 'щ': 'štʼ', 'ъ': 'j',
           'ы': 'i̮', 'э': 'e', 'й': 'j', 'ҥ': 'n', 'ѳ': 'f'}

rxYer = re.compile('ъ+\\b')
rxCyrVJV = re.compile('([aeiou])ʼ([aeou])')
rxCyrJV = re.compile('\\bʼ([aeou])')
rxCyrNeutral = re.compile('(?<=[bvgžkmpxčšj])ʼ', flags=re.I)
rxCyrRegressiveSoft = re.compile('([dzlnrstc])([dzlnrstc])(?=ʼ)')
rxCyrMultSoften = re.compile('ʼ{2,}')
rxCyrVSoft = re.compile('([aeiou]|\\b)ʼ', flags=re.I)

# Pre-map replacements disambiguating и/е after the retroflex sibilants ж/ш.
_SIBILANT_FIXES = (('жи', 'жӥ'), ('ши', 'шӥ'), ('же', 'жэ'), ('ше', 'шэ'),
                   ('Жи', 'Жӥ'), ('Ши', 'Шӥ'), ('Же', 'Жэ'), ('Ше', 'Шэ'))

# Post-processing: fold the softening mark into single softened glyphs.
_SOFT_GLYPHS = (('sʼ', 'ś'), ('zʼ', 'ź'), ('čʼ', 'č'), ('nʼ', 'ń'),
                ('cʼ', 'ć'), ('rʼ', 'ŕ'), ('Sʼ', 'Ś'), ('Zʼ', 'Ź'),
                ('Čʼ', 'Č'), ('Nʼ', 'Ń'), ('Cʼ', 'Ć'), ('Rʼ', 'Ŕ'))


def erzya_translit_upa(text):
    """
    Transliterate Erzya text from Cyrillic script to Latin UPA.
    """
    # Drop word-final hard signs, then normalise ж/ш + vowel sequences.
    text = rxYer.sub('', text)
    for plain, marked in _SIBILANT_FIXES:
        text = text.replace(plain, marked)

    def _map_char(ch):
        mapped = cyr2upa.get(ch.lower())
        if mapped is None:
            return ch
        # Preserve capitalisation of the original letter.
        return mapped if ch.islower() else mapped.upper()

    res = ''.join(_map_char(ch) for ch in text)
    # Restore intervocalic and word-initial jots.
    res = rxCyrVJV.sub('\\1j\\2', res)
    res = rxCyrJV.sub('j\\1', res)
    res = res.replace('ъʼ', 'j')
    # Remove the softening mark after consonants that are never soft.
    res = rxCyrNeutral.sub('')if False else rxCyrNeutral.sub('', res)
    # Propagate softness leftwards through consonant clusters.
    for _ in range(5):
        res = rxCyrRegressiveSoft.sub('\\1ʼ\\2', res)
    res = rxCyrMultSoften.sub('ʼ', res)
    res = rxCyrVSoft.sub('\\1', res)
    for soft, single in _SOFT_GLYPHS:
        res = res.replace(soft, single)
    return res
| 2.03125 | 2 |
Projects/DeepLearningTechniques/ShakeNet/imagenet/model.py | Tim232/Python-Things | 2 | 12758070 | import tensorflow.contrib.slim as slim
from Projects.DeepLearningTechniques.ShakeNet.imagenet.constants import *
class Model:
    """MobileNetV2-style image classifier assembled as a TF1 static graph.

    The whole graph is built at construction time under a variable scope named
    ``name``; ``train``/``validation``/``test`` then feed numpy batches through
    the supplied session.  ``tf`` and ``flags`` are expected to come from the
    star-import of the project ``constants`` module.
    """

    def __init__(self, sess, width, height, channel, lr, dr, is_training, is_tb_logging, name):
        # Session and input geometry (height x width x channel images).
        self.sess = sess
        self.width = width
        self.height = height
        self.channel = channel
        # lr: learning rate; dr is stored but not referenced anywhere in this
        # class (presumably a dropout/decay rate — TODO confirm).
        self.lr = lr
        self.dr = dr
        # is_training toggles batch-norm/dropout behaviour; is_tb_logging
        # enables TensorBoard summary collection.
        self.is_training = is_training
        self.is_tb_logging = is_tb_logging
        self.name = name
        self.weights_initializers = tf.contrib.layers.xavier_initializer(uniform=False)
        self.weights_regularizers = tf.contrib.layers.l2_regularizer(scale=flags.FLAGS.l2_scale)
        self.summary_values = []
        self._build_graph()

    def _build_graph(self):
        """Assemble the inference graph, loss, metrics and the train op."""
        with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
            with tf.variable_scope(name_or_scope='input_scope'):
                self.x = tf.placeholder(dtype=tf.float32, shape=[None, self.height, self.width, self.channel], name='x')
                self.y = tf.placeholder(dtype=tf.int64, shape=[None], name='y')
            with tf.variable_scope(name_or_scope='body_scope'):
                # Stem convolution followed by a stack of inverted bottlenecks.
                layer = self.conv2d(inputs=self.x, filters=32, kernel_size=3, strides=2, name='conv2d_0')
                layer = self.batch_norm(inputs=layer, name='conv2d_0_batch')
                layer = self.inverted_bottleneck(inputs=layer, filters=16, strides=1, repeat=1, factor=1, name='bottleneck_1')
                layer = self.inverted_bottleneck(inputs=layer, filters=24, strides=2, repeat=2, factor=4, name='bottleneck_2')
                layer = self.inverted_bottleneck(inputs=layer, filters=32, strides=2, repeat=3, factor=4, name='bottleneck_3')
                layer = self.inverted_bottleneck(inputs=layer, filters=64, strides=2, repeat=4, factor=4, name='bottleneck_4')
                layer = self.inverted_bottleneck(inputs=layer, filters=96, strides=1, repeat=1, factor=4, name='bottleneck_5')
                layer = self.inverted_bottleneck(inputs=layer, filters=160, strides=2, repeat=3, factor=6, name='bottleneck_6')
                layer = self.inverted_bottleneck(inputs=layer, filters=320, strides=1, repeat=1, factor=6, name='bottleneck_7')
                if self.is_tb_logging:
                    self.summary_values.append(tf.summary.histogram('bottleneck_module', layer))
                # 1x1 expansion, average pooling and the classification head.
                layer = self.conv2d(inputs=layer, filters=1280, name='conv2d_8')
                layer = self.batch_norm(inputs=layer, name='conv2d_8_batch')
                # Kept around for class-activation-map style inspection.
                self.cam_layer = layer
                layer = self.dropout(inputs=layer, rate=flags.FLAGS.dropout_rate, name='conv2d_8_dropout')
                layer = tf.layers.average_pooling2d(inputs=layer, pool_size=7, strides=1, name='conv2d_8_avg_pool')
                layer = self.conv2d(inputs=layer, filters=flags.FLAGS.image_class, name='conv2d_8_output')
                self.logits = tf.squeeze(input=layer, axis=[1, 2], name='logits')
            with tf.variable_scope(name_or_scope='output_scope'):
                # Restrict variables/updates to this model's own scope so
                # several Model instances can coexist in one graph.
                self.variables = [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if self.name in var.name]
                self.prob = tf.nn.softmax(logits=self.logits, name='softmax')
                self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y, name='ce_loss'))
                # Add this scope's L2 regularisation terms to the total loss.
                self.loss = tf.add_n([self.loss] +
                                     [var for var in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) if self.name in var.name], name='tot_loss')
                self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, -1), self.y), dtype=tf.float32))
                if self.is_tb_logging:
                    self.summary_values.append(tf.summary.scalar('loss', self.loss))
                    self.summary_values.append(tf.summary.scalar('accuracy', self.accuracy))
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
                # Run batch-norm moving-average updates before each train step.
                update_opt = [var for var in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if self.name in var.name]
                with tf.control_dependencies(update_opt):
                    self.train_op = self.optimizer.minimize(self.loss, var_list=self.variables)
            if self.is_tb_logging:
                self.summary_merged_values = tf.summary.merge(inputs=self.summary_values)

    def batch_norm(self, inputs, act=tf.nn.relu6, name='batch_norm_layer'):
        '''
        Batch Normalization
        - scale=True: use the scale factor (gamma)
        - center=True: use the shift factor (beta)
        '''
        with tf.variable_scope(name_or_scope=name):
            return tf.contrib.layers.batch_norm(inputs=inputs, decay=0.9, center=True, scale=True, fused=True,
                                                updates_collections=tf.GraphKeys.UPDATE_OPS, activation_fn=act,
                                                is_training=self.is_training, scope='batch_norm')

    def conv2d(self, inputs, filters, kernel_size=1, strides=1, padding='same', act=tf.identity, name='conv2d_layer'):
        """2D convolution with the model's shared initializers/regularizers."""
        return tf.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                                padding=padding, activation=act,
                                kernel_initializer=self.weights_initializers,
                                bias_initializer=self.weights_initializers,
                                kernel_regularizer=self.weights_regularizers,
                                bias_regularizer=self.weights_regularizers,
                                name=name)

    def dropout(self, inputs, rate, name):
        """Dropout that is only active while ``self.is_training`` is true."""
        with tf.variable_scope(name_or_scope=name):
            return tf.layers.dropout(inputs=inputs, rate=rate, training=self.is_training, name='dropout')

    def depthwise_conv2d(self, inputs, kernel_size=3, strides=2, padding='SAME', depth_multiplier=1, name=None):
        """Depthwise convolution (slim separable conv with no pointwise stage)."""
        layer = slim.separable_conv2d(inputs=inputs, num_outputs=None, kernel_size=kernel_size, activation_fn=tf.identity,
                                      weights_initializer=self.weights_initializers, weights_regularizer=self.weights_regularizers,
                                      depth_multiplier=depth_multiplier, stride=strides, padding=padding, scope=name)
        return layer

    def inverted_bottleneck(self, inputs, filters, strides, repeat, factor, name=None):
        """Stack of ``repeat`` inverted-bottleneck blocks (expand -> depthwise
        -> linear projection) with residual connections where shapes allow."""
        def _mobilenet_block(inputs, input_filters, output_filters, strides, name):
            # One expand/depthwise/project block; ``factor`` is the expansion.
            with tf.variable_scope(name_or_scope=name):
                layer = self.conv2d(inputs=inputs, filters=input_filters * factor, name='bottleneck_layer')
                layer = self.batch_norm(inputs=layer, name='bottleneck_batch')
                layer = self.depthwise_conv2d(inputs=layer, strides=strides, name='depthwise_layer')
                layer = self.batch_norm(inputs=layer, name='depthwise_batch')
                layer = self.conv2d(inputs=layer, filters=output_filters, name='linear_layer')
                # Linear (identity) activation on the projection output.
                layer = self.batch_norm(inputs=layer, act=tf.identity, name='linear_batch')
                return layer
        prev_layer = inputs
        input_filters = inputs.get_shape().as_list()[-1]
        with tf.variable_scope(name_or_scope=name):
            for idx in range(repeat):
                layer = _mobilenet_block(inputs=prev_layer, input_filters=input_filters, output_filters=filters,
                                         strides=strides, name='mobilenet_block_{}'.format(idx))
                '''Skip the shortcut connection when the first layer inside the inverted_bottleneck uses strides=2.'''
                if idx != 0 and strides != 2:
                    # Match channel counts with a 1x1 conv before the residual add.
                    if prev_layer.get_shape().as_list()[-1] != layer.get_shape().as_list()[-1]:
                        prev_layer = self.conv2d(inputs=prev_layer, filters=filters, name='residual_match_{}'.format(idx))
                    layer = tf.add(prev_layer, layer, name='residual_add_{}'.format(idx))
                '''Except for the final repeat step, continue with stride 1.'''
                if idx != repeat-1:
                    strides = 1
                prev_layer = layer
        return layer

    def train(self, x, y):
        """Run one optimisation step; returns accuracy, loss (and summaries
        when TensorBoard logging is enabled) plus the train-op result."""
        if self.is_tb_logging:
            return self.sess.run([self.accuracy, self.loss, self.summary_merged_values, self.train_op], feed_dict={self.x: x, self.y: y})
        else:
            return self.sess.run([self.accuracy, self.loss, self.train_op], feed_dict={self.x: x, self.y: y})

    def validation(self, x, y):
        """Evaluate one batch: returns (accuracy, loss, class probabilities)."""
        return self.sess.run([self.accuracy, self.loss, self.prob], feed_dict={self.x: x, self.y: y})

    def test(self, x, y):
        """Same as :meth:`validation`; kept separate for pipeline symmetry."""
        return self.sess.run([self.accuracy, self.loss, self.prob], feed_dict={self.x: x, self.y: y})
challenges/left_join/left_join.py | nastinsk/python-data-structures-and-algorithms | 0 | 12758071 | <gh_stars>0
def left_join(d1, d2):
    """Perform a LEFT JOIN of two dictionaries.

    Every key of *d1* appears in the result, mapped to a two-element list:
    the value from *d1* followed by the matching value from *d2*, or ``None``
    when *d2* has no such key.  Key comparison is case-sensitive, so "cat"
    and "Cat" are treated as different keys.

    Input: {dictionary1}, {dictionary2}
    Output: {results dictionary}
    """
    return {key: [value, d2.get(key)] for key, value in d1.items()}
| 4.0625 | 4 |
examples/np_semantic_segmentation/train.py | laugustyniak/nlp-architect | 0 | 12758072 | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from neon import logger as neon_logger
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser
from examples.np_semantic_segmentation.data import NpSemanticSegData, absolute_path
from nlp_architect.models.np_semantic_segmentation import NpSemanticSegClassifier
from nlp_architect.utils.io import validate_existing_filepath, validate_parent_exists
def train_mlp_classifier(dataset, model_file_path, num_epochs, callback_args):
    """
    Train the np_semantic_segmentation mlp classifier.

    Builds the classifier, fits it on the dataset, saves the trained weights
    to *model_file_path*, and prints the evaluation metrics.

    Args:
        dataset: NpSemanticSegData object containing train/test sets
        model_file_path (str): where to save the trained model
        num_epochs (int): number of training epochs
        callback_args (dict): neon callback arguments
    """
    classifier = NpSemanticSegClassifier(num_epochs, callback_args)
    classifier.build()
    classifier.fit(dataset.test_set, dataset.train_set)
    classifier.save(model_file_path)
    error_rate, test_accuracy_rate, precision_recall_rate = classifier.eval(dataset.test_set)
    neon_logger.display('Misclassification error = %.1f%%' %
                        (error_rate * 100))
    neon_logger.display('Test accuracy rate = %.1f%%' %
                        (test_accuracy_rate * 100))
    neon_logger.display('precision rate = %s!!' %
                        (str(precision_recall_rate[0])))
    neon_logger.display('recall rate = %s!!' %
                        (str(precision_recall_rate[1])))
if __name__ == "__main__":
# parse the command line arguments
parser = NeonArgparser()
parser.set_defaults(epochs=200)
parser.add_argument('--data', type=validate_existing_filepath,
help='Path to the CSV file where the prepared dataset is saved')
parser.add_argument('--model_path', type=validate_parent_exists,
help='Path to save the model')
args = parser.parse_args()
data_path = absolute_path(args.data)
model_path = absolute_path(args.model_path)
# generate backend
be = gen_backend(batch_size=64)
# load data sets from file
data_set = NpSemanticSegData(data_path, train_to_test_ratio=0.8)
# train the mlp classifier
train_mlp_classifier(data_set, model_path, args.epochs, args.callback_args)
| 2.21875 | 2 |
functions/time.py | rageyboiii/test-boy | 5 | 12758073 | <gh_stars>1-10
# import asyncio
# import time
import cProfile
import functools
import io
import pstats
def timeit(func):
    """Decorator that profiles an async coroutine function with cProfile.

    Each await of the decorated coroutine runs under its own profiler; the
    cumulative-time statistics are printed to stdout and the coroutine's own
    result is returned unchanged.
    """
    # functools.wraps was missing before, so decorated coroutines lost their
    # __name__/__doc__ — problematic for logging and introspection.
    @functools.wraps(func)
    async def helper(*args, **kwargs):
        pr = cProfile.Profile()
        print('{}.time'.format(func.__name__))
        pr.enable()
        result = await func(*args, **kwargs)
        pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print(s.getvalue())
        return result
    return helper
| 2.5 | 2 |
models/Models.py | keldkemp/TelegramBotHSE | 0 | 12758074 | <reponame>keldkemp/TelegramBotHSE<filename>models/Models.py
"""
Представление данных.
"""
class Pars:
    """Value object holding one parsed timetable (lesson) entry."""

    def __init__(self, date, lesson, group, time, teacher):
        """Store the raw fields of a single lesson row.

        Args:
            date: date of the lesson (stored as ``date_lesson``).
            lesson: lesson/course title.
            group: student group identifier.
            time: time slot of the lesson.
            teacher: teacher name.
        """
        self.date_lesson = date
        self.lesson = lesson
        self.group = group
        self.time = time
        self.teacher = teacher

    def __repr__(self):
        # Added for easier debugging/logging; does not affect existing callers.
        return ("Pars(date_lesson={0!r}, lesson={1!r}, group={2!r}, "
                "time={3!r}, teacher={4!r})").format(
            self.date_lesson, self.lesson, self.group, self.time, self.teacher)
| 2.21875 | 2 |
utils/query2dict.py | msamunetogetoge/AutoTrader | 1 | 12758075 | <gh_stars>1-10
def get_data(query):
    """Build the kwargs for ``chart.graphs.drawgraph.Graph().CustomDraw``
    from the indicator-settings querydict posted by index.html.

    Args:
        query: mapping of form-field names to string values (e.g. a Django
            ``QueryDict``) with keys such as ``smaperiod1``, ``bbandN``,
            ``rsiperiod``, ``macdfastperiod``, ``ichimokut`` ...

    Returns:
        dict: indicator name -> {"params": tuple of numeric settings}.
    """
    return {
        "Sma": {"params": (int(query["smaperiod1"]), int(query["smaperiod2"]))},
        "Ema": {"params": (int(query["emaperiod1"]), int(query["emaperiod2"]))},
        "DEma": {"params": (int(query["demaperiod1"]), int(query["demaperiod2"]))},
        "Bb": {"params": (int(query["bbandN"]), float(query["bbandk"]))},
        "Rsi": {"params": (int(query["rsiperiod"]), float(query["rsibuythread"]), float(query["rsisellthread"]))},
        "Macd": {"params": (int(query["macdfastperiod"]), int(query["macdslowperiod"]), int(query["macdsignalperiod"]))},
        "Ichimoku": {"params": (int(query["ichimokut"]), int(query["ichimokuk"]), int(query["ichimokus"]))},
    }
| 2.921875 | 3 |
plotly/validators/surface/colorbar/__init__.py | mprostock/plotly.py | 12 | 12758076 | from ._ypad import YpadValidator
from ._yanchor import YanchorValidator
from ._y import YValidator
from ._xpad import XpadValidator
from ._xanchor import XanchorValidator
from ._x import XValidator
from ._title import TitleValidator
from ._tickwidth import TickwidthValidator
from ._tickvalssrc import TickvalssrcValidator
from ._tickvals import TickvalsValidator
from ._ticktextsrc import TicktextsrcValidator
from ._ticktext import TicktextValidator
from ._ticksuffix import TicksuffixValidator
from ._ticks import TicksValidator
from ._tickprefix import TickprefixValidator
from ._tickmode import TickmodeValidator
from ._ticklen import TicklenValidator
from ._tickformatstopdefaults import TickformatstopValidator
from ._tickformatstops import TickformatstopsValidator
from ._tickformat import TickformatValidator
from ._tickfont import TickfontValidator
from ._tickcolor import TickcolorValidator
from ._tickangle import TickangleValidator
from ._tick0 import Tick0Validator
from ._thicknessmode import ThicknessmodeValidator
from ._thickness import ThicknessValidator
from ._showticksuffix import ShowticksuffixValidator
from ._showtickprefix import ShowtickprefixValidator
from ._showticklabels import ShowticklabelsValidator
from ._showexponent import ShowexponentValidator
from ._separatethousands import SeparatethousandsValidator
from ._outlinewidth import OutlinewidthValidator
from ._outlinecolor import OutlinecolorValidator
from ._nticks import NticksValidator
from ._lenmode import LenmodeValidator
from ._len import LenValidator
from ._exponentformat import ExponentformatValidator
from ._dtick import DtickValidator
from ._borderwidth import BorderwidthValidator
from ._bordercolor import BordercolorValidator
from ._bgcolor import BgcolorValidator
| 1.273438 | 1 |
views/image.py | sbutler/spacescout_web | 0 | 12758077 | """ Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.http import HttpResponse, HttpResponseNotFound
from spacescout_web.spot import SpotImage, SpotException
from spacescout_web.middleware.unpatch_vary import unpatch_vary_headers
def ImageView(request, spot_id, image_id, thumb_width=None, thumb_height=None, constrain=False):
    """Serve a single spot image, optionally thumbnailed/constrained.

    On upstream failure the SpotException status code is passed through as
    the HTTP status of an empty response.
    """
    try:
        image = SpotImage(spot_id, request=request)
        contenttype, img = image.get(image_id, constrain, thumb_width, thumb_height)
    except SpotException as ex:
        return HttpResponse(status=ex.status_code)
    response = HttpResponse(img, content_type=contenttype)
    # These headers never vary for image responses, so drop them from Vary.
    unpatch_vary_headers(response, ['Cookie', 'X-Mobile', 'Accept-Language', 'User-Agent'])
    return response
def MultiImageView(request, spot_id=None, image_ids=None, thumb_width=None, thumb_height=None, constrain=False):
    """Serve several spot images combined into a single sprite response.

    On upstream failure the SpotException status code is passed through as
    the HTTP status of an empty response.
    """
    try:
        image = SpotImage(spot_id, request=request)
        headers, img = image.get_multi(image_ids, constrain, thumb_width, thumb_height)
    except SpotException as ex:
        return HttpResponse(status=ex.status_code)
    response = HttpResponse(img)
    response['Content-Type'] = headers['Content-Type']
    response['Sprite-Offsets'] = headers['Sprite-Offsets']
    # These headers never vary for image responses, so drop them from Vary.
    unpatch_vary_headers(response, ['Cookie', 'X-Mobile', 'Accept-Language', 'User-Agent'])
    return response
| 1.78125 | 2 |
survey/forms.py | flynnguy/django-survey | 7 | 12758078 | from models import QTYPE_CHOICES, Answer, Survey, Question, Choice
from django.conf import settings
from django.forms import BaseForm, Form, ValidationError
from django.forms import CharField, ChoiceField, SplitDateTimeField,\
CheckboxInput, BooleanField,FileInput,\
FileField, ImageField
from django.forms import Textarea, TextInput, Select, RadioSelect,\
CheckboxSelectMultiple, MultipleChoiceField,\
SplitDateTimeWidget,MultiWidget, MultiValueField
from django.forms.forms import BoundField
from django.forms.models import ModelForm
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.template import Context, loader
from django.template.defaultfilters import slugify
from itertools import chain
import uuid
class BaseAnswerForm(Form):
    """Base form for a single survey question (Python 2 codebase).

    Subclasses define an ``answer`` field of the appropriate type.  The form
    binds one Question to one Answer, keyed either by the authenticated user
    or by the anonymous session key.
    """

    def __init__(self, question, user, interview_uuid, session_key, edit_existing=False, *args, **kwdargs):
        self.question = question
        self.session_key = session_key.lower()
        self.user = user
        self.interview_uuid = interview_uuid
        self.answer = None
        initial = None
        if edit_existing:
            # Look up a previously saved answer for this user/session so the
            # form can be pre-populated.
            if not user.is_authenticated():
                query = question.answers.filter(session_key=session_key)
            else:
                query = question.answers.filter(user=user)
            if query.count():
                self.answer = query[0]
                initial = self.answer.text
                if 'initial' not in kwdargs:
                    kwdargs['initial'] = {}
                if 'answer' not in kwdargs['initial']:
                    kwdargs['initial']['answer'] = self.answer.text
        super(BaseAnswerForm, self).__init__(*args, **kwdargs)
        answer = self.fields['answer']
        answer.required = question.required
        answer.label = question.text
        if not question.required:
            answer.help_text = unicode(_('this question is optional'))
        if initial is not None and initial != answer.initial:
            if kwdargs['initial']['answer'] != answer.initial:
                ## rats.. we are a choice list style and need to map to id.
                answer.initial = initial

    def as_template(self):
        "Helper function for fieldsting fields data from form."
        bound_fields = [BoundField(self, field, name) for name, field in self.fields.items()]
        c = Context(dict(form = self, bound_fields = bound_fields))
        # TODO: check for template ... if template does not exist
        # we could just get_template_from_string to some default
        # or we could pass in the template name ... whatever we want
        t = loader.get_template('forms/form.html')
        return t.render(c)

    def save(self, commit=True):
        # Persist the cleaned answer as an Answer row; returns None for an
        # empty optional answer, raises for an empty required one.
        if not self.cleaned_data['answer']:
            if self.fields['answer'].required:
                raise ValidationError, _('This field is required.')
            return
        ans = self.answer
        if ans is None:
            ans = Answer()
        ans.question = self.question
        ans.session_key = self.session_key
        if self.user.is_authenticated():
            ans.user = self.user
        else:
            ans.user = None
        ans.interview_uuid = self.interview_uuid
        ans.text = self.cleaned_data['answer']
        if commit: ans.save()
        return ans
class TextInputAnswer(BaseAnswerForm):
    # Free-text answer rendered as a single-line text input.
    answer = CharField()
class TextAreaAnswer(BaseAnswerForm):
    # Free-text answer rendered as a multi-line textarea.
    answer = CharField(widget=Textarea)
class NullSelect(Select):
    """Select widget that prepends an "empty" choice (default ``---------``)."""

    def __init__(self, attrs=None, choices=(), empty_label=u"---------"):
        self.empty_label = empty_label
        super(NullSelect, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None, choices=(), **kwdargs):
        empty_choice = ()
        # kwdargs is needed because it is the only way to determine if an
        # override is provided or not.
        if 'empty_label' in kwdargs:
            if kwdargs['empty_label'] is not None:
                empty_choice = ((u'', kwdargs['empty_label']),)
        elif self.empty_label is not None:
            empty_choice = ((u'', self.empty_label),)
        base_choices = self.choices
        # Temporarily chain the empty choice in front, render, then restore.
        self.choices = chain(empty_choice, base_choices)
        result = super(NullSelect, self).render(name, value, attrs, choices)
        self.choices = base_choices
        return result
class ChoiceAnswer(BaseAnswerForm):
    """Single-choice answer rendered as a drop-down with an empty option."""

    answer = ChoiceField(widget=NullSelect)

    def __init__(self, *args, **kwdargs):
        super(ChoiceAnswer, self).__init__(*args, **kwdargs)
        choices = []
        choices_dict = {}
        self.initial_answer = None
        # Build (id, label) choices; labels embed the option image when set.
        for opt in self.question.choices.all().order_by("order"):
            if opt.image and opt.image.url:
                text = mark_safe(opt.text + '<br/><img src="%s"/>'%opt.image.url)
            else:
                text = opt.text
            # Pre-select the option matching a previously saved answer.
            if self.answer is not None and self.answer.text == opt.text:
                self.initial_answer = str(opt.id)
            choices.append((str(opt.id),text))
            choices_dict[str(opt.id)] = opt.text
        self.choices = choices
        self.choices_dict = choices_dict
        self.fields['answer'].choices = choices
        self.fields['answer'].initial = self.initial_answer
        if self.initial_answer is not None:
            self.initial['answer'] = self.initial_answer

    def clean_answer(self):
        # Map the submitted option id back to the option's plain text.
        key = self.cleaned_data['answer']
        if not key and self.fields['answer'].required:
            raise ValidationError, _('This field is required.')
        return self.choices_dict.get(key, key)
class ChoiceRadio(ChoiceAnswer):
    # Same choices as ChoiceAnswer, rendered as radio buttons instead.
    def __init__(self, *args, **kwdargs):
        super(ChoiceRadio, self).__init__(*args, **kwdargs)
        self.fields['answer'].widget = RadioSelect(choices=self.choices)
class ChoiceImage(ChoiceAnswer):
    # Radio-button choices whose labels are marked safe so any embedded
    # option-image HTML renders instead of being escaped.
    def __init__(self, *args, **kwdargs):
        super(ChoiceImage, self).__init__(*args, **kwdargs)
        self.choices = [ (k,mark_safe(v)) for k,v in self.choices ]
        self.fields['answer'].widget = RadioSelect(choices=self.choices)
class ChoiceCheckbox(BaseAnswerForm):
    """Multiple-choice answer rendered as a set of checkboxes.

    Unlike the single-choice forms, ``save`` creates one Answer row per
    selected option and returns the list of saved rows.
    """

    answer = MultipleChoiceField(widget=CheckboxSelectMultiple)

    def __init__(self, *args, **kwdargs):
        super(ChoiceCheckbox, self).__init__(*args, **kwdargs)
        choices = []
        choices_dict = {}
        self.initial_answer = None
        for opt in self.question.choices.all().order_by("order"):
            text = opt.text
            if opt.image and opt.image.url:
                text = mark_safe(opt.text + '<br />' + opt.image.url)
            choices.append((str(opt.id),text))
            choices_dict[str(opt.id)] = opt.text
            # Pre-select the option matching a previously saved answer.
            if self.answer is not None and self.answer.text == opt.text:
                self.initial_answer = str(opt.id)
        self.choices = choices
        self.choices_dict = choices_dict
        self.fields['answer'].choices = choices
        self.fields['answer'].initial = self.initial_answer
        if self.initial_answer is not None:
            self.initial['answer'] = self.initial_answer

    def clean_answer(self):
        # Map each submitted option id back to the option's plain text.
        keys = self.cleaned_data['answer']
        if not keys and self.fields['answer'].required:
            raise ValidationError, _('This field is required.')
        for key in keys:
            if not key and self.fields['answer'].required:
                raise ValidationError, _('Invalid Choice.')
        return [self.choices_dict.get(key, key) for key in keys]

    def save(self, commit=True):
        # One Answer row is created per selected option.
        if not self.cleaned_data['answer']:
            if self.fields['answer'].required:
                raise ValidationError, _('This field is required.')
            return
        ans_list = []
        for text in self.cleaned_data['answer']:
            ans = Answer()
            ans.question = self.question
            ans.session_key = self.session_key
            if self.user.is_authenticated():
                ans.user = self.user
            else:
                ans.user = None
            ans.interview_uuid = self.interview_uuid
            ans.text = text
            if commit: ans.save()
            ans_list.append(ans)
        return ans_list
## each question gets a form with one element, determined by the type
## for the answer.
# Dispatch table: Question.qtype code -> form class used to render/validate it.
QTYPE_FORM = {
    'T': TextInputAnswer,
    'A': TextAreaAnswer,
    'S': ChoiceAnswer,
    'R': ChoiceRadio,
    'I': ChoiceImage,
    'C': ChoiceCheckbox,
}
def forms_for_survey(survey, request, edit_existing=False):
    """Build one form per question of *survey*, in display order.

    Returns None when a question has a qtype with no registered form class.
    """
    ## add session validation to base page.
    prefix_base = str(survey.id) + '_'
    session_key = request.session.session_key.lower()
    login_user = request.user
    random_uuid = uuid.uuid4().hex
    post = request.POST if request.POST else None  # bug in forms
    # If there's a question with no answers, it raises a KeyError
    # Let's just pretend there's no question if that happens.
    try:
        return [
            QTYPE_FORM[q.qtype](
                q, login_user, random_uuid, session_key,
                prefix=prefix_base + str(q.id), data=post,
                edit_existing=edit_existing,
            )
            for q in survey.questions.all().order_by("order")
        ]
    except KeyError:
        return None
class CustomDateWidget(TextInput):
    """Text input styled and wired like the Django admin date field."""
    class Media:
        js = ('/admin/jsi18n/',
              settings.ADMIN_MEDIA_PREFIX + 'js/core.js',
              settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js",
              )
    def __init__(self, attrs=None):
        # BUG FIX: default was a mutable {} (shared-mutable-default pitfall).
        # Caller-supplied attrs were and still are deliberately ignored:
        # the admin styling attrs always win.
        super(CustomDateWidget, self).__init__(attrs={'class': 'vDateField', 'size': '10'})
class CustomTimeWidget(TextInput):
    """Text input styled and wired like the Django admin time field."""
    class Media:
        js = ('/admin/jsi18n/',
              settings.ADMIN_MEDIA_PREFIX + 'js/core.js',
              settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js",
              )
    def __init__(self, attrs=None):
        # BUG FIX: default was a mutable {} (shared-mutable-default pitfall).
        # Caller-supplied attrs were and still are deliberately ignored.
        super(CustomTimeWidget, self).__init__(attrs={'class': 'vTimeField', 'size': '8'})
class CustomSplitDateTime(SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        # Call MultiWidget directly (not SplitDateTimeWidget) because we want
        # to supply our own admin-styled date/time sub-widgets.
        MultiWidget.__init__(self, [CustomDateWidget, CustomTimeWidget], attrs)

    def format_output(self, rendered_widgets):
        markup = u'<p class="datetime">%s %s<br />%s %s</p>' % (
            _('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1])
        return mark_safe(markup)
class SurveyForm(ModelForm):
    """Admin-style form for creating/editing a Survey.

    Enforces that the slugified title is unique across all surveys.
    """
    opens = SplitDateTimeField(widget=CustomSplitDateTime(),
                               label=Survey._meta.get_field("opens").verbose_name)
    closes = SplitDateTimeField(widget=CustomSplitDateTime(),
                                label=Survey._meta.get_field("closes").verbose_name)

    class Meta:
        model = Survey
        exclude = ("created_by", "editable_by", "slug", "recipient_type", "recipient_id")

    def clean(self):
        title_slug = slugify(self.cleaned_data.get("title"))
        # Check uniqueness for new surveys or when the title changed; the two
        # branches previously duplicated the same query and raise.
        title_changed = (not hasattr(self, "instance")
                         or self.instance.title != self.cleaned_data.get("title"))
        # exists() avoids materializing the queryset just to count it, and the
        # call-form raise replaces the Python-2-only "raise E, msg" syntax.
        if title_changed and Survey.objects.filter(slug=title_slug).exists():
            raise ValidationError(_('The title of the survey must be unique.'))
        return self.cleaned_data
class QuestionForm(ModelForm):
    """Form for editing a Question; the parent survey is set by the view."""
    class Meta:
        model = Question
        # BUG FIX: ("survey") is just the string "survey" — a one-element
        # tuple needs the trailing comma for Django to treat it as a list
        # of field names.
        exclude = ("survey",)
class ChoiceForm(ModelForm):
    """Form for editing a Choice; the parent question is set by the view."""
    class Meta:
        model = Choice
        # BUG FIX: ("question") was a bare string, not a one-element tuple.
        exclude = ("question",)
| 2.171875 | 2 |
week6/test_add_function.py | johnehunt/computationalthinking | 1 | 12758079 | from add_function import add
def test_add_zeros():
    """0 + 0 is the additive identity case."""
    assert add(0, 0) == 0


def test_add_one_and_zero():
    assert add(1, 0) == 1


def test_add_zero_and_one():
    assert add(0, 1) == 1


def test_add_one_and_one():
    assert add(1, 1) == 2


def test_add_zero_and_minus_one():
    assert add(0, -1) == -1


def test_add_minus_one_and_zero():
    assert add(-1, 0) == -1
def test_add_minus_one_and_minus_one():
result = add(-1, -1)
assert result == -2 | 3.5 | 4 |
docker/test/integration/minifi/validators/SegfaultValidator.py | dtrodrigues/nifi-minifi-cpp | 113 | 12758080 | from .OutputValidator import OutputValidator
class SegfaultValidator(OutputValidator):
    """
    Trivial validator that accepts any outcome: it exists to verify only
    that the flow ran to completion (i.e. did not crash).
    """

    def validate(self):
        # Nothing to inspect — reaching this point counts as success.
        return True
| 2.5625 | 3 |
main_app/migrations/0014_auto_20210429_1825.py | Jonak-Adipta-Kalita/JAK-Website | 1 | 12758081 | # Generated by Django 3.1.7 on 2021-04-29 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: normalizes Notfication.notification_name to a
    # CharField with max_length=50 and an empty-string default.

    dependencies = [
        ("main_app", "0013_notfication"),
    ]

    operations = [
        migrations.AlterField(
            model_name="notfication",
            name="notification_name",
            field=models.CharField(default="", max_length=50),
        ),
    ]
| 1.484375 | 1 |
examples/1827405109.py | lobo0616/bysj | 1 | 12758082 | # 学号:1827405109
# 姓名:郑悦薇
# IP:192.168.157.135
# 上传时间:2018/11/12 14:11:08
import math
def func1(a, b):
    """Count trailing zeros of the product a*(a+1)*...*b.

    Returns None for invalid input (a <= 0 or b < a).
    """
    if a <= 0 or b < a:
        return None
    product = 1
    for factor in range(a, b + 1):
        product *= factor
    zeros = 0
    while product % 10 == 0:
        product //= 10
        zeros += 1
    return zeros
def func2(a, b):
    """Count palindromic integers in the inclusive range between a and b.

    The endpoints may be given in either order; returns None when either
    endpoint is non-positive.
    """
    if a <= 0 or b <= 0:
        return None
    lo, hi = (a, b) if a <= b else (b, a)
    count = 0
    for value in range(lo, hi + 1):
        if phw(value):
            count += 1
    return count
def phw(x):
    """Return True when the integer x reads the same forwards and backwards.

    BUG FIX: the original digit-reversal loop never terminated for negative
    input (-1 // 10 == -1 in Python); negatives now return False explicitly.
    """
    x = int(x)
    if x < 0:
        # A leading minus sign can never mirror, and the reversal loop below
        # would not terminate for negative values.
        return False
    original = x
    reversed_digits = 0
    while x:
        reversed_digits = reversed_digits * 10 + x % 10
        x //= 10
    return reversed_digits == original
def func3(lst):
    """Return the non-negative elements of lst that are not multiples of 3,
    sorted ascending."""
    return sorted(value for value in lst if value >= 0 and value % 3)
if __name__=="__main__":
pass | 3.875 | 4 |
Module04/constants.py | geiyer/cis189-python | 2 | 12758083 | PI = 3.14
# Sales-tax rate used in the course exercises, expressed as a whole-number percent.
SALES_TAX = 6
if __name__ == '__main__':
print('Constant file directly executed')
else:
print('Constant file is imported') | 1.726563 | 2 |
okl4_kernel/okl4_2.1.1-patch.9/tools/SCons/Warnings.py | CyberQueenMara/baseband-research | 77 | 12758084 | #
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""SCons.Warnings
This file implements the warnings framework for SCons.
"""
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Warnings.py 0.96.93.D001 2006/11/06 08:31:54 knight"
import SCons.Errors
class Warning(SCons.Errors.UserError):
    """Base class for all SCons warnings."""
    pass
class CacheWriteErrorWarning(Warning):
    """Writing a derived file to the cache failed."""
    pass
class CorruptSConsignWarning(Warning):
    """A .sconsign signature file could not be read/parsed."""
    pass
class DependencyWarning(Warning):
    """A problem was found while scanning/resolving dependencies."""
    pass
class DeprecatedWarning(Warning):
    """A deprecated feature or interface was used."""
    pass
class DuplicateEnvironmentWarning(Warning):
    """The same target was built with two different environments."""
    pass
class MissingSConscriptWarning(Warning):
    """A referenced SConscript file does not exist."""
    pass
class NoParallelSupportWarning(Warning):
    """Parallel builds (-j) are not supported on this platform/setup."""
    pass
class ReservedVariableWarning(Warning):
    """A construction variable reserved by SCons was assigned to."""
    pass
class MisleadingKeywordsWarning(Warning):
    """A keyword argument looks like a typo of a real one (e.g. 'targets')."""
    pass
# When true, warn() raises the warning instead of passing it to _warningOut.
_warningAsException = 0
# The below is a list of 2-tuples. The first element is a class object.
# The second element is true if that class is enabled, false if it is disabled.
# Entries are searched front-to-back, so the most recently inserted rule wins.
_enabled = []
# Callable used by warn() to emit enabled warnings; None means no output.
_warningOut = None
def suppressWarningClass(clazz):
    """Suppresses all warnings that are of type clazz or
    derived from clazz."""
    # Prepend so this rule shadows any earlier rule for the same class.
    _enabled.insert(0, (clazz, 0))
def enableWarningClass(clazz):
    """Enables all warnings that are of type clazz or
    derived from clazz."""
    # (Docstring fixed: it previously said "Suppresses", copy-pasted from above.)
    # Prepend so this rule shadows any earlier rule for the same class.
    _enabled.insert(0, (clazz, 1))
def warningAsException(flag=1):
    """Turn warnings into exceptions.  Returns the old value of the flag."""
    # flag defaults to truthy, so a bare call enables raise-on-warn.
    global _warningAsException
    old = _warningAsException
    _warningAsException = flag
    return old
def warn(clazz, *args):
    """Instantiate a warning of type clazz and dispatch it.

    The first rule in _enabled that matches the warning's type decides the
    outcome: if enabled, the warning is raised (_warningAsException) or
    passed to _warningOut; either way the search stops at the first match.
    """
    global _enabled, _warningAsException, _warningOut
    warning = clazz(args)
    # Renamed the loop variable: it previously shadowed the 'clazz' parameter.
    for rule_class, flag in _enabled:
        if isinstance(warning, rule_class):
            if flag:
                if _warningAsException:
                    raise warning
                if _warningOut:
                    _warningOut(warning)
            break
| 1.984375 | 2 |
python/qitoolchain/actions/convert_package.py | PrashantKumar-sudo/qibuild | 0 | 12758085 | <filename>python/qitoolchain/actions/convert_package.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Convert a binary archive into a qiBuild package. """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import qisys
import qisys.parsers
from qisys import ui
from qitoolchain.convert import convert_package, convert_from_conan, conan_json_exists
from qitoolchain.conan import Conan
def configure_parser(parser):
    """Configure parser for this action """
    qisys.parsers.default_parser(parser)
    parser.add_argument("--name", required=True, help="The name of the package")
    # BUG FIX: help text previously read "The name of the package" (copy-paste).
    parser.add_argument("--version", help="The version of the package")
    parser.add_argument("package_path", metavar='PACKAGE_PATH',
                        help="The path to the archive or conan directory to be converted")
    parser.add_argument("--batch", dest="interactive", action="store_false",
                        help="Do not prompt for cmake module edition")
    parser.add_argument("--conan", action="store_true",
                        help="Define if we work on a conan package")
    parser.add_argument("--conan-shared", dest="shared", action="store_true",
                        help="Set to get the shared version of the conan library")
    parser.add_argument("--conan-static", dest="static", action="store_true",
                        help="Set to get the static version of the conan library")
    parser.add_argument("--conan-channel", dest='channels', action='append',
                        help="conan channel of the conan packages to be converted, could be used multiple times")
    parser.set_defaults(interactive=True, version="0.0.1")
def do(args):
    """ Convert a binary archive into a qiBuild package.

    Dispatches to the Conan conversion path when --conan was given, otherwise
    converts a plain binary archive. Returns the generated package path.
    """
    name = args.name
    interactive = args.interactive
    package_path = args.package_path
    if args.conan:
        shared = None
        if args.shared or args.static:
            # TYPO FIX in the user-facing message ("mutualy exlusive").
            msg = "--conan-shared and --conan-static are mutually exclusive, please remove one of them."
            # NOTE(review): assert is stripped under `python -O`; consider
            # raising an explicit error for this CLI validation.
            assert args.shared != args.static, msg
        if args.shared is True:
            shared = True
        if args.static is True:
            shared = False
        conan = Conan(args.name, args.version, args.channels, shared)
        if not conan_json_exists(package_path):
            # No pre-built conan metadata at that path: build the package first.
            package_path = conan.create()
        ui.info("Converting Conan package", package_path, "into a qiBuild package")
        res = convert_from_conan(package_path, name, args.version)
    else:
        ui.info("Converting", package_path, "into a qiBuild package")
        res = convert_package(package_path, name, interactive=interactive)
    message = """Conversion succeeded.\n\nqiBuild package:\n {0}\n
You can add this qiBuild package to a toolchain using:
    qitoolchain add-package -c <config name> {0}
or
    qitoolchain add-package -t <toolchain name> {0}""".format(res)
    qisys.ui.info(message)
    return res
| 1.90625 | 2 |
test/test_regressions.py | mattharrison/rst2odp | 10 | 12758086 |
import imp
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
from docutils.readers import standalone
from docutils.core import Publisher, default_description, \
default_usage
import quicktest
from types import ModuleType
#rst2odp = ModuleType('rst2odp')
#exec open('../bin/rst2odp') in rst2odp.__dict__
# rst2odp is an executable script (not importable by module path), so load it
# by file path; the fallback handles running the suite from repo root vs test/.
try:
    rst2odp = imp.load_source('rst2odp', '../bin/rst2odp')
except IOError:
    rst2odp = imp.load_source('rst2odp', 'bin/rst2odp')
from odplib import preso, zipwrap
class TestRegressions(unittest.TestCase):
    """Regression tests: render rST to an .odp file and assert that the
    generated content.xml contains the expected ODF fragments.

    NOTE(review): the helpers write fixed paths under /tmp, so the suite is
    POSIX-only and not safe to run in parallel — confirm before changing.
    """
    def _to_odp_content(self, rst, xml_filename, odp_name='/tmp/out'):
        # Render *rst* (via a temp file in /tmp) into odp_name and return the
        # pretty-printed content.xml, which is also written to xml_filename
        # for manual inspection.
        reader = standalone.Reader()
        reader_name = 'standalone'
        writer = rst2odp.Writer()
        writer_name = 'pseudoxml'
        parser = None
        parser_name = 'restructuredtext'
        settings = None
        settings_spec = None
        settings_overrides = None
        config_section = None
        enable_exit_status = 1
        usage = default_usage
        publisher = Publisher(reader, parser, writer,# source=StringIO(rst),
                              settings=settings,
                              destination_class=rst2odp.BinaryFileOutput)
        publisher.set_components(reader_name, parser_name, writer_name)
        description = ('Generates OpenDocument/OpenOffice/ODF slides from '
                       'standalone reStructuredText sources. ' + default_description)
        fin = open('/tmp/in.rst', 'w')
        fin.write(rst)
        fin.close()
        argv = ['--traceback', '/tmp/in.rst', odp_name]
        output = publisher.publish(argv, usage, description, settings_spec, settings_overrides, config_section=config_section, enable_exit_status=enable_exit_status)
        # pull content.xml out of /tmp/out
        z = zipwrap.Zippier(odp_name)
        fout = open(xml_filename, 'w')
        content = preso.pretty_xml(z.cat('content.xml'))
        fout.write(content)
        fout.close()
        return content
    def check_output(self, rst, desired, filename='/tmp/foo.xml', outname='/tmp/out'):
        # Assert that the rendered content.xml contains the *desired* lines;
        # the docutils pseudo-XML tree is included in the failure message.
        content = self._to_odp_content(rst, filename, odp_name=outname)
        tree = self.get_tree(rst)
        self.assertTrue(_contains_lines(content, desired), "%s should have %s \nTree: %s" %(content, desired, tree))
    def get_tree(self, rst):
        # Parse *rst* with docutils and return its pretty-printed node tree
        # (used only for diagnostics in check_output failures).
        parser = quicktest.Parser()
        input = rst
        source_path='test file'
        settings = quicktest.OptionParser(components=(quicktest.Parser,)).get_default_values()
        document = quicktest.new_document(source_path, settings)
        parser.parse(input, document)
        format = 'pretty'
        optargs = {'debug': 0, 'attributes': 0}
        output = quicktest.format(format, input, document, optargs)
        return output
    def test_basic(self):
        rst = """
Title
-----
hello world
"""
        desired = """<text:p text:style-name="P1">hello world</text:p>"""
        self.check_output(rst, desired, outname='/tmp/basic.odp')
    def test_link(self):
        rst = """
Title
-----
https://github.com/talkpython/illustrated-python-3-course
"""
        desired = """ <draw:text-box>
<text:p text:style-name="P1">https://github.com/talkpython/illustrated-python-3-course</text:p>
</draw:text-box>"""
        self.check_output(rst, desired, outname='/tmp/link.odp')
    def test_2_paragraphs(self):
        rst = """
2 para
-------
Hello
World
"""
        desired = """<text:p text:style-name="P1">Hello</text:p>
<text:p text:style-name="P1">World</text:p>"""
        self.check_output(rst, desired, '/tmp/2para.xml', '/tmp/2para.odp')
    def test_mono_block(self):
        rst = """
From script
------------
Make file ``hello.py`` with ::
print "hello world"
Run with::
python hello.py
"""
        desired='''<text:p text:style-name="P1">
Make file
<text:s/>
<text:span text:style-name="T0">hello.py</text:span>
with
</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T0">
print "hello world"
<text:line-break/>
</text:span>
</text:p>'''
        self.check_output(rst, desired, '/tmp/monoblock.xml', outname='/tmp/monoblock.odp')
    # NOTE(review): name intentionally mangled ("tes2t") so the runner skips
    # this test — confirm before re-enabling.
    def tes2t_code_block(self):
        rst = """
``id``
--------
.. code-block:: pycon
>>> a = 4
>>> id(a)
"""
        desired='bad'
        self.check_output(rst, desired, '/tmp/code.xml')
    # NOTE(review): also deliberately disabled via the "te2st" name mangling.
    def te2st_code_block_with_space(self):
        rst = """
``id``
--------
.. code-block:: pycon
>>> a = 4
>>> id(a)
"""
        desired='bad'
        self.check_output(rst, desired, '/tmp/code.xml')
    def test_vert_spacing_42(self):
        rst = '''Doc attribute exploration
=========================
:Author: <NAME>
:Email: <EMAIL>
:Institute: Data Science Institute, ICL
:Date: 2017-11-30
:Twitter: @mehere
:Organisation: My university
:Tagline: Only connect
.. email address is rendered as a hyperlinked "<EMAIL>"
'''
        desired = '''<draw:text-box>
<text:p text:style-name="P0">
<text:span text:style-name="T0"><NAME></text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0"><EMAIL></text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Data Science Institute, ICL</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">2017-11-30</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">@mehere</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">My university</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Only connect</text:span>
</text:p>
</draw:text-box>'''
        self.check_output(rst, desired, '/tmp/code.xml')
    def test_email_43(self):
        rst = """Doc attribute exploration
=========================
:Author: <NAME>
:Email: <EMAIL>
:Institute: Data Science Institute, ICL
:Date: 2017-11-30
.. email address is rendered as a hyperlinked "<EMAIL>"
"""
        desired = '''<draw:text-box>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Ann Author</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0"><EMAIL></text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">Data Science Institute, ICL</text:span>
</text:p>
<text:p text:style-name="P0">
<text:span text:style-name="T0">2017-11-30</text:span>
</text:p>
</draw:text-box>'''
        self.check_output(rst, desired, '/tmp/code.xml')
    def test_from_script(self):
        rst = """From script
------------
Make file ``hello.py`` with::
print("hello world")
Run with:
.. code-block:: console
$ python3 hello.py
"""
        desired = '''<draw:text-box>
<text:p text:style-name="P1">
Make file
<text:s/>
<text:span text:style-name="T0">hello.py</text:span>
with:
</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T0">
print("hello world")
<text:line-break/>
</text:span>
</text:p>
<text:p text:style-name="P1">Run with:</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T1">$</text:span>
<text:span text:style-name="T0">
<text:s/>
python3
<text:s/>
hello.py
<text:line-break/>
</text:span>
</text:p>
</draw:text-box>'''
        self.check_output(rst, desired, '/tmp/code.xml')
    # NOTE(review): `desired = 'foo'` looks like a work-in-progress
    # placeholder; confirm the intended expectation before trusting this test.
    def test_textbox_with_size(self):
        rst = """
Who Created Python?
-------------------
.. grid:: 2,2x1
Python was created by Dutch programmer <NAME> in 1989.
He wanted to create a tool to allow for easy scripting
.. class:: font-size:8pt
.. textbox:: {"x": "2cm", "y": "18.2cm", "width": "25cm"}
Image via https://en.wikipedia.org/wiki/Guido_van_Rossum
"""
        desired = 'foo'
        self.check_output(rst, desired, '/tmp/code.xml')
    def test_normal_sized_styled_before_code(self):
        rst ="""
txt before code
----------------
.. class:: normal
foo
.. class:: normal
.. code-block:: python
a = 3
"""
        desired='''<text:p text:style-name="P1">
<text:span text:style-name="T1">
a
<text:s/>
</text:span>
<text:span text:style-name="T3">=</text:span>
<text:span text:style-name="T1">
<text:s/>
</text:span>
<text:span text:style-name="T3">3</text:span>
<text:span text:style-name="T1">
<text:line-break/>
</text:span>
</text:p>'''
        self.check_output(rst, desired, '/tmp/code.xml')
    # NOTE(review): deliberately disabled via the "te2st" name mangling.
    def te2st_styled_before_code(self):
        rst ="""
txt before code
----------------
.. class:: large
foo
.. class:: large
.. code-block:: python
a = 3
"""
        desired='''<text:p text:style-name="P1">
foo
</text:p>
<text:p text:style-name="P1">
<text:span text:style-name="T0">
a
<text:s/>
</text:span>
<text:span text:style-name="T1">
=
</text:span>
<text:span text:style-name="T0">
<text:s/>
</text:span>
<text:span text:style-name="T1">
3
</text:span>
<text:span text:style-name="T0">
<text:line-break/>
</text:span>
</text:p>
'''
        self.check_output(rst, desired, '/tmp/code2.xml')
def _contains_lines(haystack, needle, ignore_whitespace=True):
"""
>>> _contains_lines(range(4), range(1,2))
True
>>> _contains_lines(range(4), range(1,5))
False
"""
if isinstance(haystack, str):
haystack = haystack.split('\n')
if isinstance(needle, str):
needle = needle.split('\n')
if ignore_whitespace:
haystack = [str(x).strip() for x in haystack]
needle = [str(x).strip() for x in needle]
for i, line in enumerate(haystack):
if needle[0] == line and haystack[i:i+len(needle)] == needle:
return True
return False
# Entry point: run the regression suite directly (`python test_regressions.py`).
if __name__ == '__main__':
    unittest.main()
# import doctest
# doctest.testmod()
| 2.078125 | 2 |
pbfhr/meshes/generate_core_with_reflectors.py | miaoyinb/virtual_test_bed | 19 | 12758087 | #!python
# 2-D mesh of the PB-FHR reactor, consisting of the core and reflectors
import numpy as np
import sys
import os
# append path to module so cubit can find it
sys.path.append(os.getcwd())
import pbfhr as fhr
# whether to include orificing along the outflow boundary
plenum = True
# meshing scheme
scheme = 'Map'
# approximate element size
# dx = 0.06
cubit.cmd('reset')
################################################################################
# Surface (volume) definitions
################################################################################
# Current index and dictionnary of indices
index = 1
# ind maps region names to the id of their first cubit vertex.
ind = {}
####
# Inner reflector (surface 1, block 1)
# Create left most vertices
cubit.cmd('create vertex 0.01 ' + str(fhr.geometry['inner_cr_channel']['z'][0]) + ' 0')
cubit.cmd('create vertex 0.01 ' + str(fhr.geometry['inner_cr_channel']['z'][-1]) + ' 0')
ind['center_axis'] = index
index += 2
# Create vertices defining the inner reflector
ind['inner_reflector'] = index
for i in [0, fhr.geometry['inner_cr_channel']['n_vertices'] - 1]: #range(fhr.geometry['inner_cr_channel']['n_vertices']):
    cubit.cmd('create vertex ' + str(fhr.geometry['inner_cr_channel']['r'][i]) + ' ' + str(fhr.geometry['inner_cr_channel']['z'][i]) + ' 0')
# NOTE(review): only +1 here although the loop above creates two vertices —
# confirm the index bookkeeping is intentional.
index += 1
# Create surface from those vertices
vertices = "4 3 1 2"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Control rod channel (surface 2, block 2)
# Create vertices at the right of the channel
cubit.cmd('create vertex 0.35 ' + str(fhr.geometry['inner_cr_channel']['z'][0]) + ' 0')
cubit.cmd('create vertex 0.35 ' + str(fhr.geometry['inner_cr_channel']['z'][-1]) + ' 0')
ind['inner_cr_channel'] = index
index += 2
# Create vertices where the channel touches the active region, as well as the other part of the reflector
for i in range(fhr.geometry['inner_radius']['n_vertices']):
    cubit.cmd('create vertex ' + str(fhr.geometry['inner_radius']['r'][i]) + ' ' + str(fhr.geometry['inner_radius']['z'][i]) + ' 0')
index += fhr.geometry['inner_radius']['n_vertices']
# Make the control rod channel surface
vertices = "3 4 6 12 11 10 5"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Inner reflector outside of the control rod channel (surface 3 & 4, block 1)
# Bottom part
vertices = "10 5 7 8 9"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
# Top part
vertices = "14 13 12 6"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Fueled active region (surface 5, block 3)
# Create the vertices between the fuel and the reflector pebbles
ind['active_region'] = index
for i in range(fhr.geometry['middle_radius']['n_vertices']):
    cubit.cmd('create vertex ' + str(fhr.geometry['middle_radius']['r'][i]) + ' ' + str(fhr.geometry['middle_radius']['z'][i]) + ' 0')
index += fhr.geometry['middle_radius']['n_vertices']
# create surface from vertices defining the active region
vertices = "7 8 16 15"
cubit.cmd('create surface vertex ' + vertices)
vertices = "8 9 17 16"
cubit.cmd('create surface vertex ' + vertices)
vertices = "9 10 18 17"
cubit.cmd('create surface vertex ' + vertices)
vertices = "10 11 19 18"
cubit.cmd('create surface vertex ' + vertices)
vertices = "11 12 20 19"
cubit.cmd('create surface vertex ' + vertices)
vertices = "12 13 21 20"
cubit.cmd('create surface vertex ' + vertices)
vertices = "13 14 22 21"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('compress all')
cubit.cmd('merge all')
# ####
# # Reflector/blanket pebbles region (surface 6, block 4)
ind['pebble_reflector'] = index
for i in range(fhr.geometry['outer_radius']['n_vertices']):
    cubit.cmd('create vertex ' + str(fhr.geometry['outer_radius']['r'][i]) + ' ' + str(fhr.geometry['outer_radius']['z'][i]) + ' 0')
index += fhr.geometry['outer_radius']['n_vertices']
cubit.cmd('compress all')
cubit.cmd('merge all')
vertices = "15 16 24 23"
cubit.cmd('create surface vertex ' + vertices)
vertices = "16 17 18 19 25 24"
cubit.cmd('create surface vertex ' + vertices)
vertices = "19 20 26 25"
cubit.cmd('create surface vertex ' + vertices)
if (not plenum):
    vertices = "20 21 27 26"
else:
    cubit.cmd('create vertex 0.94333 4.42014 0')
    cubit.cmd('compress all')
    cubit.cmd('merge all')
    index += 1
    vertices = "26 39 27 21 20" # should be 29!
cubit.cmd('create surface vertex ' + vertices)
vertices = "21 22 28 27"
cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('compress all')
cubit.cmd('merge all')
# ####
# # Plenum
if plenum:
    # split the curve on the right to permit different boundary condition specifications.
    # First we need to split the curve according which part of the boundary is outflow.
    # cubit.cmd('split curve 40 fraction ' + str(1.0 - fhr.bcs['outflow_h_fraction']))
    # cubit.cmd('merge all')
    # cubit.cmd('compress all')
    #
    # Create the rest of the plenum outlet
    #TODO Create orifices for the connection to the plenum
    #TODO Add a component limiting flow from the plenum to reflector
    # We have not found specifications, so all of this is assumed
    cubit.cmd('create vertex 1.05 4.47017 0')
    cubit.cmd('create vertex 1.05 5.3125 0')
    cubit.cmd('create vertex 1.25 5.3125 0')
    cubit.cmd('create vertex 1.25 4.47017 0')
    cubit.cmd('compress all')
    cubit.cmd('merge all')
    #
    index += 4
    vertices = '29 30 33 26'
    cubit.cmd('create surface vertex ' + vertices)
    vertices = '30 31 32 33'
    cubit.cmd('compress all')
    cubit.cmd('merge all')
    cubit.cmd('create surface vertex ' + vertices)
    cubit.cmd('compress all')
    cubit.cmd('merge all')
# ####
# # Outer reflector surface (surface 7, block 5)
#
# # create vertices defining the outer reflector and surface
ind['outer_reflector'] = index
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['inner_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['inner_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
index += 2
cubit.cmd('compress all')
cubit.cmd('merge all')
if not plenum:
    vertices = "28 27 26 25 24 23 30 29"
    cubit.cmd('create surface vertex ' + vertices)
else:
    # Between defueling chute and plenum
    vertices = "28 27 29 30 31"
    cubit.cmd('create surface vertex ' + vertices)
    # Outside of plenum
    vertices = "35 34 32 33 26 25 24 23"
    cubit.cmd('create surface vertex ' + vertices)
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Core barrel (surface 8, block 6)
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['barrel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['core_barrel'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['outer_reflector']+1)+' '+str(ind['outer_reflector'])+' '+\
    str(ind['core_barrel'])+' '+str(ind['core_barrel']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Downcomer (surface 9, block 7)
cubit.cmd('create vertex ' + str(fhr.geometry['downcomer']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['downcomer']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['downcomer'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['core_barrel']+1)+' '+str(ind['core_barrel'])+' '+\
    str(ind['downcomer'])+' '+str(ind['downcomer']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Vessel (surface 10, block 8)
cubit.cmd('create vertex ' + str(fhr.geometry['vessel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['vessel']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['vessel'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['downcomer']+1)+' '+str(ind['downcomer'])+' '+\
    str(ind['vessel'])+' '+str(ind['vessel']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
####
# Fire bricks (surface 11, block 9)
cubit.cmd('create vertex ' + str(fhr.geometry['bricks']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][-1]) + ' 0')
cubit.cmd('create vertex ' + str(fhr.geometry['bricks']['outer_radius']) + ' ' + str(fhr.geometry['outer_radius']['z'][0]) + ' 0')
ind['bricks'] = index
index += 2
cubit.cmd('create surface vertex '+str(ind['vessel']+1)+' '+str(ind['vessel'])+' '+\
    str(ind['bricks'])+' '+str(ind['bricks']+1))
cubit.cmd('merge all')
cubit.cmd('compress all')
################################################################################
# Mesh generation
################################################################################
# Global multiplier for all interval counts below; raise for a finer mesh.
refinement = 1
# Set intervals for each curves
# Oulet regions
cubit.cmd('curve 16 30 interval '+str(4*refinement))
cubit.cmd('curve 40 interval '+str(3*refinement))
cubit.cmd('curve 41 interval '+str(refinement))
cubit.cmd('curve 15 32 43 interval '+str(5*refinement))
# Main pebble region
cubit.cmd('curve 7 28 39 interval '+str(10*refinement))
# Inlet regions
cubit.cmd('curve 8 26 interval '+str(2*refinement))
cubit.cmd('curve 14 24 interval '+str(refinement))
cubit.cmd('curve 13 22 interval '+str(refinement))
cubit.cmd('curve 12 19 interval '+str(3*refinement))
# Plenum bottom
cubit.cmd('curve 45 interval '+str(refinement))
cubit.cmd('curve 47 interval '+str(2*refinement))
# cubit.cmd('surface 17 size '+str(0.02/refinement**2))
# Plenum top
cubit.cmd('curve 48 50 interval '+str(6*refinement))
cubit.cmd('curve 46 49 interval '+str(2*refinement))
# Inner reflector side
# cubit.cmd('curve 6 interval 5')
# cubit.cmd('curve 1 3 interval 30')
# cubit.cmd('curve 9 interval 5')
cubit.cmd('curve 2 4 interval '+str(refinement))
cubit.cmd('curve 10 5 interval '+str(refinement))
# cubit.cmd('curve 11 17 interval 1')
# Horizontal for flow regions
cubit.cmd('curve 31 29 27 25 23 21 18 20 interval '+str(3*refinement))
cubit.cmd('curve 35 33 36 38 43 42 interval '+str(refinement))
# mesh the entire domain
# cubit.cmd('surface 1 2 3 4 5 6 7 8 9 10 11 12 size ' + str(dx))
cubit.cmd('surface 1 2 3 4 5 6 7 8 9 10 11 12 scheme ' + scheme)
cubit.cmd('block 1 surface 1 3 4') # inner reflector
cubit.cmd('block 2 surface 2') # control rod channel
cubit.cmd('block 3 surface 5 6 7 8 9 10 11') # core
cubit.cmd('block 4 surface 12 13 14 15 16') # pebble reflector
cubit.cmd('block 5 surface 17 18') # plenum
cubit.cmd('block 6 surface 19 20') # outer reflector
cubit.cmd('block 7 surface 21') # core barrel
cubit.cmd('block 8 surface 22') # downcomer
cubit.cmd('block 9 surface 23') # vessel
cubit.cmd('block 10 surface 24') # bricks
cubit.cmd('mesh surface 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24')
################################################################################
# Curves and sideset definitions
################################################################################
# Inflow
cubit.cmd('sideset 101 curve 20 wrt surface 5')
cubit.cmd('sideset 101 curve 35 wrt surface 12')
cubit.cmd('sideset 101 name "bed_horizontal_bottom"')
# Outflow
cubit.cmd('sideset 102 curve 31 wrt surface 11')
cubit.cmd('sideset 102 curve 43 wrt surface 16')
cubit.cmd('sideset 102 name "bed_horizontal_top"')
cubit.cmd('sideset 103 curve 49 wrt surface 17')
cubit.cmd('sideset 103 name "plenum_top"')
# Inner
cubit.cmd('sideset 104 curve 12 13 14 7 8 15 16 wrt volume 3')
cubit.cmd('sideset 104 name "bed_left"')
# Right-most boundary
cubit.cmd('sideset 105 curve 65 wrt volume 9')
cubit.cmd('sideset 105 name "brick_surface"')
################################################################################
# Refine some interfaces
################################################################################
# interfaces between _large_ changes in porosity
# porosity_interfaces = '14 15 16 21 23 25'
#
# cubit.cmd('refine curve ' + porosity_interfaces + ' numsplit 1 bias 1.0 depth 2 smooth')
#
# cubit.cmd('surface 1 2 3 4 5 smooth scheme condition number beta 1.2 cpu 10')
#
# more_refinement = '4 34 35 25 30 1 15 16'
#
# cubit.cmd('refine curve ' + more_refinement + ' numsplit 1 bias 1.0 depth 1 smooth')
#
# # refine on angled section near bottom
# cubit.cmd('refine curve 2 numsplit 1 bias 1.0 depth 5 smooth')
#
# # curves near the inlet where there is a rapid redistribution of the flow
# inlet_refinement = '12 13 14 39'
# cubit.cmd('refine curve ' + inlet_refinement + ' numsplit 1 bias 1.0 depth 2 smooth')
# if (orifice == False):
#     curves = fhr.number_string(1, 6, 1) + ' ' + fhr.number_string(12, 18, 1)
#     cubit.cmd('refine curve ' + curves + ' 25 26 numsplit 1 bias 1.0 depth 2 smooth')
#     cubit.cmd('surface 1 2 3 4 smooth scheme condition number beta 1.2 cpu 10')
# else:
#     curves = fhr.number_string(1, 6, 1) + ' ' + fhr.number_string(12, 17, 1) + fhr.number_string(24, split_curves[-1], 1)
#     cubit.cmd('refine curve ' + curves + ' numsplit 1 bias 1.0 depth 2 smooth')
#     cubit.cmd('surface 1 2 smooth scheme condition number beta 1.2 cpu 10')
#     curves = fhr.number_string(7, 11, 1)
#     cubit.cmd('refine curve ' + curves + ' numsplit 1 bias 1.0 depth 1 smooth')
#     cubit.cmd('surface 1 2 smooth scheme condition number beta 1.2 cpu 10')
################################################################################
# Output
################################################################################
# Select file name
filename = 'core_pronghorn'
# Mesh size is controlled by intervals
# filename += "_" + str(dx)
# save file
cubit.cmd('set large exodus file on')
# NOTE(review): absolute user-specific export path — parameterize before
# running on another machine.
cubit.cmd('export Genesis "/Users/giudgl/projects/virtual_test_bed/pbfhr/meshes/' + filename + '.e" dimension 2 overwrite')
| 2.15625 | 2 |
tests/views.py | geelweb/geelweb-django-contactform | 2 | 12758088 | from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Return a fixed plain-text page body."""
    body = 'Page content'
    return HttpResponse(body)
def custom(request):
    """Render the ``custom.html`` template with an empty context."""
    context = {}
    return render(request, 'custom.html', context)
| 1.757813 | 2 |
tests/input_scripts/rewrite.py | shiba6v/shape_commentator | 65 | 12758089 | import numpy as np
a = np.array([1,2,3,4,5,6]) #_ rewrite it!
b = np.array([0,1,2,3,4,5]) #_ rewrite it!
| 3.046875 | 3 |
microservices/auth/run.py | boggda/test_forum | 0 | 12758090 | from src import auth
auth.run()
| 0.984375 | 1 |
federatedscope/vertical_fl/worker/__init__.py | alibaba/FederatedScope | 9 | 12758091 | from federatedscope.vertical_fl.worker.vertical_client import vFLClient
from federatedscope.vertical_fl.worker.vertical_server import vFLServer
__all__ = ['vFLServer', 'vFLClient'] | 1.367188 | 1 |
views.py | Soul-Code/JustSearchBackend | 2 | 12758092 | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Question, Answer, UserData, Team, TimeTable
from django.db.models import Q, Sum
from django.core.paginator import Paginator
from django.core import serializers
import django.utils.timezone as timezone
import time
import json
import base64
import random
import requests
# from bs4 import BeautifulSoup
# Shared HTTP session reused for every call to the upstream shuhelper API.
session = requests.Session()
# Login endpoint of the upstream student-account service.
url_login = 'https://www.shuhelper.cn/api/users/login/'
# Browser-like headers the upstream service expects.
headers = {'Accept': 'application/json, text/plain, */*',
           'Accept-Encoding': 'gzip, deflate, br',
           'Accept-Language': 'zh-CN,zh;q=0.8',
           'Cache-Control': 'no-cache',
           'Connection': 'Keep-Alive',
           'Content-Type': 'application/json;charset=UTF-8',
           'Host': 'www.shuhelper.cn',
           'Origin': 'https://www.shuhelper.cn',
           'Pragma': 'no-cache',
           'Referer': 'https://www.shuhelper.cn/login',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/69.0.3497.100 Safari/537.36'}
# Registration-system API views
def index(request, string=''):
    """Serve the contest landing page.

    ``string`` is an optional URL suffix (e.g. a "Beta" flag) and is only
    logged.  When the session holds a valid user id, a per-stage
    notification may be placed in ``msg``; all variants are currently
    disabled below.
    """
    print(string)
    # if string == 'Beta':
    userid = request.session.get('userid')
    msg = {}
    if userid:
        user = UserData.objects.filter(id=userid)
        if user.exists():
            user = user.first()
            # TODO: the notification handling below is ad hoc -- each round's
            # message was toggled by hand-commenting one of these variants.
            # Team-round notification:
            # if user.isPromoted:
            #     msg = {'msg': '恭喜你成功晋级!/恭喜你成功晋级,请到1106休息等待最终的决赛!'}
            # else:
            #     msg = {'msg': '还有复活赛!/很遗憾你没有成功晋级,但是还有复活赛,还有机会进入决赛!'}
            # Revival-round notification:
            # if user.isPromoted:
            #     msg = {'msg': '恭喜你成功晋级!/恭喜你成功晋级,请到指定机房参加最终的决赛!'}
            # else:
            #     msg = {'msg': '很遗憾!/你与决赛失之交臂……'}
            # Individual-round notification:
            # if user.isPromoted:
            #     msg = {'msg': '等待结果!/请等待工作人员对比赛情况进行最终审核!'}
            # else:
            #     msg = {'msg': '等待结果!/请等待工作人员对比赛情况进行最终审核……'}
    return render(request, 'JustSearch/index.html', msg)
    # return render(request, 'JustSearch/index.html')
    # return HttpResponse('不在比赛时间')
# return HttpResponse('不在比赛时间')
@csrf_exempt
def login_view(request):
    """Log a student in via the upstream shuhelper service.

    Expects an AJAX POST with JSON body ``{isAgree, username, password}``.
    On first successful login a ``UserData`` row is created; on every
    success the user id is bound to the session and the profile returned.
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    if request.method == 'POST' and request.is_ajax():
        if not request.body.decode():
            # Empty body: answer with the generic failure payload.
            json_data = json.dumps(res_data)
            return HttpResponse(json_data, content_type="application/json")
        data = json.loads(request.body.decode())
        if data.get('isAgree'):
            # The contest rules must be accepted before logging in.
            username = data.get('username')
            psword = data.get('password')
            if username and psword:
                # Check whether this student already exists locally.
                user = UserData.objects.filter(
                    stdid=username,
                )
                if user.exists():
                    # Known user: verify the password upstream.
                    user = user.first()
                    print('查到', user)
                    res = login_std(username, psword)
                    if not res:
                        # Wrong password.
                        res_data['errmsg'] = '用户名或密码错误'
                    else:
                        # Password verified: bind the user to the session.
                        request.session['userid'] = user.id
                        res_data['isOk'] = True
                        res_data['userInfo'] = getUserInfo(user.id)
                else:
                    # Unknown user: verify upstream, then create the record.
                    res = login_std(username, psword)
                    if res:
                        res_data['errmsg'] = 'ok'
                        res_data['isOk'] = True
                        res_data['name'] = res.get('name')
                        print('登陆成功', res)
                        # Persist the new user.
                        user_now = UserData.objects.create(
                            stdid=username,
                            name=res.get('name')
                        )
                        request.session['userid'] = user_now.id
                    else:
                        print('登陆失败')
                        res_data['errmsg'] = '用户名或密码错误'
            else:
                res_data['errmsg'] = '用户名或密码为空'
        else:
            res_data['errmsg'] = '必须同意比赛准则'
    return JsonResponse(res_data)
@csrf_exempt
def new_team(request):
    """Team creation endpoint, currently disabled.

    TODO: the registration cutoff here is a one-off hack -- the whole
    creation flow below was commented out to close sign-ups and must be
    restored for the next event.
    NOTE(review): non-POST requests fall through and return ``None``
    (Django will raise); kept as-is in this doc-only pass.
    """
    res_data = {'isOk': False, 'errmsg': '报名已经截止!'}
    if request.method == 'POST':
        # user = isAuthed(request)
        # if not user:
        #     res_data['errmsg'] = '获取授权信息失败'
        #     JsonResponse(res_data)
        # data = json.loads(request.body.decode())
        # teamname = data.get('teamname')
        # if Team.objects.filter(name=teamname).exists():
        #     res_data['errmsg'] = '队伍名称已存在'
        # else:
        #     print(user, '创建队伍', data)
        #     if user.team:
        #         res_data['errmsg'] = '你已经有队伍了哟'
        #     else:
        #         user.team = Team.objects.create(
        #             name=data.get('teamname'),
        #             leader=user.name
        #         )
        #         user.save()
        #         request.session['teamid'] = user.team.id
        #
        #         res_data['isOk'] = True
        #         res_data['userInfo'] = getUserInfo(user.id)
        return JsonResponse(res_data)
@csrf_exempt
def get_team(request):
    """Return the current user's profile (team info included) as JSON.

    Fix: the original only returned a response from inside the POST
    branch and fell through (returning ``None``) otherwise, which makes
    Django raise.  A JSON response is now returned on every path.
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    if request.method == 'POST':
        userid = request.session.get('userid')
        if userid and UserData.objects.filter(id=userid).exists():
            # Session carries a valid user id: report the profile.
            res_data['isOk'] = True
            res_data['userInfo'] = getUserInfo(userid)
            print(res_data['userInfo'])
    return HttpResponse(json.dumps(res_data), content_type="application/json")
@csrf_exempt
def register(request):
    """Save the logged-in user's QQ number and phone number.

    Expects an AJAX POST with JSON body ``{qq, tel}``.
    NOTE(review): non-AJAX / non-POST requests fall through and return
    ``None`` (Django will raise); kept as-is in this doc-only pass.
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    if request.method == 'POST' and request.is_ajax():
        userid = request.session.get('userid')
        user = UserData.objects.filter(id=userid)
        if not user.exists():
            # No such user bound to the session.
            print('用户不存在')
            res_data['errmsg'] = 'User Not Found'
            json_data = json.dumps(res_data)
            return HttpResponse(json_data, content_type="application/json")
        user = user.first()
        if not request.body.decode():
            # Empty request body.
            print('没有收到信息')
            res_data['errmsg'] = 'Msg Not Found'
            json_data = json.dumps(res_data)
            return HttpResponse(json_data, content_type="application/json")
        data = json.loads(request.body.decode())
        qq = data.get('qq')
        tel = data.get('tel')
        if qq and tel and user:
            # Both contact fields present: persist them.
            user.qq = qq
            user.tel = tel
            user.save()
            res_data['isOk'] = True
            res_data['userInfo'] = getUserInfo(userid)
        json_data = json.dumps(res_data)
        return HttpResponse(json_data, content_type="application/json")
@csrf_exempt
def get_rank(request):
    """Return the top team and individual leaderboards as JSON.

    Fix: the original returned ``None`` for non-POST requests or when no
    user id was bound to the session; now every path returns a response.
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    if request.method == 'POST':
        userid = request.session.get('userid')
        if userid:
            data = json.loads(request.body.decode())
            if data.get('who') == 'team':
                res_data['isOk'] = True
                # Top 25 teams / top 15 players: best score first, ties broken
                # by earliest finish time, then newest id.
                res_data['teams'] = list(
                    Team.objects.all().order_by('-score', 'finish_time', '-id')[:25].values_list('id', 'name', 'score'))
                res_data['users'] = list(
                    UserData.objects.all().order_by('-score', 'finish_time', '-id')[:15].values_list('id', 'name', 'score'))
    return HttpResponse(json.dumps(res_data), content_type="application/json")
@csrf_exempt
def find_team(request):
    """Search for a team by id, or by name/leader substring.

    Returns at most one team; an id match takes precedence over a
    substring match.
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    if request.method == 'POST':
        userid = request.session.get('userid')
        if userid:
            data = json.loads(request.body.decode())
            team_find = data.get('teamfind')
            print(team_find)
            # if(team_find)
            if team_find.isdigit():
                # Numeric query: treat it as a team id.
                team1 = Team.objects.filter(id=team_find)
                if team1.exists():
                    team1 = team1.first()
                    # if len(team1.mems.all()) < 3:
                    res_data['team'] = {'name': team1.name, 'id': team1.id, 'leader': team1.leader}
                    res_data['isOk'] = True
            # Fall back to a substring match on team name or leader name.
            team2 = Team.objects.filter(Q(name__contains=team_find) | Q(leader__contains=team_find))
            if team2.exists() and not res_data['isOk']:
                team2 = team2.first()
                # if len(team2.mems.all()) < 3:
                res_data['team'] = {'name': team2.name, 'id': team2.id, 'leader': team2.leader}
                res_data['isOk'] = True
            json_data = json.dumps(res_data)
            return HttpResponse(json_data, content_type="application/json")
@csrf_exempt
def join_team(request):
    """Add the logged-in user to an existing team.

    Rejects teams that are already full (3 members) or that have started
    answering (non-zero score).
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    if request.method == 'POST':
        userid = request.session.get('userid')
        if userid:
            user = UserData.objects.filter(id=userid)
            if user.exists():
                user = user.first()
                data = json.loads(request.body.decode())
                teamid = data.get('id')
                team = Team.objects.filter(id=teamid).first()
                if len(team.mems.all()) >= 3:
                    # Team is full.
                    res_data['errmsg'] = '队伍人数已满'
                elif isScored(team):
                    # Team has already started answering.
                    res_data['errmsg'] = '队伍已经开始答题'
                else:
                    user.team = team
                    user.save()
                    res_data['isOk'] = True
                    res_data['userInfo'] = getUserInfo(userid)
            # Session is authenticated at this point.
            json_data = json.dumps(res_data)
            return HttpResponse(json_data, content_type="application/json")
@csrf_exempt
def quit_team(request):
    """Remove a member (by id in the POST body) from their team.

    Refused once the team has started answering.
    NOTE(review): the session user lookup is immediately replaced by the
    user named in the request body, so any logged-in user can remove any
    member -- confirm this is intended.
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    if request.method == 'POST':
        userid = request.session.get('userid')
        if userid:
            user = UserData.objects.filter(id=userid)
            data = json.loads(request.body.decode())
            if user.exists() and data.get('id'):
                user = UserData.objects.filter(id=data.get('id')).first()
                teamid = user.team.id
                print(teamid)
                if isScored(Team.objects.get(id=teamid)):
                    # Team has already started answering.
                    res_data['errmsg'] = '队伍已经开始答题'
                else:
                    user.team = None
                    user.save()
                    res_data['isOk'] = True
                    res_data['userInfo'] = getUserInfo(userid)
            # Session is authenticated at this point.
            json_data = json.dumps(res_data)
            return HttpResponse(json_data, content_type="application/json")
def logout(request):
    """Clear the session's user binding and acknowledge with JSON."""
    request.session['userid'] = ''
    payload = json.dumps({'isOk': True})
    return HttpResponse(payload, content_type="application/json")
@csrf_exempt
def del_team(request):
    """Delete the team led by the logged-in user.

    Only the team leader may delete, and only before the team has started
    answering (score still zero).
    """
    res_data = {'isOk': False, 'errmsg': '无法完成操作'}
    # Placeholder instances so the names exist even when the lookup below
    # never runs; NOTE(review): these unsaved model instances look like a
    # workaround rather than intent -- confirm.
    team = Team()
    user = UserData()
    if request.method == 'POST':
        userid = request.session.get('userid')
        if userid:
            user = UserData.objects.get(id=int(userid))
            team = Team.objects.filter(leader=user.name)
            if team.exists():
                team = team.first()
                res_data['isOk'] = True
            else:
                # Requesting user is not a team leader.
                res_data['errmsg'] = '你好像不是队伍的队长~'
        else:
            res_data['errmsg'] = '未找到用户'
    # A deletable team has been located when isOk is still True here.
    if res_data['isOk']:
        if isScored(team):
            # Team has already started answering; refuse deletion.
            res_data['errmsg'] = '队伍已经开始答题'
            res_data['isOk'] = False
        else:
            request.session['team'] = ''
            team_del = team
            team_del.delete()
            res_data['userInfo'] = getUserInfo(userid)
    json_data = json.dumps(res_data)
    return HttpResponse(json_data, content_type="application/json")
# Quiz-system APIs
@csrf_exempt
def get_questions(request, page_num=0):
    """Return one page (10 items) of this stage's question set as JSON.

    The question set is a seeded random half of each difficulty bucket:
    team stages seed the RNG with the team id, solo stages with the user
    id, so everyone on a team sees the same questions.  ``page_num`` 0
    means "first page plus summary info" (page counts, answered count,
    user profile).
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    finished = False
    if request.method == 'POST':
        userid = request.session.get('userid')
        if userid:
            user = UserData.objects.select_related('team').filter(id=userid)
            if user.exists():
                user = user.first()
                stage = get_stage()
                seed = 0
                if stage:
                    # Team stages share one question set per team; solo
                    # stages get one per user.
                    if stage.name == '预选赛':
                        finished = user.team.isFinished
                        seed = user.team.id
                        pass
                    elif stage.name == '团队赛':
                        finished = user.team.isFinished
                        seed = user.team.id
                        pass
                    elif stage.name == '复活赛':
                        finished = user.isFinished
                        seed = user.id
                        pass
                    elif stage.name == '个人赛':
                        finished = user.isFinished
                        seed = user.id
                        pass
                else:
                    # Outside contest hours.
                    print('不在比赛时间')
                    res_data['errmsg'] = '不在比赛时间'
                    return HttpResponse(json.dumps(res_data).encode(), content_type="application/json")
                page_now = []
                res_data = {'isOk': False}  # NOTE(review): clobbers errmsg set above
                # Sample half of every difficulty bucket with the stable seed.
                # 12 12 6
                # 0 10 0
                # 9 12 9
                diffs = [0, 1, 2]
                questions = []
                for diff in diffs:
                    questions_fliter = Question.objects.filter(stage__name=stage.name, difficulty=diff).order_by('id')
                    print('难度%d,筛选之前共有题目%d-> ' % (diff, questions_fliter.count()), end='')
                    for i in questions_fliter:
                        print(i.id, end=' ')
                    print()
                    # Re-seed per bucket so the sample only depends on the
                    # seed and the bucket contents.
                    random.seed(seed)
                    questions_fliter = list(questions_fliter)
                    questions_fliter = random.sample(questions_fliter, len(questions_fliter) // 2)
                    print('难度%d,筛选完了还有题目%d-> ' % (diff, len(questions_fliter)), end='')
                    for i in questions_fliter:
                        print(i.id, end=' ')
                    print()
                    questions += questions_fliter
                print('筛选完了总共还有题目%d-> ' % len(questions), end='')
                for i in questions:
                    print(i.id, end=' ')
                print()
                # questions = Question.objects.filter(stage__name=stage.name).order_by('id')
                # random.seed(user.team.id)
                # questions = list(questions)
                # questions = random.sample(questions, 30)
                pages = Paginator(questions, 10)
                print('一共有', pages.count, '题')
                print('一共有', pages.num_pages, '页')
                res_data['page_count'] = pages.count
                res_data['page_num_pages'] = pages.num_pages
                if page_num == 0:
                    # First call: also report overall progress and profile.
                    res_data['isOk'] = True
                    page_num = 1
                    page_now = pages.page(page_num).object_list
                    answered_questions = 0
                    if stage.name in ['团队赛', '预选赛']:
                        answered_questions = Question.objects.filter(answer__user__team=user.team,
                                                                     stage=stage).distinct()
                    elif stage.name in ['复活赛', '个人赛']:
                        answered_questions = Question.objects.filter(answer__user=user, stage=stage).distinct()
                    answered_num = len(answered_questions)
                    res_data['answered_num_all'] = answered_num
                    res_data['userInfo'] = getUserInfo(user.id)
                elif page_num in pages.page_range:
                    page_now = pages.page(page_num).object_list
                    res_data['isOk'] = True
                else:
                    # Page number out of range.
                    res_data['isOk'] = False
                    res_data['errmsg'] = '页码超出范围'
                if res_data['isOk']:
                    res_data['page_num'] = page_num
                    res_data['isFinished'] = finished
                    questions = json.loads(
                        serializers.serialize('json', page_now, fields=('question_text', 'choices', 'difficulty')))
                    # res_data['answered_choices'] = json.loads(
                    #     serializers.serialize('json',
                    #                           Answer.objects.filter(user__team=user.team,
                    #                                                 question__in=page_now).order_by('pub_time'),
                    #                           fields=('question_text', 'choices', 'difficulty')))
                    for i in range(len(questions)):
                        # Attach each question's most recent chosen option
                        # and the number of attempts already used.
                        if stage.name in ['团队赛', '预选赛']:
                            answers = Answer.objects.filter(user__team=user.team, question_id=questions[i]['pk'],
                                                            stage=stage)
                        elif stage.name in ['复活赛', '个人赛']:
                            print('个人赛')
                            answers = Answer.objects.filter(user=user, question_id=questions[i]['pk'],
                                                            stage=stage)
                        if answers.exists():
                            questions[i]['answered_num'] = answers.aggregate(Sum('pub_num')).get('pub_num__sum', 0)
                            questions[i]['answered_choices'] = answers.order_by('pub_time')[0].choose
                        else:
                            questions[i]['answered_num'] = 0
                            questions[i]['answered_choices'] = -1
                    res_data['questions'] = questions
        else:
            res_data['errmsg'] = '请登陆后再试'
    return JsonResponse(res_data)
@csrf_exempt
def get_stages(request):
    """Return every contest stage with Unix start/end times, the current
    server time, and the id of the active stage (0 if none)."""
    time_now = int(time.time())
    data = {'isOk': True, 'time_now': time_now}
    stages = TimeTable.objects.all().order_by('id')
    stages_data = json.loads(serializers.serialize('json', stages))
    stages_fields = []
    for i in range(len(stages)):
        # Replace the serialized fields with Unix-timestamp variants.
        data_temp = {
            'name': stages[i].name,
            'timeStart': get_time(stages[i].timeStart),
            'timeEnd': get_time(stages[i].timeEnd),
        }
        stages_fields.append(data_temp)
        stages_data[i]['fields'] = data_temp
    data['stages'] = stages_data
    if get_stage():
        data['stage'] = get_stage().id
    else:
        data['stage'] = 0
    return HttpResponse(json.dumps(data).encode(), content_type="application/json")
def add_time(user, stage, diff):
    """Apply the wrong-answer time penalty for the current stage.

    Team round: flat +2 minutes on the team.  Revival round: +2 minutes
    on the player, but only for non-trivial questions (diff >= 1).
    Individual round: +2**diff minutes on the player.
    """
    stage_name = stage.name
    if stage_name == '团队赛':
        user.team.add_time = user.team.add_time + 2
        user.team.save()
    elif stage_name == '复活赛' and diff >= 1:
        user.add_time = user.add_time + 2
        user.save()
    elif stage_name == '个人赛':
        user.add_time = user.add_time + 2 ** diff
        user.save()
def add_score(user, stage, diff):
    """Award points for a correct answer in the current stage."""
    stage_name = stage.name
    if stage_name == '预选赛':
        # Preliminary round: flat 2 points to the team.
        user.team.score = user.team.score + 2
        user.team.save()
    elif stage_name == '团队赛':
        # Team round: difficulty-weighted points to the team.
        gained = 2 * diff + 1
        user.team.score = user.team.score + gained
        print('加分' + str(gained))
        user.team.save()
    elif stage_name == '复活赛':
        # Revival round: flat 2 points to the player.
        user.score = user.score + 2
        user.save()
    elif stage_name == '个人赛':
        # Individual round: difficulty-weighted points to the player.
        user.score = user.score + 3 * diff + 2
        user.save()
@csrf_exempt
def submit_answer(request):
    """Grade one submitted answer, or finalize the round.

    Body is JSON.  ``{'allOk': true}`` finalizes: if every question of
    the stage has been answered, the finish time (elapsed minutes plus
    penalty minutes) is recorded.  Otherwise ``{'question_pk', 'choice'}``
    grades a single answer: at most two attempts per question (shared
    across the team in team stages); points are added on a correct
    answer, and removed / penalized when a previous correct answer is
    overturned.
    """
    res_data = {'isOk': False, 'errmsg': '未知错误'}
    user = isAuthed(request)
    stage_type = ''
    if user:
        print(user, '身份验证成功')
        if not isTeamed(user):
            # User must belong to a team.
            print('没有加入队伍')
            res_data['errmsg'] = '没有加入队伍'
            return JsonResponse(res_data)
        stage = get_stage()
        if not stage:
            # Outside contest hours.
            print('不在比赛时间')
            res_data['errmsg'] = '不在比赛时间'
            return JsonResponse(res_data)
        # Team stages grade per team; solo stages grade per user.
        if stage.name in ['团队赛', '预选赛']:
            stage_type = 'team'
        elif stage.name in ['复活赛', '个人赛']:
            stage_type = 'single'
        if user.team.isFinished and stage_type == 'team':
            # Team already finished: no further answers allowed.
            print('已经完成比赛,禁止答题')
            res_data['errmsg'] = '禁止答题'
            return JsonResponse(res_data)
        if user.isFinished and stage_type == 'single':
            # User already finished: no further answers allowed.
            print('已经完成比赛,禁止答题')
            res_data['errmsg'] = '禁止答题'
            return JsonResponse(res_data)
        data = json.loads(request.body.decode())
        if data.get('allOk'):
            # All questions claimed answered: compute the final time.
            answered_question_num = 0
            if stage_type == 'team':
                answered_question_num = Question.objects.filter(answer__user__team=user.team, stage=stage).count()
            elif stage_type == 'single':
                answered_question_num = Question.objects.filter(answer__user=user, stage=stage).count()
                print(Question.objects.filter(answer__user=user, stage=stage), stage)
            # FIXME: compute the answered count per stage kind (team/single)
            question_num = Question.objects.filter(stage=stage).count() // 2
            print('answered_question_num', answered_question_num)
            print('question_num', question_num)
            if question_num <= answered_question_num:
                dt = timezone.now() - stage.timeStart

                def add(obj):
                    # Finish time = elapsed minutes since stage start plus
                    # accumulated penalty minutes.
                    obj.finish_time = dt.total_seconds() / 60 + obj.add_time
                    obj.isFinished = True
                    obj.save()
                if stage_type == 'team':
                    add(user.team)
                elif stage_type == 'single':
                    add(user)
                # Round completed.
                res_data['isOk'] = True
            else:
                res_data['errmsg'] = "请完成所有题目后再提交哟"
            return JsonResponse(res_data)
        pk = data.get('question_pk', -1)
        choice = data.get('choice', -1)
        print(data)
        if pk != -1 and choice != -1:
            # Payload is well formed: start grading.
            choice_now = Answer.objects.filter(user=user, question_id=int(pk), stage=stage)
            answered_num = 0
            if stage_type == 'team':
                print('stage_type == "team"')
                team_answered = Answer.objects.filter(user__team=user.team, question_id=int(pk), stage=stage)
                answered_num = team_answered.aggregate(Sum('pub_num')).get('pub_num__sum', 0)
                if not answered_num:
                    answered_num = 0
            elif stage_type == 'single':
                print('stage_type == "single"')
                answered_num = choice_now.aggregate(Sum('pub_num')).get('pub_num__sum', 0)
                if not answered_num:
                    answered_num = 0
            question_answered = Question.objects.get(pk=pk)
            diff = question_answered.difficulty
            # Grading proper.
            if choice_now.exists():
                # This user has already answered this question once.
                print('user已经答过一次了')
                choice_now = choice_now.first()
                # print(choice_now.pub_num)
                if answered_num <= 1:
                    # A second attempt is still available.
                    if question_answered.key == choice:
                        # Correct this time.
                        if not choice_now.isRight:
                            # Wrong before, right now: award the points.
                            print('上次答错了这次答对了,加分')
                            choice_now.isRight = True
                            choice_now.save()
                            add_score(user, stage, diff)
                        else:
                            # Right before and right again: no change.
                            print('上次答对了这次答对了,不变(是谁两次提交一样的答案?)')
                    else:
                        # Wrong: apply the time penalty first.
                        add_time(user, stage, diff)
                        # Then reconcile with the previous attempt.
                        if not choice_now.isRight:
                            # Wrong both times: penalty only.
                            print('上次答错了这次答错了,只罚时')
                        else:
                            # Right before, wrong now: take the points back.
                            if stage.name == '预选赛':
                                user.team.score -= 2
                                user.team.save()
                            elif stage.name == '团队赛':
                                user.team.score -= 2 * diff + 1
                                user.team.save()
                            elif stage.name == '复活赛':
                                user.score -= 2
                                user.save()
                            elif stage.name == '个人赛':
                                user.score -= 3 * diff + 2
                                user.save()
                            choice_now.isRight = False
                            choice_now.save()
                            print('上次答对了这次答错了减分儿,罚时')
                    choice_now.pub_num = choice_now.pub_num + 1
                    choice_now.choose = choice
                    choice_now.save()
                    res_data['isOk'] = True
                else:
                    # Attempt limit reached.
                    res_data['errmsg'] = '答题次数达到上限'
            else:
                # First answer from this user for this question.
                print('user没有答过')
                if answered_num == 0:
                    if question_answered.key == choice:
                        # Correct: award points.
                        add_score(user, stage, diff)
                        print('答对了,加分儿')
                    else:
                        # Wrong: add the time penalty.
                        add_time(user, stage, diff)
                        print('答错了,罚时')
                    res_data['isOk'] = True
                elif answered_num == 1:
                    # A teammate answered once before: reconcile with that.
                    team_answered = team_answered.first()
                    if question_answered.key == choice:
                        # Correct this time.
                        if not team_answered.isRight:
                            print('上次答错了这次答对了,加分')
                            add_score(user, stage, diff)
                        else:
                            print('上次答对了这次答对了,不变(是谁两次提交一样的答案?)')
                    else:
                        # Wrong: apply the time penalty first.
                        add_time(user, stage, diff)
                        # Then reconcile with the teammate's attempt.
                        if not team_answered.isRight:
                            print('上次答错了这次答错了,不做')
                        else:
                            if stage.name == '预选赛':
                                user.team.score -= 2
                                user.team.save()
                            elif stage.name == '团队赛':
                                user.team.score -= 2 * diff + 1
                                user.team.save()
                            elif stage.name == '复活赛':
                                user.score -= 2
                                user.save()
                            elif stage.name == '个人赛':
                                user.score -= 3 * diff + 2
                                user.save()
                            print('上次答对了这次答错了减分儿')
                        # choice_now.pub_num = choice_now.pub_num + 1
                        # choice_now.choose = choice
                        # choice_now.save()
                elif answered_num >= 2:
                    # Attempt limit reached.
                    res_data['errmsg'] = '答题次数达到上限'
                    return JsonResponse(res_data)
                res_data['isOk'] = True
                Answer.objects.create(
                    user=user,
                    question=question_answered,
                    isRight=question_answered.key == choice,
                    choose=choice,
                    pub_num=1,
                    stage=stage
                )
                res_data['isOk'] = True
                print('首次答题成功')
        else:
            res_data['errmsg'] = '传入数据不合法'
    else:
        res_data['errmsg'] = '获取授权信息失败'
    return JsonResponse(res_data)
    # return HttpResponse(json.dumps(res_data).encode(), content_type="application/json")
# tools
def get_team_questions(question_id):
    # Stub: only logs the requested question id; no lookup implemented yet.
    print(question_id)
def get_stage():
    """Return the contest stage whose time window contains now, else None."""
    now = timezone.now()
    return next(
        (candidate for candidate in TimeTable.objects.all().order_by('id')
         if candidate.timeStart <= now <= candidate.timeEnd),
        None,
    )
def get_time(datetime):
    """Convert an aware datetime to an integer local Unix timestamp."""
    local_dt = timezone.localtime(datetime)
    return int(time.mktime(local_dt.timetuple()))
def isAuthed(request):
    """Return the UserData bound to the session of a POST request, else None."""
    if request.method != 'POST':
        return None
    userid = request.session.get('userid')
    if not userid:
        return None
    match = UserData.objects.filter(id=userid)
    if match.exists():
        return match.first()
    return None
def isTeamed(user):
    """True when the user is attached to a team."""
    return bool(user.team)
def isScored(team):
    """True once the team has a positive score (i.e. has started answering)."""
    return team.score > 0
def getUserInfo(userid):
    """Assemble the profile dict sent to the frontend for *userid*.

    Includes team details (id, name, score, leader, member list) when the
    user has a team, the active stage id (0 outside contest hours), and
    whether the relevant entity (team for team stages, user for solo
    stages) has finished the current round.
    """
    user = UserData.objects.get(pk=userid)
    stage = get_stage()
    finished = False
    team = user.team
    team_data = {}
    if team:
        if stage:
            # Team stages report the team's finished flag; solo stages the
            # user's own flag.
            if stage.name == '预选赛':
                finished = user.team.isFinished
                pass
            elif stage.name == '团队赛':
                finished = user.team.isFinished
                pass
            elif stage.name == '复活赛':
                finished = user.isFinished
                pass
            elif stage.name == '个人赛':
                finished = user.isFinished
                pass
        mems_data = []
        mems = team.mems.all()
        # print(list(mems.values_list('id', 'name')))
        for mem in mems:
            mems_data.append({'id': mem.id, 'name': mem.name})
        team_data = {
            'id': team.id,
            'name': team.name,
            'score': team.score,
            'leader': team.leader,
            'mems': mems_data
        }
    if stage:
        print('1')
        return {
            'id': user.id,
            'name': user.name,
            'score': user.score,
            'tel': user.tel,
            'qq': user.qq,
            'stage': stage.pk,
            'team': team_data,
            'isFinished': finished
        }
    else:
        print('2')
        return {
            'id': user.id,
            'name': user.name,
            'score': user.score,
            'tel': user.tel,
            'qq': user.qq,
            'stage': 0,
            'team': team_data,
            'isFinished': finished
        }
def login_std(username='16122364', psword='<PASSWORD>'):
    """Authenticate *username*/*psword* against the shuhelper API.

    Returns the decoded JSON payload on HTTP 200, or ``False`` otherwise.

    Fix: the password field now uses the ``psword`` argument -- the
    original line contained an unresolved placeholder literal, which is
    invalid syntax and would never send the caller's password.
    """
    # res_pre = session.get(url_login)
    payload = {
        'card_id': username,
        'password': psword,
    }
    res = session.post(url_login, data=json.dumps(payload), headers=headers)
    if res.status_code == 200:
        return json.loads(res.text)
    return False
def yxwh(request, txt):
    """Render the yxwh page with *txt* as its body text."""
    return render(request, 'JustSearch/yxwh.html', {'txt': txt})
| 1.914063 | 2 |
sdk/python/kfp/dsl/_volume_snapshot_op.py | deepk2u/pipelines | 3 | 12758093 | <filename>sdk/python/kfp/dsl/_volume_snapshot_op.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from kubernetes.client.models import (
V1Volume, V1TypedLocalObjectReference, V1ObjectMeta
)
from ._resource_op import ResourceOp
from ._pipeline_param import match_serialized_pipelineparam, sanitize_k8s_name
class VolumeSnapshotOp(ResourceOp):
    """Represents an op which will be translated into a resource template
    which will be creating a VolumeSnapshot.

    TODO(https://github.com/kubeflow/pipelines/issues/4822): Determine the
    stability level of this feature.

    Args:
        resource_name: A desired name for the VolumeSnapshot which will be created
        pvc: The name of the PVC which will be snapshotted
        snapshot_class: The snapshot class to use for the dynamically created VolumeSnapshot
        annotations: Annotations to be patched in the VolumeSnapshot
        volume: An instance of V1Volume
        kwargs: See :py:class:`kfp.dsl.ResourceOp`

    Raises:
        ValueError: if k8s_resource is provided along with other arguments
                    if k8s_resource is not a VolumeSnapshot
                    if pvc and volume are None
                    if pvc and volume are not None
                    if volume does not reference a PVC
    """

    def __init__(self,
                 resource_name: str = None,
                 pvc: str = None,
                 snapshot_class: str = None,
                 annotations: Dict[str, str] = None,
                 volume: V1Volume = None,
                 api_version: str = "snapshot.storage.k8s.io/v1alpha1",
                 **kwargs):
        # Add size to output params: expose the snapshot's restore size.
        self.attribute_outputs = {"size": "{.status.restoreSize}"}
        # Add default success_condition if None provided: wait until the
        # snapshot reports it is ready for use.
        if "success_condition" not in kwargs:
            kwargs["success_condition"] = "status.readyToUse == true"

        if "k8s_resource" in kwargs:
            # Caller supplied a complete resource manifest; it is mutually
            # exclusive with the convenience arguments.
            if resource_name or pvc or snapshot_class or annotations or volume:
                raise ValueError("You cannot provide k8s_resource along with "
                                 "other arguments.")
            # TODO: Check if is VolumeSnapshot
            super().__init__(**kwargs)
            self.snapshot = V1TypedLocalObjectReference(
                api_group="snapshot.storage.k8s.io",
                kind="VolumeSnapshot",
                name=self.outputs["name"]
            )
            return

        # Exactly one of pvc / volume must identify the snapshot source.
        if not (pvc or volume):
            raise ValueError("You must provide a pvc or a volume.")
        elif pvc and volume:
            raise ValueError("You can't provide both pvc and volume.")

        source = None
        deps = []
        if pvc:
            source = V1TypedLocalObjectReference(
                kind="PersistentVolumeClaim",
                name=pvc
            )
        else:
            if not hasattr(volume, "persistent_volume_claim"):
                raise ValueError("The volume must be referencing a PVC.")
            if hasattr(volume, "dependent_names"):  # TODO: Replace with type check
                deps = list(volume.dependent_names)
            source = V1TypedLocalObjectReference(
                kind="PersistentVolumeClaim",
                name=volume.persistent_volume_claim.claim_name
            )

        # Set the k8s_resource
        # TODO: Use VolumeSnapshot
        # Pipeline-parameter references pass through unchanged; plain names
        # are sanitized to valid k8s identifiers.
        if not match_serialized_pipelineparam(str(resource_name)):
            resource_name = sanitize_k8s_name(resource_name)
        snapshot_metadata = V1ObjectMeta(
            name="{{workflow.name}}-%s" % resource_name,
            annotations=annotations
        )
        k8s_resource = {
            "apiVersion": api_version,
            "kind": "VolumeSnapshot",
            "metadata": snapshot_metadata,
            "spec": {"source": source}
        }
        if snapshot_class:
            k8s_resource["spec"]["snapshotClassName"] = snapshot_class

        super().__init__(
            k8s_resource=k8s_resource,
            **kwargs
        )
        # Propagate dependencies inherited from the source volume.
        self.dependent_names.extend(deps)
        self.snapshot = V1TypedLocalObjectReference(
            api_group="snapshot.storage.k8s.io",
            kind="VolumeSnapshot",
            name=self.outputs["name"]
        )
| 2.203125 | 2 |
setup.py | lumapps/endpoints-python | 0 | 12758094 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from setuptools import setup, find_packages
# Get the version from endpoints/__init__.py so it is defined in one place.
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('endpoints/__init__.py', 'r') as f:
    text = f.read()
    match = re.search(version_regex, text)
    if match:
        version = match.group(1)
    else:
        raise RuntimeError("No version number found!")

install_requires = [
    'attrs==17.4.0',
    'google-endpoints-api-management>=1.10.0',
    'semver==2.7.7',
    'setuptools>=36.2.5',
]

# Read the long description up front so the file handle is closed promptly
# (the original leaked an open handle via open('README.rst').read()).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='google-endpoints',
    version=version,
    description='Google Cloud Endpoints',
    long_description=long_description,
    author='Google Endpoints Authors',
    author_email='<EMAIL>',
    url='https://github.com/cloudendpoints/endpoints-python',
    packages=find_packages(exclude=['test', 'test.*']),
    package_dir={'google-endpoints': 'endpoints'},
    include_package_data=True,
    license='Apache',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    scripts=['endpoints/endpointscfg.py'],
    tests_require=['mock', 'protobuf', 'protorpc', 'pytest', 'webtest'],
    install_requires=install_requires,
)
| 1.8125 | 2 |
test.py | Rohit19060/Python-Utils | 1 | 12758095 |
import requests

# Roanuz cricket API smoke-test script.  Only the "Featured Matches" call
# at the bottom is live; the other endpoint examples are kept commented
# out for reference.
project_key = 'RS_P_1495608305106685963'
token = 'v5sRS_P_1495608305106685963s1496806325613631987'
# NOTE(review): placeholder -- supply a real API key before using the
# Authentication example below.
api_key = '<KEY>'

# Authentication API
# url = "https://api.sports.roanuz.com/v5/core/{}/auth/".format(project_key)
# payload = {
#     'api_key': api_key
# }
# response = requests.post(url, json=payload)
# print(response.json())

# Association List
# print("Association List API")
# url = "https://api.sports.roanuz.com/v5/cricket/{}/association/list/".format(
#     project_key)
# headers = {
#     'rs-token': token
# }
# response = requests.get(url, headers=headers)
# print(response.json())

# Country List
# print("Country List API")
# url = "https://api.sports.roanuz.com/v5/cricket/{}/country/list/".format(
#     project_key)
# headers = {
#     'rs-token': token
# }
# response = requests.get(url, headers=headers)
# print(response.json())

# Featured List
# url = "https://api.sports.roanuz.com/v5/cricket/{}/featured-tournaments/".format(
#     project_key)
# headers = {
#     'rs-token': token
# }
# response = requests.get(url, headers=headers)
# print(response.json())

# Featured Matches: the one live request in this script.
tournament_key = "nzwindw_2022"
url = "https://api.sports.roanuz.com/v5/cricket/{}/tournament/{}/featured-matches/".format(
    project_key, tournament_key)
headers = {
    'rs-token': token
}
response = requests.get(url, headers=headers)
print(response.json())
| 2.34375 | 2 |
components/topmenu/__init__.py | Solomon1999/kivystudio | 1 | 12758096 | <filename>components/topmenu/__init__.py<gh_stars>1-10
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.uix.behaviors import ToggleButtonBehavior, ButtonBehavior
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ListProperty, BooleanProperty
from kivystudio.behaviors import HoverBehavior
import dropmenu
__all__ = ('TopMenu',)
class TopMenu(GridLayout):
    """Horizontal menu bar that opens drop-down menus for its items."""

    # When True, menus are meant to open on hover rather than on click.
    drop_on_hover = BooleanProperty(False)

    def __init__(self, **kwargs):
        super(TopMenu, self).__init__(**kwargs)

    def drop(self, btn, hover):
        # Open the shared dropdown under *btn* on hover-in; on hover-out,
        # schedule a dismiss check for the next frame.
        # NOTE(review): ``self.dropdown`` is not created in this class --
        # presumably assigned externally; confirm.
        if hover and not self.dropdown.parent:
            self.dropdown.drop_list=btn.drop_list
            self.dropdown.open(btn)
        else:
            Clock.schedule_once(self.decide_drop)

    def decide_drop(self, dt):
        # Dismiss only if the pointer did not move onto the dropdown itself.
        if not self.dropdown.hover:
            self.dropdown.dismiss()

    def drop_menu(self, menu_name, index):
        # Instantiate the dropmenu class named *menu_name* and open it under
        # the child at *index* (Kivy stores children in reverse add order).
        menu = getattr(dropmenu, menu_name)()
        menu.open(self.children[index])
class TopMenuItem(HoverBehavior, ButtonBehavior, Label):
    """A clickable menu label that underlines and darkens itself on hover."""

    def on_hover(self, *args):
        if self.hover:
            # Wrap the label text in underline markup and darken it.
            self.text = "[u]" + self.text + "[/u]"
            self.color = (.1,.1,.1,1)
        else:
            # Strip the underline markup and restore the normal color.
            self.text = self.text.replace('[u]','').replace('[/u]','')
            self.color = (0,0,0,1)
# Inline kv rules for the menu bar and its items.  Note: ``drop_menu``
# indexes ``self.children``, which Kivy keeps in reverse order of
# addition -- hence File=4 ... Help=0.
Builder.load_string('''
<TopMenu>:
    size_hint_y: None
    height: '24dp'
    rows: 1
    canvas.before:
        Color:
            rgba: .85,.85,.85,1
        Rectangle:
            size: self.size
            pos: self.pos

    TopMenuItem:
        text: 'File'
        on_release:
            root.drop_on_hover=False;
            root.drop_menu('FileTopMenu', 4)

    TopMenuItem:
        text: 'Edit'
        on_release:
            root.drop_on_hover=False;
            root.drop_menu('FileTopMenu', 3)

    TopMenuItem:
        text: 'View'
        on_release:
            root.drop_on_hover=False;
            root.drop_menu('FileTopMenu', 2)

    TopMenuItem:
        text: 'Selection'
        on_release:
            root.drop_on_hover=False;
            root.drop_menu('FileTopMenu', 1)

    TopMenuItem:
        text: 'Help'
        on_release:
            root.drop_on_hover=False;
            root.drop_menu('FileTopMenu', 0)

<TopMenuItem>:
    size_hint_x: None
    width: '60dp'
    markup: True
    color: (0,0,0,1)
''')
dashboard/dashboard/pinpoint/models/tasks/evaluator.py | Martijnve23/catapult | 1,894 | 12758097 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Consolidated evaluator factory module.
This module consolidates the creation of specific evaluator combinators, used
throughout Pinpoint to evaluate task graphs we support.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.pinpoint.models.tasks import performance_bisection
from dashboard.pinpoint.models.tasks import read_value
from dashboard.pinpoint.models.tasks import run_test
# Keys dropped when lifting task payloads: large blobs that are never
# needed to decide the final result.
EXCLUDED_PAYLOAD_KEYS = {'commits', 'swarming_request_body'}


class ExecutionEngine(evaluators.SequenceEvaluator):
  """Top-level evaluator combinator for Pinpoint task graphs.

  Dispatches each task to the evaluator registered for its type, then
  lifts the (trimmed) task payload for downstream consumers.
  """

  def __init__(self, job):
    # We gather all the evaluators from the modules we know.
    super(ExecutionEngine, self).__init__(evaluators=[
        evaluators.DispatchByTaskType({
            'find_isolate': find_isolate.Evaluator(job),
            'find_culprit': performance_bisection.Evaluator(job),
            'read_value': read_value.Evaluator(job),
            'run_test': run_test.Evaluator(job),
        }),

        # We then always lift the task payload up, skipping some of the
        # larger objects that we know we are not going to need when deciding
        # what the end result is.
        evaluators.TaskPayloadLiftingEvaluator(
            exclude_keys=EXCLUDED_PAYLOAD_KEYS)
    ])
| 2.21875 | 2 |
dae/dae/pedigrees/tests/test_family_types.py | iossifovlab/gpf | 0 | 12758098 | import pytest
from dae.variants.attributes import Role
from dae.pedigrees.family import FamilyType, Person, Family, FamiliesData
def trio_persons(family_id="trio_family"):
    """Return mom/dad/affected-proband Person records for *family_id*."""
    specs = [
        # (person_id, sex, role, status) -- status 2 marks "affected"
        ("mom", "F", "mom", 1),
        ("dad", "M", "dad", 1),
        ("p1", "M", "prb", 2),
    ]
    return [
        Person(
            family_id=family_id,
            person_id=person_id,
            sex=sex,
            role=role,
            status=status,
        )
        for person_id, sex, role, status in specs
    ]
def _sib(family_id, status):
    """Return a male sibling Person for *family_id* with the given status."""
    return Person(
        family_id=family_id,
        person_id="s1",
        sex="M",
        role="sib",
        status=status,
    )


def _affect_mom(persons):
    """Mark the first record (the mother) as affected (status == 2)."""
    persons[0]._status = 2
    persons[0]._attributes["status"] = 2
    return persons


@pytest.fixture
def quad_persons():
    """Trio plus an unaffected male sibling."""
    return trio_persons("quad_family") + [_sib("quad_family", 1)]


@pytest.fixture
def multigenerational_persons():
    """Trio plus an unaffected maternal grandfather."""
    persons = trio_persons("multigenerational_family")
    persons.append(Person(
        family_id="multigenerational_family",
        person_id="grandparent",
        sex="M",
        role=str(Role.maternal_grandfather),
        status=1,
    ))
    return persons


@pytest.fixture
def simplex_persons():
    """Trio in which the mother is affected as well."""
    return _affect_mom(trio_persons("simplex_family"))


@pytest.fixture
def simplex_persons_2():
    """Affected-mother trio plus an unaffected sibling."""
    return _affect_mom(
        trio_persons("simplex_family")) + [_sib("simplex_family", 1)]


@pytest.fixture
def multiplex_persons():
    """Trio plus an affected sibling."""
    return trio_persons("multiplex_family") + [_sib("multiplex_family", 2)]
def test_family_type_trio():
    # mom + dad + single affected proband => TRIO
    assert Family.from_persons(trio_persons()).family_type is FamilyType.TRIO


def test_family_type_quad(quad_persons):
    # trio plus an unaffected sibling => QUAD
    assert Family.from_persons(quad_persons).family_type is FamilyType.QUAD


@pytest.mark.parametrize("role", [
    Role.maternal_grandfather,
    Role.paternal_grandfather,
    Role.maternal_grandmother,
    Role.paternal_grandmother,
])
def test_family_type_multigenerational(role):
    # any grandparent role makes the family MULTIGENERATIONAL
    persons = trio_persons("multigenerational")
    persons.append(Person(
        family_id="multigenerational",
        person_id="grandparent",
        sex="U",
        role=str(role),
        status=1,
    ))
    family = Family.from_persons(persons)
    assert family.family_type is FamilyType.MULTIGENERATIONAL


def test_family_type_simplex(simplex_persons):
    # affected mother + affected proband => SIMPLEX
    assert Family.from_persons(
        simplex_persons).family_type is FamilyType.SIMPLEX


def test_family_type_simplex_2(simplex_persons_2):
    # an extra unaffected sibling keeps the family SIMPLEX
    assert Family.from_persons(
        simplex_persons_2).family_type is FamilyType.SIMPLEX


def test_family_type_multiplex(multiplex_persons):
    # two affected children => MULTIPLEX
    assert Family.from_persons(
        multiplex_persons).family_type is FamilyType.MULTIPLEX
def test_families_data_families_by_type(
        quad_persons,
        multigenerational_persons,
        simplex_persons,
        multiplex_persons):
    """families_by_type groups family ids by their detected FamilyType."""
    members = {
        "trio_family": trio_persons(),
        "quad_family": quad_persons,
        "multigenerational_family": multigenerational_persons,
        "simplex_family": simplex_persons,
        "multiplex_family": multiplex_persons,
    }
    families_data = FamiliesData.from_families({
        family_id: Family.from_persons(persons)
        for family_id, persons in members.items()
    })
    expected = {
        FamilyType.QUAD: {"quad_family"},
        FamilyType.TRIO: {"trio_family"},
        FamilyType.MULTIGENERATIONAL: {"multigenerational_family"},
        FamilyType.SIMPLEX: {"simplex_family"},
        FamilyType.MULTIPLEX: {"multiplex_family"},
    }
    assert families_data.families_by_type == expected
| 2.21875 | 2 |
02-hellocolor.py | marcmenem/learnopengl | 0 | 12758099 | #!/Users/marc/miniconda3/bin/python3
import glfw
from OpenGL.GL import *
from OpenGL.GLU import *
import math
import ctypes
def framebuffer_size_callback(window, width, height):
    """GLFW resize hook: keep the GL viewport in sync with the framebuffer."""
    # make sure the viewport matches the new window dimensions; note that width and
    # height will be significantly larger than specified on retina displays.
    glViewport(0, 0, width, height)
# process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly
# ---------------------------------------------------------------------------------------------------------
def processInput(window):
    """Poll per-frame input: request window close when ESC is pressed."""
    if glfw.get_key(window, glfw.KEY_ESCAPE) == glfw.PRESS:
        glfw.set_window_should_close(window, True)
# Requested window size in screen coordinates (the framebuffer may be
# larger on high-DPI displays; framebuffer_size_callback handles that).
width = 800
height = 600

# Initialize the library
if not glfw.init():
    print("Failed to init glfw")
else:
    # Request an OpenGL 3.3 core-profile context (forward-compat is
    # required for core profiles on macOS).
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)

    window = glfw.create_window(width, height, "LearnOpenGL", None, None)
    if not window:
        print("Failed to create GLFW window")
        glfw.terminate()
        # BUG FIX: the original fell through here and kept using the
        # invalid window (make_context_current(None), GL calls, ...).
        # Abort instead.
        raise SystemExit(1)

    glfw.make_context_current(window)
    glfw.set_framebuffer_size_callback(window, framebuffer_size_callback)

    # Load, compile and link the vertex/fragment shader pair.
    import myshader
    shaders = myshader.shader("shaders/hellocolor.vert",
                              "shaders/hellocolor.frag")
    shaders.linkShaders()

    # set up vertex data (and buffer(s)) and configure vertex attributes
    # ------------------------------------------------------------------
    import numpy as np
    # Interleaved layout: x, y, z position followed by r, g, b color.
    vertices = np.array([
        0.5, 0.5, 0.0, 1.0, 0.0, 0.0,    # top right
        0.5, -0.5, 0.0, 0.0, 1.0, 0.0,   # bottom right
        -0.5, -0.5, 0.0, 0.0, 0.0, 1.0,  # bottom left
        -0.5, 0.5, 0.0, 1.0, 1.0, 1.0    # top left
    ], dtype=np.float32)
    indices = np.array([  # note that we start from 0!
        1, 3, 0,  # first Triangle
        1, 2, 3   # second Triangle
    ], dtype=np.uint32)

    # bind the Vertex Array Object first, then bind and set vertex
    # buffer(s), and then configure vertex attributes(s).
    VAO = glGenVertexArrays(1)
    glBindVertexArray(VAO)

    VBO = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, VBO)
    glBufferData(GL_ARRAY_BUFFER, vertices, GL_STATIC_DRAW)

    EBO = glGenBuffers(1)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices, GL_STATIC_DRAW)

    # Attribute 0: position, 3 floats, stride 6 floats (24 bytes).
    # Location must match the vertex shader.
    location = 0
    glVertexAttribPointer(location, 3, GL_FLOAT, GL_FALSE, 6 * 4, None)
    glEnableVertexAttribArray(location)

    # Attribute 1: color, 3 floats, offset 3 floats into each vertex.
    location = 1
    glVertexAttribPointer(location, 3, GL_FLOAT, GL_FALSE, 6 * 4,
                          ctypes.c_void_p(3 * 4))
    glEnableVertexAttribArray(location)

    # remember: do NOT unbind the EBO while a VAO is active as the bound
    # element buffer object IS stored in the VAO; keep the EBO bound.
    glBindVertexArray(0)

    # uncomment this call to draw in wireframe polygons.
    # glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)

    # render loop
    # -----------
    glClearColor(0.9, 0.7, 0.7, 1.0)
    shaders.use()
    # Only a single VAO, so binding it once outside the loop is enough.
    glBindVertexArray(VAO)
    while not glfw.window_should_close(window):
        # input
        processInput(window)

        # Animate the color, scale and rotation uniforms from elapsed time.
        timeValue = glfw.get_time() * 1.0
        greenValue = (math.sin(timeValue) / 2.0) + 0.5
        shaders.setUniform4f("extraColor", 0.0, greenValue, 0.0, 1.0)

        scaleUp = abs(greenValue)
        shaders.setUniform1f("scaleUp", scaleUp)

        angle = timeValue
        # 2x2 rotation matrix, row-major.
        rotation = np.array([
            math.cos(angle), -math.sin(angle),
            math.sin(angle), math.cos(angle)
        ], dtype=np.float32)
        shaders.setUniformMatrix2fv("rotation", rotation)

        # render
        glClear(GL_COLOR_BUFFER_BIT)
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None)

        # glfw: swap buffers and poll IO events (keys pressed/released,
        # mouse moved etc.)
        glfw.swap_buffers(window)
        glfw.poll_events()

    glBindVertexArray(0)

    # optional: de-allocate all resources once they've outlived their purpose
    glDeleteVertexArrays(1, [VAO])
    glDeleteBuffers(1, [VBO])
    glDeleteBuffers(1, [EBO])

    # glfw: terminate, clearing all previously allocated GLFW resources.
    glfw.terminate()
| 2.59375 | 3 |
etc/stackDB.py | acmlia/brain | 2 | 12758100 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 09:52:17 2018
@author: liaamaral
"""
#------
# Load the main libraries
import os
import csv
import numpy as np
import pandas as pd
import logging
#------
# Data input and output paths:
pathin="/media/DATA/tmp/datasets/subsetDB/rain/" # Path of the rain dataset
pathrain="/media/DATA/tmp/datasets/subsetDB/rain/" # Path of the rain dataset
#pathnorain="/Volumes/lia_595gb/randel/python/dados/subsetDB/norain/" # Path of the non rain dataset
#------
# Create the list of Dataframes, eliminating the files that start with ".":
# Collect one DataFrame per monthly CSV, skipping hidden files.
frames = []
for file in os.listdir(pathin):
    if file.startswith("."):
        name = os.path.splitext(file)[0]
        print("File name starts with point: ", name)
    else:
        logging.debug(file)
        df = pd.read_csv(os.path.join(pathin, file), sep=',', decimal='.',
                         encoding="utf8")
        df.reset_index(drop=True, inplace=True)
        frames.append(df)
logging.debug(frames)

# Concatenation of the monthly DataFrames into the yearly DataFrame:
try:
    DB_yrly_rain = pd.concat(frames, sort=False, ignore_index=True,
                             verify_integrity=True)
except ValueError as e:
    print("ValueError:", e)

# Repairing the additional column wrongly generated in concatenation.
# BUG FIX: the original `if np.where(np.isfinite(...))` was always truthy
# (np.where with one argument returns a non-empty tuple of index arrays),
# so the else branch was dead code -- and that branch called an undefined
# `isnan()`, which would have raised NameError.  Use column 34 only when
# it is entirely finite, otherwise fall back to column 33.
# NOTE(review): assumes column 34 is numeric (np.isfinite raises on
# object dtype) -- confirm against the input CSVs.
if np.isfinite(DB_yrly_rain.iloc[:, 34]).all():
    DB_yrly_rain["correto"] = DB_yrly_rain.iloc[:, 34]
else:
    DB_yrly_rain["correto"] = DB_yrly_rain.iloc[:, 33]

# Giving the output file name:
DB_name = "BR_yrly_rain.csv"

# Saving the new output DB (rain):
DB_yrly_rain.to_csv(os.path.join(pathrain, DB_name), index=False, sep=",",
                    decimal='.')
print("The file ", DB_name, " was genetared!")
| 2.71875 | 3 |
charon/utils/archive.py | shokakucarrier/mrrc-uploader | 0 | 12758101 | """
Copyright (C) 2021 Red Hat, Inc. (https://github.com/Commonjava/charon)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import sys
import tarfile
import requests
import tempfile
import shutil
from enum import Enum
from json import load, JSONDecodeError
from typing import Tuple
from zipfile import ZipFile, is_zipfile
logger = logging.getLogger(__name__)
def extract_zip_all(zf: ZipFile, target_dir: str):
    """Extract every member of the opened zip *zf* into *target_dir*."""
    zf.extractall(path=target_dir)
def extract_zip_with_files(zf: ZipFile, target_dir: str, file_suffix: str, debug=False):
    """Extract only the members of *zf* whose names end with *file_suffix*.

    When *debug* is true, the selected member names are logged first.
    """
    selected = [entry for entry in zf.namelist() if entry.endswith(file_suffix)]
    if debug:
        logger.debug("Filtered files list as below with %s", file_suffix)
        for entry in selected:
            logger.debug(entry)
    zf.extractall(target_dir, members=selected)
def extract_npm_tarball(path: str, target_dir: str, is_for_upload: bool) -> Tuple[str, list]:
    """ Extract npm tarball will relocate the tgz file and metadata files.
        * Locate tar path ( e.g.: jquery/-/jquery-7.6.1.tgz or @types/jquery/-/jquery-2.2.3.tgz).
        * Locate version metadata path (e.g.: jquery/7.6.1 or @types/jquery/2.2.3).
        Result returns the package name and the version meta file paths,
        used by the following package meta generating.
    """
    valid_paths = []
    package_name_path = str()
    # FIX: open the tarball in a context manager -- the original called
    # tarfile.open() without ever closing the handle (resource leak).
    with tarfile.open(path) as tgz:
        # NOTE(review): members are extracted into the *current working
        # directory* so that package.json can be read below -- confirm
        # callers expect this side effect.
        tgz.extractall()
        for f in tgz:
            if f.name.endswith("package.json"):
                parse_paths = __parse_npm_package_version_paths(f.path)
                package_name_path = parse_paths[0]
                os.makedirs(os.path.join(target_dir, parse_paths[0]))

                # tarball lives under "<name>/-/", version meta under
                # "<name>/<version>/"
                tarball_parent_path = os.path.join(target_dir, parse_paths[0], "-")
                valid_paths.append(
                    os.path.join(tarball_parent_path, _get_tgz_name(path)))
                version_metadata_parent_path = os.path.join(
                    target_dir, parse_paths[0], parse_paths[1]
                )
                valid_paths.append(
                    os.path.join(version_metadata_parent_path, "package.json"))

                if is_for_upload:
                    # relocate the tarball and the version metadata
                    os.makedirs(tarball_parent_path)
                    target = os.path.join(tarball_parent_path,
                                          os.path.basename(path))
                    shutil.copyfile(path, target)

                    os.makedirs(version_metadata_parent_path)
                    target = os.path.join(version_metadata_parent_path,
                                          os.path.basename(f.path))
                    shutil.copyfile(f.path, target)
                break
    return package_name_path, valid_paths
def _get_tgz_name(path: str):
    """Return the file-name component of *path* (e.g. "jquery-7.6.1.tgz").

    FIX: the original split on "/" and had an unreachable ``return ""``
    branch -- str.split always yields at least one element.  basename is
    equivalent for the "/"-separated paths used here and also handles
    OS-specific separators.
    """
    return os.path.basename(path)
def __parse_npm_package_version_paths(path: str) -> list:
    """Read [name, version] out of the package.json file at *path*.

    Returns ``None`` when the file is not valid JSON or lacks the
    required keys (callers must check for this).
    """
    try:
        with open(path, encoding='utf-8') as version_package:
            data = load(version_package)
            return [data['name'], data['version']]
    except JSONDecodeError:
        logger.error('Error: Failed to parse json!')
        # was an implicit None before; make the failure signal explicit
        return None
    except KeyError as e:
        # Robustness fix: a package.json missing "name"/"version" used to
        # escape as an uncaught KeyError; treat it like a parse failure.
        logger.error('Error: package.json missing field %s', e)
        return None
class NpmArchiveType(Enum):
    """Possible types of detected archive"""
    NOT_NPM = 0    # no package.json found: not an npm artifact
    DIRECTORY = 1  # expanded directory with package.json at top level
    ZIP_FILE = 2   # zip archive with package.json at its root
    TAR_FILE = 3   # tarball containing package/package.json
def detect_npm_archive(repo):
    """Detects, if the archive needs to have npm workflow.

    :parameter repo repository directory
    :return NpmArchiveType value
    """
    expanded_repo = os.path.expanduser(repo)
    if not os.path.exists(expanded_repo):
        logger.error("Repository %s does not exist!", expanded_repo)
        sys.exit(1)

    if os.path.isdir(expanded_repo):
        # Expanded repository: npm iff package.json sits at the top level.
        if os.path.isfile(os.path.join(expanded_repo, "package.json")):
            return NpmArchiveType.DIRECTORY
    elif is_zipfile(expanded_repo):
        # A ZIP file: npm iff package.json is at the archive root.
        with ZipFile(expanded_repo) as zz:
            try:
                zz.getinfo("package.json")
                return NpmArchiveType.ZIP_FILE
            except KeyError:
                pass
    elif tarfile.is_tarfile(expanded_repo):
        # A tarball: npm iff package/package.json is a regular file.
        with tarfile.open(expanded_repo) as tt:
            try:
                if tt.getmember("package/package.json").isfile():
                    return NpmArchiveType.TAR_FILE
            except KeyError:
                pass
    return NpmArchiveType.NOT_NPM
def download_archive(url: str, base_dir=None) -> str:
    """Download *url* into *base_dir* (or a fresh temp dir) and return the
    local file path."""
    target_dir = base_dir
    if not target_dir or not os.path.isdir(target_dir):
        target_dir = tempfile.mkdtemp()
        logger.info("No base dir specified for holding archive."
                    " Will use a temp dir %s to hold archive",
                    target_dir)
    # Streamed-download pattern, see
    # https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
    local_filename = os.path.join(target_dir, url.split('/')[-1])
    # stream=True keeps the body out of memory; timeout=30 prevents an
    # indefinite hang on a stalled connection.
    with requests.get(url, stream=True, timeout=30, verify=True) as resp:
        resp.raise_for_status()
        with open(local_filename, 'wb') as out:
            for chunk in resp.iter_content(chunk_size=8192):
                out.write(chunk)
    return local_filename
| 1.992188 | 2 |
planemo/shed/diff.py | martenson/planemo | 0 | 12758102 | """Utilities for calculating effective repository diffs.
Some intelligence is required because the tool shed updates attributes that it
is beneficial to ignore.
"""
from __future__ import print_function
import os
import sys
from xml.etree import ElementTree
from planemo.xml import diff
def diff_and_remove(working, label_a, label_b, f):
    """Remove tool shed XML files and use a smart XML diff on them.

    Return 0 if and only if the XML content is the same after stripping
    attributes the tool shed updates.
    """
    assert label_a != label_b
    special = ("tool_dependencies.xml", "repository_dependencies.xml")
    deps_diff = 0
    root_a = os.path.join(working, label_a)
    # Could walk either A or B; will only compare if in same relative location
    for dirpath, _dirnames, filenames in os.walk(root_a):
        for filename in filenames:
            if filename not in special:
                continue
            a = os.path.join(dirpath, filename)
            b = os.path.join(working, label_b, os.path.relpath(a, root_a))
            if os.path.exists(a) and os.path.exists(b):
                deps_diff |= _shed_diff(a, b, f)
                os.remove(a)
                os.remove(b)
    return deps_diff
def _shed_diff(file_a, file_b, f=sys.stdout):
    """Strip attributes the tool shed writes and do smart XML diff.

    Returns 0 if and only if the XML content is the same after stripping
    ``tool_shed`` and ``changeset_revision`` attributes.
    """
    roots = []
    for path in (file_a, file_b):
        root = ElementTree.parse(path).getroot()
        _strip_shed_attributes(root)
        roots.append(root)
    return diff.diff(roots[0], roots[1], reporter=f.write)
def _strip_shed_attributes(xml_element):
    """Recursively drop tool-shed-managed attributes from <repository> tags.

    FIX: ``Element.getchildren()`` was deprecated and removed in Python
    3.9 (AttributeError on modern interpreters); iterating the element
    directly is the supported equivalent and also makes the empty-children
    guard unnecessary.
    """
    if xml_element.tag == "repository":
        _remove_attribs(xml_element)
    for child in xml_element:
        _strip_shed_attributes(child)
def _remove_attribs(xml_element):
    """Drop the attributes the tool shed rewrites on upload, if present."""
    for attrib in ("changeset_revision", "toolshed"):
        # pop with a default is a no-op when the attribute is absent
        xml_element.attrib.pop(attrib, None)


__all__ = (
    "diff_and_remove",
)
| 2.53125 | 3 |
external/iotivity/iotivity_1.2-rel/build_common/iotivityconfig/compiler/configuration.py | SenthilKumarGS/TizenRT | 1,433 | 12758103 | # ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
class Configuration:
    """Compiler-specific configuration abstract base class"""

    def __init__(self, context):
        """
        Initialize the Configuration object

        Arguments:
        context -- the scons configure context
        """
        if type(self) is Configuration:
            raise TypeError('abstract class cannot be instantiated')

        self._context = context  # scons configure context
        self._env = context.env  # scons environment

    def check_c99_flags(self):
        """
        Check if command line flag is required to enable C99 support.

        Returns 1 if no flag is required, 0 if no flag was found, and
        the actual flag if one was found.  CFLAGS will be updated with
        the appropriate C99 flag, accordingly.
        """
        return self._check_flags(self._c99_flags(),
                                 self._c99_test_program(),
                                 '.c',
                                 'CFLAGS')

    def check_cxx11_flags(self):
        """
        Check if command line flag is required to enable C++11 support.

        Returns 1 if no flag is required, 0 if no flag was found, and
        the actual flag if one was found.  CXXFLAGS will be updated with
        the appropriate C++11 flag, accordingly.
        """
        return self._check_flags(self._cxx11_flags(),
                                 self._cxx11_test_program(),
                                 '.cpp',
                                 'CXXFLAGS')

    def has_pthreads_support(self):
        """
        Check if PThreads are supported by this system.

        Returns 1 if this system DOES support pthreads, 0 otherwise.
        """
        return self._context.TryCompile(self._pthreads_test_program(), '.c')

    def _check_flags(self,
                     test_flags,
                     test_program,
                     test_extension,
                     flags_key):
        """
        Check if a flag is required to build the given test program.

        Arguments:
        test_flags     -- list of flags that may be needed to build
                          test_program
        test_program   -- program used to determine if one of the given
                          flags is required for a successful build
        test_extension -- file extension associated with the test
                          program, e.g. '.cpp' for C++ and '.c' for C
        flags_key      -- key used to retrieve compiler flags that may
                          be updated by this check from the SCons
                          environment
        """
        # Check if no additional flags are required.
        ret = self._context.TryCompile(test_program,
                                       test_extension)

        # FIX: was `if ret is 0` -- identity comparison with an int
        # literal only works through CPython's small-int caching and
        # raises SyntaxWarning on Python >= 3.8; use equality instead.
        if ret == 0:
            # Try flags known to enable compiler features needed by
            # the test program.
            last_flags = self._env[flags_key]
            for flag in test_flags:
                self._env.Append(**{flags_key: flag})
                ret = self._context.TryCompile(test_program,
                                               test_extension)
                if ret:
                    # Found a flag!  Leave it appended to the environment.
                    return flag
                else:
                    # Restore original compiler flags for next flag test.
                    self._env.Replace(**{flags_key: last_flags})

        return ret

    def _c99_test_program(self):
        """
        Return test program to be used when checking for basic C99
        support.  Template method for subclasses (or composition with
        the DefaultConfiguration class).
        """
        raise NotImplementedError('unimplemented method')

    def _c99_flags(self):
        """
        Get list of flags that could potentially enable C99 support.
        Template method for subclasses that need flags to enable C99.
        """
        raise NotImplementedError('unimplemented method')

    def _cxx11_test_program(self):
        """
        Return test program to be used when checking for basic C++11
        support.  Template method for subclasses (or composition with
        the DefaultConfiguration class).
        """
        raise NotImplementedError('unimplemented method')

    def _cxx11_flags(self):
        """
        Get list of flags that could potentially enable C++11 support.
        Template method for subclasses that need flags to enable C++11.
        """
        raise NotImplementedError('unimplemented method')

    def _pthreads_test_program(self):
        """
        Return a test program to be used when checking for PThreads
        support.
        """
        return """
#include <unistd.h>
#include <pthread.h>
int main()
{
    #ifndef _POSIX_THREADS
    # error POSIX Threads support not available
    #endif
    return 0;
}
"""
| 2.078125 | 2 |
aps/transform/utils.py | ishine/aps | 117 | 12758104 | <reponame>ishine/aps
# Copyright 2019 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON, TORCH_VERSION
from typing import Optional, Tuple
from distutils.version import LooseVersion
if TORCH_VERSION >= LooseVersion("1.7"):
from torch.fft import fft as fft_func
else:
pass
def export_jit(transform: nn.Module) -> nn.Module:
    """
    Export transform module for inference
    """
    # Keep only the sub-modules that declare themselves exportable.
    kept = []
    for module in transform:
        if module.exportable():
            kept.append(module)
    return nn.Sequential(*kept)
def init_window(wnd: str,
                frame_len: int,
                device: th.device = "cpu") -> th.Tensor:
    """
    Return window coefficient
    Args:
        wnd: window name
        frame_len: length of the frame
        device: device to place the window tensor on
    """

    def sqrthann(frame_len, periodic=True):
        # square root of the Hann window (perfect-reconstruction pairs)
        return th.hann_window(frame_len, periodic=periodic)**0.5

    supported = {
        "sqrthann": sqrthann,
        "hann": th.hann_window,
        "hamm": th.hamming_window,
        "blackman": th.blackman_window,
        "bartlett": th.bartlett_window,
        "rect": th.ones
    }
    if wnd not in supported:
        raise RuntimeError(f"Unknown window type: {wnd}")
    if wnd == "rect":
        coeff = supported[wnd](frame_len)
    else:
        # periodic=True to match librosa's convention
        coeff = supported[wnd](frame_len, periodic=True)
    return coeff.to(device)
def init_kernel(frame_len: int,
                frame_hop: int,
                window: th.Tensor,
                round_pow_of_two: bool = True,
                normalized: bool = False,
                inverse: bool = False,
                mode: str = "librosa") -> Tuple[th.Tensor, th.Tensor]:
    """
    Return STFT kernels
    Args:
        frame_len: length of the frame
        frame_hop: hop size between frames
        window: window tensor
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        normalized: return normalized DFT matrix
        inverse: return iDFT matrix
        mode: framing mode (librosa or kaldi)
    Return:
        K (Tensor): 2B x 1 x W kernel (real part stacked over imaginary),
            shaped for use as conv1d weights
        window (Tensor): the window, center-padded in librosa mode
    """
    if mode not in ["librosa", "kaldi"]:
        raise ValueError(f"Unsupported mode: {mode}")
    # FFT size: B
    # kaldi mode always rounds up to a power of two; librosa only if asked
    if round_pow_of_two or mode == "kaldi":
        fft_size = 2**math.ceil(math.log2(frame_len))
    else:
        fft_size = frame_len
    # center padding window if needed (librosa pads the window up to the
    # FFT size; kaldi instead truncates the kernel below)
    if mode == "librosa" and fft_size != frame_len:
        lpad = (fft_size - frame_len) // 2
        window = tf.pad(window, (lpad, fft_size - frame_len - lpad))
    if normalized:
        # make K^H * K = I
        S = fft_size**0.5
    else:
        S = 1
    # W x B x 2
    # torch.fft became a namespace in torch 1.7; keep both code paths
    if TORCH_VERSION >= LooseVersion("1.7"):
        K = fft_func(th.eye(fft_size) / S, dim=-1)
        K = th.stack([K.real, K.imag], dim=-1)
    else:
        I = th.stack([th.eye(fft_size), th.zeros(fft_size, fft_size)], dim=-1)
        K = th.fft(I / S, 1)
    if mode == "kaldi":
        # kaldi framing only keeps frame_len taps of the kernel
        K = K[:frame_len]
    if inverse and not normalized:
        # to make K^H * K = I
        K = K / fft_size
    # 2 x B x W
    K = th.transpose(K, 0, 2)
    # 2B x 1 x W
    K = th.reshape(K, (fft_size * 2, 1, K.shape[-1]))
    return K.to(window.device), window
def mel_filter(frame_len: int,
               round_pow_of_two: bool = True,
               num_bins: Optional[int] = None,
               sr: int = 16000,
               num_mels: int = 80,
               fmin: float = 0.0,
               fmax: Optional[float] = None,
               norm: bool = False) -> th.Tensor:
    """
    Return mel filter coefficients
    Args:
        frame_len: length of the frame
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        num_bins: number of the frequency bins produced by STFT
        num_mels: number of the mel bands
        fmin: lowest frequency (in Hz)
        fmax: highest frequency (in Hz); a negative value is interpreted
            as an offset below the Nyquist frequency
        norm: normalize the mel filter coefficients (slaney area norm)
    Return:
        mel (Tensor): num_mels x (N // 2 + 1) filterbank matrix
    """
    # FFT points: either derived from frame_len or from the STFT bin count
    if num_bins is None:
        N = 2**math.ceil(
            math.log2(frame_len)) if round_pow_of_two else frame_len
    else:
        N = (num_bins - 1) * 2
    # fmin & fmax, clamped into [0, Nyquist]
    freq_upper = sr // 2
    if fmax is None:
        fmax = freq_upper
    else:
        fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
    fmin = max(0, fmin)
    # mel filter coefficients (htk=True selects the HTK mel scale)
    # NOTE(review): positional (sr, N) arguments assume an older librosa;
    # librosa >= 0.10 requires keyword arguments here -- confirm the
    # pinned librosa version.
    mel = filters.mel(sr,
                      N,
                      n_mels=num_mels,
                      fmax=fmax,
                      fmin=fmin,
                      htk=True,
                      norm="slaney" if norm else None)
    # num_mels x (N // 2 + 1)
    return th.tensor(mel, dtype=th.float32)
def speed_perturb_filter(src_sr: int,
                         dst_sr: int,
                         cutoff_ratio: float = 0.95,
                         num_zeros: int = 64) -> th.Tensor:
    """
    Return speed perturb filters, reference:
    https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
    Args:
        src_sr: sample rate of the source signal
        dst_sr: sample rate of the target signal
        cutoff_ratio: cutoff frequency as a fraction of the lower Nyquist
        num_zeros: number of zero crossings kept in the sinc kernel
    Return:
        weight (Tensor): dst_sr x src_sr x K coefficients of the filter
            (after reducing src_sr/dst_sr by their gcd)
    """
    if src_sr == dst_sr:
        raise ValueError(
            f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
    # reduce the ratio to lowest terms
    gcd = math.gcd(src_sr, dst_sr)
    src_sr = src_sr // gcd
    dst_sr = dst_sr // gcd
    if src_sr == 1 or dst_sr == 1:
        raise ValueError("do not support integer downsample/upsample")
    zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
    padding = 1 + int(num_zeros / zeros_per_block)
    # dst_sr x src_sr x K
    # time offsets between every (output phase, input phase, tap) triple
    times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
             np.arange(src_sr)[None, :, None] / float(src_sr) -
             np.arange(2 * padding + 1)[None, None, :] + padding)
    # raised-cosine (Hann-like) window, zero outside |times| > padding
    window = np.heaviside(1 - np.abs(times / padding),
                          0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
    # windowed-sinc low-pass filter, scaled for unity gain
    weight = np.sinc(
        times * zeros_per_block) * window * zeros_per_block / float(src_sr)
    return th.tensor(weight, dtype=th.float32)
def splice_feature(feats: th.Tensor,
                   lctx: int = 1,
                   rctx: int = 1,
                   op: str = "cat") -> th.Tensor:
    """
    Splice feature
    Args:
        feats (Tensor): N x ... x T x F, original feature
        lctx: left context
        rctx: right context
        op: operator on feature context
    Return:
        splice (Tensor): feature with context padded
    """
    if lctx + rctx == 0:
        # no context requested: return the input untouched
        return feats
    if op not in ["cat", "stack"]:
        raise ValueError(f"Unknown op for feature splicing: {op}")
    num_frames = feats.shape[-2]
    shifted = []
    for shift in range(-lctx, rctx + 1):
        # frame indices shifted by `shift`, clamped at the edges so the
        # first/last frame is repeated instead of wrapping around
        index = th.arange(shift,
                          shift + num_frames,
                          device=feats.device,
                          dtype=th.int64)
        index = th.clamp(index, min=0, max=num_frames - 1)
        shifted.append(th.index_select(feats, -2, index))
    # "cat": N x ... x T x F*D  /  "stack": N x ... x T x F x D
    return th.cat(shifted, -1) if op == "cat" else th.stack(shifted, -1)
def _forward_stft(wav: th.Tensor,
                  kernel: th.Tensor,
                  window: th.Tensor,
                  return_polar: bool = False,
                  pre_emphasis: float = 0,
                  frame_hop: int = 256,
                  onesided: bool = False,
                  center: bool = False,
                  eps: float = EPSILON) -> th.Tensor:
    """
    STFT function implemented by conv1d (not efficient, but we don't care during training)
    Args:
        wav (Tensor): N x (C) x S
        kernel (Tensor): STFT transform kernels, from init_kernel(...)
        return_polar: return [magnitude; phase] Tensor or [real; imag] Tensor
        pre_emphasis: factor of preemphasis
        frame_hop: frame hop size in number samples
        onesided: return half FFT bins
        center: if true, we assumed to have centered frames
        eps: floor added under the magnitude to keep the sqrt stable
    Return:
        transform (Tensor): N x (C) x B x T x 2 STFT transform results
    """
    wav_dim = wav.dim()
    if wav_dim not in [2, 3]:
        raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
    # if N x S, reshape N x 1 x S
    # else: reshape NC x 1 x S
    N, S = wav.shape[0], wav.shape[-1]
    wav = wav.view(-1, 1, S)
    # NC x 1 x S+2P
    if center:
        pad = kernel.shape[-1] // 2
        # NOTE: match with librosa
        wav = tf.pad(wav, (pad, pad), mode="reflect")
    # STFT: fold the analysis window into the DFT kernel
    kernel = kernel * window
    if pre_emphasis > 0:
        # NC x W x T -- frame manually so preemphasis can be applied
        # inside each frame before the DFT
        frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
                           stride=frame_hop,
                           padding=0)
        # follow Kaldi's Preemphasize
        frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
        frames[:, 0] *= (1 - pre_emphasis)
        # 1 x 2B x W, NC x W x T, NC x 2B x T
        packed = th.matmul(kernel[:, 0][None, ...], frames)
    else:
        # framing + DFT in a single strided convolution
        packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
    # NC x 2B x T => N x C x 2B x T
    if wav_dim == 3:
        packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
    # N x (C) x B x T -- first half of the rows is real, second half imag
    real, imag = th.chunk(packed, 2, dim=-2)
    # N x (C) x B/2+1 x T -- keep the non-redundant bins only
    if onesided:
        num_bins = kernel.shape[0] // 4 + 1
        real = real[..., :num_bins, :]
        imag = imag[..., :num_bins, :]
    if return_polar:
        mag = (real**2 + imag**2 + eps)**0.5
        pha = th.atan2(imag, real)
        return th.stack([mag, pha], dim=-1)
    else:
        return th.stack([real, imag], dim=-1)
def _inverse_stft(transform: th.Tensor,
                  kernel: th.Tensor,
                  window: th.Tensor,
                  return_polar: bool = False,
                  frame_hop: int = 256,
                  onesided: bool = False,
                  center: bool = False,
                  eps: float = EPSILON) -> th.Tensor:
    """
    iSTFT function implemented by conv1d
    Args:
        transform (Tensor): STFT transform results, (N) x F x T x 2
        kernel (Tensor): STFT transform kernels, from init_kernel(...)
        window (Tensor): synthesis window weights applied to the kernel
        return_polar (bool): keep same with the one in _forward_stft
        frame_hop: frame hop size in number samples
        onesided: return half FFT bins
        center: used in _forward_stft
        eps: numerical floor for the overlap-add normalization denominator
    Return:
        wav (Tensor), N x S
    """
    # (N) x F x T x 2
    transform_dim = transform.dim()
    # if F x T x 2, reshape 1 x F x T x 2
    if transform_dim == 3:
        transform = th.unsqueeze(transform, 0)
    elif transform_dim != 4:
        # FIX: this used to be a plain `if transform_dim != 4`, which raised
        # even after a valid 3D input had just been unsqueezed above
        # (transform_dim still held 3). Only ranks other than 3/4 are invalid.
        raise RuntimeError(f"Expect 4D tensor, but got {transform_dim}D")
    if return_polar:
        # polar -> rectangular
        real = transform[..., 0] * th.cos(transform[..., 1])
        imag = transform[..., 0] * th.sin(transform[..., 1])
    else:
        real, imag = transform[..., 0], transform[..., 1]
    if onesided:
        # mirror bins [num_bins - 2, ..., 1] and conjugate the imaginary part
        reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
        # extend matrix: N x B x T
        real = th.cat([real, real[:, reverse]], 1)
        imag = th.cat([imag, -imag[:, reverse]], 1)
    # pack: N x 2B x T
    packed = th.cat([real, imag], dim=1)
    # N x 1 x T
    wav = tf.conv_transpose1d(packed,
                              kernel * window,
                              stride=frame_hop,
                              padding=0)
    # normalized audio samples
    # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
    num_frames = packed.shape[-1]
    win_length = window.shape[0]
    # W x T
    win = th.repeat_interleave(window[..., None]**2, num_frames, dim=-1)
    # Do OLA on the squared windows to build the normalization denominator
    # v1)
    I = th.eye(win_length, device=win.device)[:, None]
    denorm = tf.conv_transpose1d(win[None, ...], I, stride=frame_hop, padding=0)
    # v2)
    # num_samples = (num_frames - 1) * frame_hop + win_length
    # denorm = tf.fold(win[None, ...], (num_samples, 1), (win_length, 1),
    #                  stride=frame_hop)[..., 0]
    if center:
        # undo the reflection padding added in _forward_stft
        pad = kernel.shape[-1] // 2
        wav = wav[..., pad:-pad]
        denorm = denorm[..., pad:-pad]
    wav = wav / (denorm + eps)
    # N x S
    return wav.squeeze(1)
def _pytorch_stft(wav: th.Tensor,
                  frame_len: int,
                  frame_hop: int,
                  n_fft: int = 512,
                  return_polar: bool = False,
                  window: th.Tensor = "sqrthann",
                  normalized: bool = False,
                  onesided: bool = True,
                  center: bool = False,
                  eps: float = EPSILON) -> th.Tensor:
    """
    Wrapper of PyTorch STFT function
    Args:
        wav (Tensor): source audio signal, N x (C) x S
        frame_len: length of the frame (unused here; win_length is taken from
            the window tensor)
        frame_hop: hop size between frames
        n_fft: number of the FFT size
        return_polar: return the results in polar coordinate
        window: window tensor (note: despite the vestigial string default,
            callers always pass an actual Tensor -- window.shape is used below)
        center: same definition with the parameter in librosa.stft
        normalized: use normalized DFT kernel
        onesided: output onesided STFT
    Return:
        transform (Tensor), STFT transform results, N x (C) x F x T x 2
    """
    if TORCH_VERSION < LooseVersion("1.7"):
        raise RuntimeError("Can not use this function as TORCH_VERSION < 1.7")
    wav_dim = wav.dim()
    if wav_dim not in [2, 3]:
        raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
    # if N x C x S, reshape NC x S
    wav = wav.view(-1, wav.shape[-1])
    # STFT: N x F x T x 2
    stft = th.stft(wav,
                   n_fft,
                   hop_length=frame_hop,
                   win_length=window.shape[-1],
                   window=window,
                   center=center,
                   normalized=normalized,
                   onesided=onesided,
                   return_complex=False)
    if wav_dim == 3:
        # restore the channel dimension: N x C x F x T x 2
        N, F, T, _ = stft.shape
        stft = stft.view(N, -1, F, T, 2)
    # N x (C) x F x T x 2
    if not return_polar:
        return stft
    # N x (C) x F x T
    real, imag = stft[..., 0], stft[..., 1]
    mag = (real**2 + imag**2 + eps)**0.5
    pha = th.atan2(imag, real)
    return th.stack([mag, pha], dim=-1)
def _pytorch_istft(transform: th.Tensor,
                   frame_len: int,
                   frame_hop: int,
                   window: th.Tensor,
                   n_fft: int = 512,
                   return_polar: bool = False,
                   normalized: bool = False,
                   onesided: bool = True,
                   center: bool = False,
                   eps: float = EPSILON) -> th.Tensor:
    """
    Wrapper of PyTorch iSTFT function
    Args:
        transform (Tensor): results of STFT, (N) x F x T x 2
        frame_len: length of the frame (unused here; win_length is taken from
            the window tensor)
        frame_hop: hop size between frames
        window: window tensor
        n_fft: number of the FFT size
        return_polar: keep same with _pytorch_stft
        center: same definition with the parameter in librosa.stft
        normalized: use normalized DFT kernel
        onesided: output onesided STFT
        eps: kept for signature parity with _inverse_stft (unused here)
    Return:
        wav (Tensor): synthetic audio
    """
    if TORCH_VERSION < LooseVersion("1.7"):
        raise RuntimeError("Can not use this function as TORCH_VERSION < 1.7")
    transform_dim = transform.dim()
    # if F x T x 2, reshape 1 x F x T x 2
    if transform_dim == 3:
        transform = th.unsqueeze(transform, 0)
    elif transform_dim != 4:
        # FIX: this used to be a plain `if transform_dim != 4`, which raised
        # even for valid 3D inputs that were just unsqueezed above.
        raise RuntimeError(f"Expect 4D tensor, but got {transform_dim}D")
    if return_polar:
        # polar -> rectangular before building the complex tensor
        real = transform[..., 0] * th.cos(transform[..., 1])
        imag = transform[..., 0] * th.sin(transform[..., 1])
        transform = th.stack([real, imag], -1)
    # stft is a complex tensor of PyTorch
    stft = th.view_as_complex(transform)
    # (N) x S
    wav = th.istft(stft,
                   n_fft,
                   hop_length=frame_hop,
                   win_length=window.shape[-1],
                   window=window,
                   center=center,
                   normalized=normalized,
                   onesided=onesided,
                   return_complex=False)
    return wav
def forward_stft(wav: th.Tensor,
                 frame_len: int,
                 frame_hop: int,
                 window: str = "sqrthann",
                 round_pow_of_two: bool = True,
                 return_polar: bool = False,
                 pre_emphasis: float = 0,
                 normalized: bool = False,
                 onesided: bool = True,
                 center: bool = False,
                 mode: str = "librosa",
                 eps: float = EPSILON) -> th.Tensor:
    """
    Functional STFT, equivalent to the STFT layer.
    Args:
        wav: source audio signal
        frame_len: length of the frame
        frame_hop: hop size between frames
        window: window name
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        return_polar: return [magnitude; phase] Tensor or [real; imag] Tensor
        pre_emphasis: factor of preemphasis
        normalized: use normalized DFT kernel
        onesided: output onesided STFT
        center: center flag (similar with that in librosa.stft)
        mode: STFT mode, "kaldi" or "librosa" or "torch"
    Return:
        transform: results of STFT
    """
    win = init_window(window, frame_len, device=wav.device)
    if mode == "torch":
        # torch.stft backend
        if round_pow_of_two:
            n_fft = 2**math.ceil(math.log2(frame_len))
        else:
            n_fft = frame_len
        return _pytorch_stft(wav,
                             frame_len,
                             frame_hop,
                             n_fft=n_fft,
                             return_polar=return_polar,
                             window=win,
                             normalized=normalized,
                             onesided=onesided,
                             center=center,
                             eps=eps)
    # conv1d backend ("kaldi" / "librosa")
    kernel, win = init_kernel(frame_len,
                              frame_hop,
                              window=win,
                              round_pow_of_two=round_pow_of_two,
                              normalized=normalized,
                              inverse=False,
                              mode=mode)
    return _forward_stft(wav,
                         kernel,
                         win,
                         return_polar=return_polar,
                         frame_hop=frame_hop,
                         pre_emphasis=pre_emphasis,
                         onesided=onesided,
                         center=center,
                         eps=eps)
def inverse_stft(transform: th.Tensor,
                 frame_len: int,
                 frame_hop: int,
                 return_polar: bool = False,
                 window: str = "sqrthann",
                 round_pow_of_two: bool = True,
                 normalized: bool = False,
                 onesided: bool = True,
                 center: bool = False,
                 mode: str = "librosa",
                 eps: float = EPSILON) -> th.Tensor:
    """
    Functional iSTFT, the counterpart of forward_stft(...).
    Args:
        transform: results of STFT
        frame_len: length of the frame
        frame_hop: hop size between frames
        return_polar: keep same with function forward_stft(...)
        window: window name
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        normalized: use normalized DFT kernel
        onesided: output onesided STFT
        center: center flag (similar with that in librosa.stft)
        mode: STFT mode, "kaldi" or "librosa" or "torch"
    Return:
        wav: synthetic signals
    """
    win = init_window(window, frame_len, device=transform.device)
    if mode == "torch":
        # torch.istft backend
        if round_pow_of_two:
            n_fft = 2**math.ceil(math.log2(frame_len))
        else:
            n_fft = frame_len
        return _pytorch_istft(transform,
                              frame_len,
                              frame_hop,
                              n_fft=n_fft,
                              return_polar=return_polar,
                              window=win,
                              normalized=normalized,
                              onesided=onesided,
                              center=center,
                              eps=eps)
    # conv_transpose1d backend ("kaldi" / "librosa")
    kernel, win = init_kernel(frame_len,
                              frame_hop,
                              win,
                              round_pow_of_two=round_pow_of_two,
                              normalized=normalized,
                              inverse=True,
                              mode=mode)
    return _inverse_stft(transform,
                         kernel,
                         win,
                         return_polar=return_polar,
                         frame_hop=frame_hop,
                         onesided=onesided,
                         center=center,
                         eps=eps)
class STFTBase(nn.Module):
    """
    Base layer for (i)STFT
    Args:
        frame_len: length of the frame
        frame_hop: hop size between frames
        window: window name
        center: center flag (similar with that in librosa.stft)
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        normalized: use normalized DFT kernel
        pre_emphasis: factor of preemphasis
        mode: STFT mode, "kaldi" or "librosa" or "torch"
        onesided: output onesided STFT
        inverse: using iDFT kernel (for iSTFT)
    """

    def __init__(self,
                 frame_len: int,
                 frame_hop: int,
                 window: str = "sqrthann",
                 round_pow_of_two: bool = True,
                 normalized: bool = False,
                 pre_emphasis: float = 0,
                 onesided: bool = True,
                 inverse: bool = False,
                 center: bool = False,
                 mode: str = "librosa") -> None:
        super(STFTBase, self).__init__()
        if mode != "torch":
            # conv1d path: precompute the (i)DFT kernel and window as
            # frozen (non-trainable) parameters
            K, w = init_kernel(frame_len,
                               frame_hop,
                               init_window(window, frame_len),
                               round_pow_of_two=round_pow_of_two,
                               normalized=normalized,
                               inverse=inverse,
                               mode=mode)
            self.K = nn.Parameter(K, requires_grad=False)
            self.w = nn.Parameter(w, requires_grad=False)
            self.num_bins = self.K.shape[0] // 4 + 1
            self.pre_emphasis = pre_emphasis
            self.win_length = self.K.shape[2]
        else:
            # torch.stft path: only the window tensor is needed
            self.K = None
            w = init_window(window, frame_len)
            self.w = nn.Parameter(w, requires_grad=False)
            fft_size = 2**math.ceil(
                math.log2(frame_len)) if round_pow_of_two else frame_len
            self.num_bins = fft_size // 2 + 1
            # preemphasis is not supported by the torch.stft backend
            self.pre_emphasis = 0
            self.win_length = fft_size
        self.frame_len = frame_len
        self.frame_hop = frame_hop
        self.window = window
        self.normalized = normalized
        self.onesided = onesided
        self.center = center
        self.mode = mode

    def num_frames(self, wav_len: th.Tensor) -> th.Tensor:
        """
        Compute the number of frames produced for each sample count in wav_len
        """
        assert th.sum(wav_len <= self.win_length) == 0
        if self.center:
            # FIX: use an out-of-place add; the original `wav_len += ...`
            # mutated the caller's tensor in place.
            wav_len = wav_len + self.win_length
        return th.div(wav_len - self.win_length,
                      self.frame_hop,
                      rounding_mode="trunc") + 1

    def extra_repr(self) -> str:
        """Summary string shown in the module's repr."""
        str_repr = (
            f"num_bins={self.num_bins}, win_length={self.win_length}, " +
            f"stride={self.frame_hop}, window={self.window}, " +
            f"center={self.center}, mode={self.mode}")
        if not self.onesided:
            str_repr += f", onesided={self.onesided}"
        if self.pre_emphasis > 0:
            str_repr += f", pre_emphasis={self.pre_emphasis}"
        if self.normalized:
            str_repr += f", normalized={self.normalized}"
        return str_repr
class STFT(STFTBase):
    """
    Short-time Fourier Transform (analysis) as a layer.
    """

    def __init__(self, *args, **kwargs):
        super(STFT, self).__init__(*args, inverse=False, **kwargs)

    def forward(self,
                wav: th.Tensor,
                return_polar: bool = False,
                eps: float = EPSILON) -> th.Tensor:
        """
        Transform (single or multiple channel) raw waveforms.
        Args
            wav (Tensor): input signal, N x (C) x S
        Return
            transform (Tensor): N x (C) x F x T x 2
        """
        if self.mode != "torch":
            # conv1d backend with precomputed kernel/window
            return _forward_stft(wav,
                                 self.K,
                                 self.w,
                                 return_polar=return_polar,
                                 frame_hop=self.frame_hop,
                                 pre_emphasis=self.pre_emphasis,
                                 onesided=self.onesided,
                                 center=self.center,
                                 eps=eps)
        fft_size = (self.num_bins - 1) * 2
        return _pytorch_stft(wav,
                             self.frame_len,
                             self.frame_hop,
                             n_fft=fft_size,
                             return_polar=return_polar,
                             window=self.w,
                             normalized=self.normalized,
                             onesided=self.onesided,
                             center=self.center,
                             eps=eps)
class iSTFT(STFTBase):
    """
    Inverse Short-time Fourier Transform (synthesis) as a layer.
    """

    def __init__(self, *args, **kwargs):
        super(iSTFT, self).__init__(*args, inverse=True, **kwargs)

    def forward(self,
                transform: th.Tensor,
                return_polar: bool = False,
                eps: float = EPSILON) -> th.Tensor:
        """
        Reconstruct raw waveforms from STFT coefficients.
        Args
            transform (Tensor): STFT output, N x F x T x 2
        Return
            s (Tensor): N x S
        """
        if self.mode != "torch":
            # conv_transpose1d backend with precomputed kernel/window
            return _inverse_stft(transform,
                                 self.K,
                                 self.w,
                                 return_polar=return_polar,
                                 frame_hop=self.frame_hop,
                                 onesided=self.onesided,
                                 center=self.center,
                                 eps=eps)
        fft_size = (self.num_bins - 1) * 2
        return _pytorch_istft(transform,
                              self.frame_len,
                              self.frame_hop,
                              n_fft=fft_size,
                              return_polar=return_polar,
                              window=self.w,
                              normalized=self.normalized,
                              onesided=self.onesided,
                              center=self.center,
                              eps=eps)
| 1.75 | 2 |
Syntages/Scripts/test_api.py | iliya-dehsarvi/Syntages | 1 | 12758105 | <gh_stars>1-10
import requests, json
# Fetch every cocktail containing Gin from TheCocktailDB public API
response = requests.get('https://www.thecocktaildb.com/api/json/v1/1/filter.php?i=Gin')
#print(len(json.loads(response.text)['drinks']))
# Pretty-print the JSON payload with 3-space indentation
print(json.dumps(json.loads(response.text), indent=3))
| 2.8125 | 3 |
realtoxicityprompts/scripts/run_prompts_experiment.py | ml-research/MoRT_NMI | 4 | 12758106 | import pickle
from pathlib import Path
from typing import Optional, List, Iterable, Dict, Any
import click
import pandas as pd
import torch
from tqdm import tqdm
from generation.generation import gpt2, gpt3, gpt2_affect, gpt2_ctrl, \
openai_gpt, ctrl, pplm, gpt2mcm
from utils.constants import PERSPECTIVE_API_ATTRIBUTES_LOWER
from utils.perspective_api import PerspectiveWorker, unpack_scores
from utils.utils import load_jsonl, batchify
import sys
import os
# Valid values for the --model-type CLI option below
ALLOWED_MODELS = ['gpt3', 'gpt2', 'gpt2-affect', 'gpt2-ctrl', 'gpt2-greedy', 'gpt2-naughty-list',
                  'pplm', 'ctrl', 'openai-gpt', 'xlnet',
                  'gpt2mcm']
def make_generations_col(generations, responses):
    """Yield one record per generation, pairing the text with its Perspective scores.

    Responses without a payload are filled with None for every attribute.
    """
    for text, resp in zip(generations, responses):
        payload = resp['response']
        if payload:
            scores = unpack_scores(payload)[0]
        else:
            scores = {attr: None for attr in PERSPECTIVE_API_ATTRIBUTES_LOWER}
        yield {'text': text, **scores}
def collate(dataset: Optional[pd.DataFrame], generations: List[str], responses: Iterable[Dict[str, Any]],
            output_file: str):
    """Merge generations and their Perspective scores into a JSONL file.

    If no prompts dataset is given, the records become a flat DataFrame;
    otherwise they are grouped per prompt into a 'generations' column.
    """
    record_iter = make_generations_col(generations, responses)
    if dataset is None:
        records = list(tqdm(record_iter, total=len(generations), desc='Collating files'))
        dataset = pd.DataFrame(records)
    else:
        # generations must divide evenly into samples-per-prompt groups
        assert len(generations) % len(dataset) == 0
        n = len(generations) // len(dataset)
        print(f"Detected samples per prompt:", n)
        grouped = list(tqdm(batchify(record_iter, n), total=len(dataset), desc='Collating files'))
        dataset['generations'] = grouped
    dataset.to_json(output_file, orient='records', lines=True)
@click.command()
@click.argument('output-dir')
@click.option('--dataset-file', required=False, type=str,
              help='JSONL file containing prompts data. Each row must contain a prompt at `row["prompt"]["text"]`.')
@click.option('--use-eos/--use-dataset', default=False, help='Whether to use EOS or a dataset file for generation.')
@click.option('--model', required=True, help='Equivalent to `model_name_or_path` in transformers.')
@click.option('--model-type', required=True,
              type=click.Choice(ALLOWED_MODELS))
@click.option('--perspective-rate-limit', default=25)
@click.option('--n', default=25, help='Number of samples to generate for each prompt. When used with --eos')
@click.option('--max-tokens', default=20, help='Number of tokens (usually BPE) to generate for each prompt.')
@click.option('--batch-size', default=32)
@click.option('--start-index', default=0)
@click.option('--end-index', default=-1)
@click.option('--mcm-t', default=0.0)
@click.option('--mcm-keepmin', default=30)
@click.option('--top-k', default=50)
@click.option('--resume/--no-resume', default=False)
@click.option('--usemcm/--no-usemcm', default=True, help='Use mcm or not. When used with --model-type gpt2mcm')
def main(output_dir: str, dataset_file: Optional[str], use_eos: bool, model: str, model_type: str, n: int,
         max_tokens: int, batch_size: int, perspective_rate_limit: int, start_index: int, end_index: int,
         mcm_t: float, mcm_keepmin: int, top_k: int,
         resume: bool, usemcm: bool):
    """
    Generate continuations for a set of prompts (or from an EOS token) with
    the selected model, score every generation with the Perspective API, and
    collate prompts + generations + scores into a single JSONL output file.
    """
    # Load prompts
    if dataset_file:
        assert not use_eos
        # Load prompts from dataset file
        assert dataset_file.endswith('.jsonl')
        dataset = pd.read_json(dataset_file, lines=True)
        dataset = dataset[start_index:end_index]
        prompts = pd.json_normalize(dataset['prompt'])['text']
    elif use_eos:
        assert not dataset_file
        dataset = None
        # Create EOS prompts (each model family uses a different start token)
        if model_type in ['gpt2', 'gpt2-affect', 'gpt2-naughty-list', 'pplm', 'gpt2mcm']:
            prompts = pd.Series('<|endoftext|>')
        elif model_type == 'gpt2-ctrl':
            prompts = pd.Series('<|nontoxic|>')
        elif model_type == 'ctrl':
            # HACK: update gen_samples since we use it as our batch size for pipelines
            prompts = pd.Series('').repeat(n // batch_size + 1)
            n = batch_size
        elif model_type == 'xlnet':
            prompts = pd.Series('<s>').repeat(n // batch_size + 1)
            n = batch_size
        elif model_type == 'openai-gpt':
            prompts = pd.Series('. ').repeat(n // batch_size + 1)
            n = batch_size
        elif model_type == 'gpt3':
            prompts = pd.Series('').repeat(n // batch_size + 1)
        else:
            raise RuntimeError('Model not implemented with EOS prompts')
    else:
        raise click.exceptions.MissingParameter('Missing --dataset-file or --use-eos option.')
    print('Prompts:', '\n', prompts)
    # Create output files
    output_dir = Path(output_dir)
    output_dir.mkdir(exist_ok=resume)
    generations_file = output_dir / 'generations.jsonl'
    perspective_file = output_dir / 'perspective.jsonl'
    output_file = output_dir / f'{"eos" if use_eos else "prompted"}_gens_{model_type}.jsonl'
    # Create perspective worker thread (scores generations asynchronously)
    perspective = PerspectiveWorker(out_file=perspective_file,
                                    total=len(prompts) * n,
                                    rate_limit=perspective_rate_limit)
    # Setup model for generation
    # TODO: move this logic into generation.py
    if model_type == 'gpt2':
        generations_iter = gpt2(prompts=prompts,
                                max_len=max_tokens,
                                num_samples=n,
                                batch_size=batch_size,
                                model_name_or_path=model,
                                out_file=generations_file)
    elif model_type == 'gpt2mcm':
        generations_iter = gpt2mcm(prompts=prompts,
                                   max_len=max_tokens,
                                   num_samples=n,
                                   batch_size=batch_size,
                                   model_name_or_path=model,
                                   out_file=generations_file,
                                   threshold=mcm_t,
                                   k=top_k,
                                   min_token_number=mcm_keepmin,
                                   use_mcm=usemcm)
    elif model_type == 'gpt3':
        generations_iter = gpt3(prompts=prompts,
                                max_len=max_tokens,
                                num_samples=n,
                                batch_size=batch_size,
                                model_name_or_path=model,
                                out_file=generations_file)
    elif model_type == 'gpt2-greedy':
        print("Using n=1 for greedy generation (sampling does not apply)")
        generations_iter = gpt2(prompts=prompts,
                                max_len=max_tokens,
                                num_samples=1,
                                batch_size=batch_size,
                                model_name_or_path=model,
                                out_file=generations_file,
                                sample=False)
    elif model_type == 'gpt2-naughty-list':
        # Load pre-tokenized naughty words
        # FIXME: output dir must already exist with this file
        with open(output_dir / 'gpt2_naughty_token_ids.pkl', 'rb') as f:
            naughty_list_ids = pickle.load(f)
        generations_iter = gpt2(prompts=prompts,
                                max_len=max_tokens,
                                num_samples=n,
                                batch_size=batch_size,
                                model_name_or_path=model,
                                out_file=generations_file,
                                bad_words_ids=naughty_list_ids)
    elif model_type == 'gpt2-affect':
        generations_iter = gpt2_affect(prompts=prompts,
                                       max_len=max_tokens,
                                       num_samples=n,
                                       batch_size=batch_size,
                                       model_name_or_path=model,
                                       out_file=generations_file,
                                       # Affect
                                       target_class=0,
                                       num_classes=2,
                                       beta=1)
    elif model_type == 'gpt2-ctrl':
        generations_iter = gpt2_ctrl(prompts=prompts,
                                     max_len=max_tokens,
                                     num_samples=n,
                                     batch_size=batch_size,
                                     model_name_or_path=model,
                                     out_file=generations_file,
                                     # GPT2-CTRL
                                     prompt_ctrl_code='<|nontoxic|>')
    elif model_type == 'openai-gpt':
        generations_iter = openai_gpt(prompts=prompts,
                                      max_len=max_tokens,
                                      num_samples=n,
                                      model_name_or_path=model,
                                      out_file=generations_file)
    elif model_type == 'ctrl':
        assert model == 'ctrl'
        generations_iter = ctrl(prompts=prompts,
                                max_len=max_tokens,
                                num_samples=n,
                                model_name_or_path=model,
                                out_file=generations_file,
                                # CTRL
                                ctrl_code='Links',
                                temperature=1.0,
                                repetition_penalty=1.2)
    elif model_type == 'pplm':
        generations_iter = pplm(prompts=prompts,
                                max_len=max_tokens,
                                num_samples=n,
                                batch_size=batch_size,
                                class_label=0,
                                num_iterations=10,
                                model_name_or_path='toxicity',
                                out_file=generations_file)
    else:
        raise NotImplementedError(f'Model {model} not implemented')
    # Generate and collate perspective scores
    generations = []
    for i, gen in enumerate(generations_iter):
        generations.append(gen)
        perspective(f'generation-{i}', gen)
        torch.cuda.empty_cache()
    perspective.stop()
    print('Finished generation and perspective scoring!')
    print('Collating output files')
    collate(dataset, generations, load_jsonl(perspective_file), output_file)
| 2.203125 | 2 |
contrib/tasks/wsss/seeding/e2e_seeding/cam_sal_to_seed.py | HAL-42/AlchemyCat | 8 | 12758107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact: <EMAIL>
@software: PyCharm
@file: cam_sal_to_seed.py
@time: 2020/3/27 1:10
@desc:
"""
import numpy as np
from contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict import resolve_loc_cue_conflict_by_area_order
__all__ = ["cam_sal_to_seed"]
def cam_sal_to_seed(cam, sal, cls_in_label, cam_thresh, sal_thresh, ignore_label) -> np.ndarray:
    """Get localization cues with method in SEC paper

    Perform hard threshold for each foreground class

    Args:
        cam: (H, W, num_class - 1) cam
        sal: (H, W) saliency map
        cls_in_label: list of foreground classes (index 0 is background)
        cam_thresh: hard threshold to extract foreground class cues
        sal_thresh: hard threshold to extract background class cues
        ignore_label: ignore label in class cues

    Returns:
        (H, W) seed
    """
    # FIX: dtype=np.int -- the np.int alias was deprecated in NumPy 1.20 and
    # removed in 1.24; use the builtin int instead.
    loc_cue_proposal = np.zeros(shape=(cam.shape[0], cam.shape[1], cam.shape[2] + 1), dtype=int)  # (H, W, num_class)
    for cls_idx in range(1, len(cls_in_label)):
        if cls_in_label[cls_idx] == 1:
            # foreground cue: pixels above a fraction of the class' max activation
            heat_map = cam[:, :, cls_idx - 1]
            loc_cue_proposal[:, :, cls_idx] = heat_map > cam_thresh * np.amax(heat_map)
    if cls_in_label[0] == 1:
        # background cue: low-saliency pixels
        loc_cue_proposal[:, :, 0] = sal < sal_thresh
    # handle conflict seed
    seed = resolve_loc_cue_conflict_by_area_order(loc_cue_proposal, ignore_label, train_boat=True)
    return seed
| 2.46875 | 2 |
python/sdm/houdini/image.py | sashaouellet/SDMTools | 7 | 12758108 | """This module represents various utilities and structures for image manipulation
__author__ = <NAME> (www.sashaouellet.com)
__version__ = 1.0.0
__date__ = 11/27/17
"""
import hou
import os
import imghdr
import subprocess
class ImageType():
    """File extensions for the image formats this module understands."""
    EXR = '.exr'
    RAT = '.rat'
    HDR = '.hdr'
    JPG = '.jpg'
    PNG = '.png'
    TIFF = '.tiff'
# Extensions imghdr cannot identify but that we still treat as images
ALTERNATE_IMAGE_EXTS = [ImageType.RAT, ImageType.HDR]
def convertImage(file, maxDim, scale, ext):
    """Converts the given absolute file path to the given extension, using the icp command from $HFS/bin

    Args:
        file (str): The absolute file path of the image to convert. The converted image will
            have the same path/filename, but with the given extension instead
        maxDim (float): The maximum dimension of either side of the outputted image; pass -1
            to skip this clamping. If the image (after scaling) still does not meet this
            dimension, it will be further scaled down
        scale (float): The initial scale factor (a percentage) to apply to the outputted image.
            The final calculated scale gets passed to icp with the -s flag
        ext (sdm.houdini.image.ImageType): The image type to convert to

    Returns:
        str: The path to the outputted file
    """
    args = [os.path.join(hou.getenv('HFS'), 'bin', 'icp')]
    scale /= 100.0  # incoming scale is a percentage
    resolution = hou.imageResolution(file)
    width = float(resolution[0]) * scale
    height = float(resolution[1]) * scale
    resizeFactor = 1.0
    if maxDim != -1: # Only calculate if user hasn't selected 'None'
        resizeFactor = float(maxDim) / width
        resizeFactor = min(resizeFactor, float(maxDim) / height) # if a smaller ratio, use that
        if resizeFactor < 1.0: # only want to scale down
            scale *= resizeFactor
    # NOTE(review): '-u' is appended regardless of the maxDim branch above --
    # confirm icp accepts it for all target formats
    args.append('-u') # uncompressed, if supported
    args.append('-s')
    args.append(str(float(scale * 100)))  # icp expects the scale back as a percentage
    newPath = os.path.splitext(file)[0] + ext
    args.append(file)
    args.append(newPath)
    subprocess.call(args)
    return newPath
def isImage(file):
    """Determines if the given absolute file path points to an image filetype

    Args:
        file (str): The absolute path of the file to test

    Returns:
        bool: True if the file is an image, otherwise False
    """
    # First let imghdr inspect the file contents directly
    if imghdr.what(file) is not None:
        return True
    # Fall back on the extension whitelist for formats imghdr does not know
    _, extension = os.path.splitext(file)
    return extension in ALTERNATE_IMAGE_EXTS
rcnn/dataset/visual_voc.py | chinakook/CTPN.mxnet | 78 | 12758109 | import cv2
from xml.etree import ElementTree as ET
import matplotlib.pyplot as plt
img = cv2.imread('/mnt/6B133E147DED759E/VOCdevkit/VOC2007/JPEGImages/img_1777.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
anno = ET.parse('/mnt/6B133E147DED759E/VOCdevkit/VOC2007/Annotations/img_1777.xml')
obj_node=anno.getiterator("object")
rects = []
for obj in obj_node:
bndbox = obj.find('bndbox')
xmin = bndbox.find('xmin')
ymin = bndbox.find('ymin')
xmax = bndbox.find('xmax')
ymax = bndbox.find('ymax')
rects.append(((int(xmin.text), int(ymin.text)), (int(xmax.text), int(ymax.text))))
for r in rects:
cv2.rectangle(img, r[0], r[1], (0,255,0),1)
plt.imshow(img)
plt.show() | 2.8125 | 3 |
model_results/resultparser.py | weimegan/fireroad053 | 0 | 12758110 | <filename>model_results/resultparser.py
import csv
import numpy as np
# Per-semester selection matrix: rows are classes, columns are semesters;
# a cell equal to "1.0" marks a class taken that semester
with open('model_results/results_stud1.csv', newline='') as csvfile:
    data = list(csv.reader(csvfile))
# Row-index -> human-readable class name mapping
with open('Class_Rename.csv', newline='') as csvfile2:
    data2 = list(csv.reader(csvfile2))
results = np.array(data[1:])
# NOTE(review): the header row is dropped from results but not from classes --
# assumes Class_Rename.csv has no header so row s aligns with results row s; verify
classes = np.array(data2)
S,T = np.shape(results)
for t in range(T):
    sem_classes = []
    for s in range(S):
        if results[s, t] == "1.0":
            sem_classes.append(s)
    class_names = []
    for s in sem_classes:
        class_names.append(classes[s,0])
    print("for semester", t+1, "the student takes classes:", class_names)
| 3.109375 | 3 |
packages/sklearn/_bak/pre/svm getset test/nodes/svm getset test___GetParams0/svm getset test___GetParams0___METACODE.py | frecklebars/Ryven | 18 | 12758111 | from NIENV import *
# API METHODS --------------
# self.main_widget
# self.update_shape()
# Ports
# self.input(index)
# self.set_output_val(index, val)
# self.exec_output(index)
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', target='global')
# self.log_message('that\'s not good', target='error')
# --------------------------
class %CLASS%(NodeInstance):
    # Node that exposes the parameters of the model connected to input 0 as
    # individual data outputs. NOTE: this is generator template code --
    # %CLASS% is substituted later and the file is not valid Python as-is.
    def __init__(self, params):
        super(%CLASS%, self).__init__(params)
        # Outputs can only be created once a model is connected, so port
        # generation is exposed as a right-click special action.
        self.special_actions['generate param outputs'] = {'method': M(self.init_param_ports)}
        self.ready = False  # set True once init_param_ports created the outputs
        # self.special_actions['action name'] = {'method': M(self.action_method)}
        # ...
    def update_event(self, input_called=-1):
        # Refresh every parameter output from the connected model's get_params()
        if self.ready:
            params = self.input(0).get_params()
            i = 0
            for key in params:
                self.set_output_val(i, params[key])
                i += 1
            # the last output carries the complete parameter dict
            self.set_output_val(i, params)
    def init_param_ports(self):
        # (Re)build one output port per model parameter plus a dict output
        if self.input(0) == None:
            return
        model = self.input(0)
        # drop all previously created outputs (always delete index 0 as the
        # list shrinks)
        for i in range(len(self.outputs)):
            self.delete_output(0)
        params = model.get_params()
        keyi = 0
        for key in params:
            self.create_new_output(type_="data", label=key, pos=-1)
            self.set_output_val(keyi, params[key])
            keyi += 1
        self.create_new_output(type_="data", label="params dict", pos=-1)
        self.set_output_val(keyi, params)
        self.ready = True
    def get_data(self):
        # no persistent state beyond the ports themselves
        data = {}
        return data
    def set_data(self, data):
        pass
    def removing(self):
        pass
| 2.25 | 2 |
solver/experiment.py | pfnet-research/limited-gp | 5 | 12758112 | from cocoex import default_observers
from cocoex import Observer
from cocoex import Suite
from cocoex.utilities import ObserverOptions
from tqdm import tqdm
from typing import Callable # NOQA
from typing import Optional # NOQA
class Experiment(object):
    """Runs a solver over a COCO benchmark suite, recording results with an observer."""

    def __init__(self,
                 solver,
                 suite_name="bbob",
                 suite_instance="",
                 suite_options="dimensions: 2,3",
                 algorithm_name=None):
        # type: (Callable, str, str, str, Optional[str]) -> None
        self._algorithm_name = algorithm_name or '{}({})'.format(solver.__name__, solver.__module__)
        self._solver = solver
        self._suite_name = suite_name
        self._suite_instance = suite_instance
        self._suite_options = suite_options

    def run(self, budget=100, current_batch=1, number_of_batches=15):
        # type: (int, int, int) -> None
        """Execute the solver on every problem assigned to this batch."""
        suite = Suite(self._suite_name, self._suite_instance,
                      self._suite_options)
        observer = Observer(default_observers()[self._suite_name],
                            self._build_observer_options(budget).as_string)
        for index, problem in enumerate(tqdm(suite)):
            # round-robin batching: this process only handles its own slice
            if (index % number_of_batches) == current_batch - 1:
                observer.observe(problem)
                remaining = budget * problem.dimension - problem.evaluations_constraints
                self._solver(problem,
                             problem.lower_bounds,
                             problem.upper_bounds,
                             remaining,
                             verbose=False)

    def _build_observer_options(self, budget):
        # type: (int) -> ObserverOptions
        """Build the observer options (result folder + algorithm name)."""
        folder = '"%s/on_%s_budget%04dxD"' % (self._algorithm_name, self._suite_name, budget)
        return ObserverOptions({
            'result_folder': folder,
            'algorithm_name': self._algorithm_name,
        })
| 2.125 | 2 |
docs/bin/verify.py | Charcoal-Apple/PyDwarf | 49 | 12758113 | #!/usr/bin/env python
# coding: utf-8
'''
Poor man's unit test which goes through each of the example doctests in
docs/examples/ and makes sure they actually work as advertised.
'''
# import raws, pydwarf; df = pydwarf.df(raws)
import sys
import os
pydwarf_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))
sys.path.append(pydwarf_root)
sys.path.append(os.path.join(pydwarf_root, 'lib'))
import inspect
import doctest
import re
import raws
import pydwarf
from examples import examples
def verify(examples, skipreset=False, **globs):
    """Run each example's doctest against the shared globals.

    Args:
        examples: iterable of dicts with 'name', 'text' and 'flags' keys.
        skipreset: when True, examples flagged 'reset' are skipped entirely.
        **globs: globals made available to the doctests; must include 'df',
            whose reset() is called after each example flagged 'reset'.

    Returns:
        list of doctest failure/exception reports (empty if all passed).
    """
    docparser = doctest.DocTestParser()
    docrunner = doctest.DocTestRunner()
    results = []
    df = globs['df']
    for example in examples:
        if skipreset and 'reset' in example['flags']:
            print('Skipping example %s' % example['name'])
            continue
        print('Running example %s' % example['name'])
        # Create the doctest object
        test = docparser.get_doctest(
            string=example['text'],
            globs=globs,
            name=example['name'],
            filename=None,
            lineno=None
        )
        # Run the test; failure reports accumulate in results.
        # (Removed the unused testnum/resultcount locals and the needless
        # lambda wrapper around results.append.)
        docrunner.run(
            test=test,
            out=results.append,
            clear_globs=False
        )
        # Handle flags
        if 'reset' in example['flags']:
            print('Resetting df raws.')
            df.reset()
    return results
# Regex matching one report emitted by doctest.DocTestRunner: either an
# Expected/Got pair or an "Exception raised" traceback. (?s) lets '.' span
# newlines inside the captured bodies.
doctest_pattern = (
    '(?s)\*+\n'
    'Line (?P<line>\d+), in (?P<name>.*)\n'
    'Failed example:\n'
    '(?P<text>.*)\n'
    '('
    'Expected:\n'
    '(?P<expected>.*)\n'
    'Got:\n'
    '(?P<got>.*?)'
    '|'
    'Exception raised:\n'
    '(?P<exception>.*)'
    ')'
    '\s*$'
)
doctest_result_re = re.compile(doctest_pattern)
if __name__ == '__main__':
    # Build a PyDwarf session and run every documented example against it
    print('Initializing session.')
    conf = pydwarf.config.load(root=pydwarf_root, args={
        'log': '',
        'verbose': False,
    })
    session = pydwarf.session(raws, conf)
    print('Running examples.')
    results = verify(
        examples,
        df = session.df,
        raws = raws,
        pydwarf = pydwarf,
        session = session,
        conf = conf
    )
    # Filter out noise: reports whose Expected/Got only differ by whitespace,
    # and repeated reports for the same example
    realresults = []
    lastfailurein = None
    for result in results:
        match = doctest_result_re.match(result.expandtabs(4))
        if match:
            groups = match.groupdict()
            if groups['got'] and groups['expected']:
                ignore = groups['got'].strip() == groups['expected'].strip()
            else:
                ignore = False
            if groups['name'] == lastfailurein:
                ignore = True
            else:
                lastfailurein = groups['name']
            if not ignore: realresults.append(result)
    if realresults:
        resultstext = '\n\n'.join(reversed(realresults))
        print(resultstext)
    else:
        print('Successfully ran all %d examples.' % len(examples))
tests/commands/test_backtest.py | bmello4688/lean-cli | 0 | 12758114 | <reponame>bmello4688/lean-cli
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pathlib import Path
from typing import Optional
from unittest import mock
from xml.etree import ElementTree
import pytest
from click.testing import CliRunner
from dependency_injector import providers
from lean.commands import lean
from lean.constants import DEFAULT_ENGINE_IMAGE
from lean.container import container
from lean.models.config import DebuggingMethod
from lean.models.docker import DockerImage
from tests.test_helpers import create_fake_lean_cli_directory
ENGINE_IMAGE = DockerImage.parse(DEFAULT_ENGINE_IMAGE)
@pytest.fixture(autouse=True)
def update_manager_mock() -> mock.Mock:
    """Replace the container's update manager with a mock before every test."""
    manager_double = mock.Mock()
    container.update_manager.override(providers.Object(manager_double))
    return manager_double
def _generate_file(file: Path, content: str) -> None:
"""Writes to a file, which is created if it doesn't exist yet, and normalized the content before doing so.
:param file: the file to write to
:param content: the content to write to the file
"""
file.parent.mkdir(parents=True, exist_ok=True)
with file.open("w+", encoding="utf-8") as file:
file.write(content.strip() + "\n")
def test_backtest_calls_lean_runner_with_correct_algorithm_file() -> None:
    """Backtesting a project should hand that project's main.py to the LEAN runner."""
    create_fake_lean_cli_directory()

    docker_mock, runner_mock = mock.Mock(), mock.Mock()
    container.docker_manager.override(providers.Object(docker_mock))
    container.lean_runner.override(providers.Object(runner_mock))

    result = CliRunner().invoke(lean, ["backtest", "Python Project"])

    assert result.exit_code == 0
    runner_mock.run_lean.assert_called_once_with("backtesting",
                                                 Path("Python Project/main.py").resolve(),
                                                 mock.ANY,
                                                 ENGINE_IMAGE,
                                                 None)
def test_backtest_calls_lean_runner_with_default_output_directory() -> None:
    """Without --output, backtest results should land under <project>/backtests."""
    create_fake_lean_cli_directory()

    docker_mock, runner_mock = mock.Mock(), mock.Mock()
    container.docker_manager.override(providers.Object(docker_mock))
    container.lean_runner.override(providers.Object(runner_mock))

    result = CliRunner().invoke(lean, ["backtest", "Python Project"])
    assert result.exit_code == 0

    runner_mock.run_lean.assert_called_once()
    output_dir = runner_mock.run_lean.call_args[0][2]
    # relative_to() raises ValueError if the directory is outside Python Project/backtests
    output_dir.relative_to(Path("Python Project/backtests").resolve())
def test_backtest_calls_lean_runner_with_custom_output_directory() -> None:
    """With --output, results should land in the user-supplied directory."""
    create_fake_lean_cli_directory()
    docker_manager = mock.Mock()
    container.docker_manager.override(providers.Object(docker_manager))
    lean_runner = mock.Mock()
    container.lean_runner.override(providers.Object(lean_runner))
    result = CliRunner().invoke(lean, ["backtest", "Python Project", "--output", "Python Project/custom"])
    assert result.exit_code == 0
    lean_runner.run_lean.assert_called_once()
    args, _ = lean_runner.run_lean.call_args
    # This will raise an error if the output directory is not relative to Python Project/custom
    args[2].relative_to(Path("Python Project/custom").resolve())
def test_backtest_aborts_when_project_does_not_exist() -> None:
    """Backtesting a non-existent project should fail without invoking the runner."""
    create_fake_lean_cli_directory()

    docker_mock, runner_mock = mock.Mock(), mock.Mock()
    container.docker_manager.override(providers.Object(docker_mock))
    container.lean_runner.override(providers.Object(runner_mock))

    result = CliRunner().invoke(lean, ["backtest", "This Project Does Not Exist"])

    assert result.exit_code != 0
    runner_mock.run_lean.assert_not_called()
def test_backtest_aborts_when_project_does_not_contain_algorithm_file() -> None:
    """A directory without an algorithm file (here: 'data') cannot be backtested."""
    create_fake_lean_cli_directory()

    docker_mock, runner_mock = mock.Mock(), mock.Mock()
    container.docker_manager.override(providers.Object(docker_mock))
    container.lean_runner.override(providers.Object(runner_mock))

    result = CliRunner().invoke(lean, ["backtest", "data"])

    assert result.exit_code != 0
    runner_mock.run_lean.assert_not_called()
def test_backtest_forces_update_when_update_option_given() -> None:
    """--update should pull the engine image before running the backtest."""
    create_fake_lean_cli_directory()

    docker_mock, runner_mock = mock.Mock(), mock.Mock()
    container.docker_manager.override(providers.Object(docker_mock))
    container.lean_runner.override(providers.Object(runner_mock))

    result = CliRunner().invoke(lean, ["backtest", "Python Project", "--update"])
    assert result.exit_code == 0

    docker_mock.pull_image.assert_called_once_with(ENGINE_IMAGE)
    runner_mock.run_lean.assert_called_once_with("backtesting",
                                                 Path("Python Project/main.py").resolve(),
                                                 mock.ANY,
                                                 ENGINE_IMAGE,
                                                 None)
def test_backtest_passes_custom_image_to_lean_runner_when_set_in_config() -> None:
    """The engine image configured via the CLI config should reach the runner."""
    create_fake_lean_cli_directory()

    docker_mock, runner_mock = mock.Mock(), mock.Mock()
    container.docker_manager.override(providers.Object(docker_mock))
    container.lean_runner.override(providers.Object(runner_mock))

    container.cli_config_manager().engine_image.set_value("custom/lean:123")

    result = CliRunner().invoke(lean, ["backtest", "Python Project"])
    assert result.exit_code == 0

    runner_mock.run_lean.assert_called_once_with("backtesting",
                                                 Path("Python Project/main.py").resolve(),
                                                 mock.ANY,
                                                 DockerImage(name="custom/lean", tag="123"),
                                                 None)
def test_backtest_passes_custom_image_to_lean_runner_when_given_as_option() -> None:
    """The --image option should override the engine image from the CLI config."""
    create_fake_lean_cli_directory()

    docker_mock, runner_mock = mock.Mock(), mock.Mock()
    container.docker_manager.override(providers.Object(docker_mock))
    container.lean_runner.override(providers.Object(runner_mock))

    container.cli_config_manager().engine_image.set_value("custom/lean:123")

    result = CliRunner().invoke(lean, ["backtest", "Python Project", "--image", "custom/lean:456"])
    assert result.exit_code == 0

    runner_mock.run_lean.assert_called_once_with("backtesting",
                                                 Path("Python Project/main.py").resolve(),
                                                 mock.ANY,
                                                 DockerImage(name="custom/lean", tag="456"),
                                                 None)
@pytest.mark.parametrize("value,debugging_method", [("pycharm", DebuggingMethod.PyCharm),
("PyCharm", DebuggingMethod.PyCharm),
("ptvsd", DebuggingMethod.PTVSD),
("PTVSD", DebuggingMethod.PTVSD),
("vsdbg", DebuggingMethod.VSDBG),
("VSDBG", DebuggingMethod.VSDBG),
("rider", DebuggingMethod.Rider),
("Rider", DebuggingMethod.Rider)])
def test_backtest_passes_correct_debugging_method_to_lean_runner(value: str, debugging_method: DebuggingMethod) -> None:
create_fake_lean_cli_directory()
docker_manager = mock.Mock()
container.docker_manager.override(providers.Object(docker_manager))
lean_runner = mock.Mock()
container.lean_runner.override(providers.Object(lean_runner))
result = CliRunner().invoke(lean, ["backtest", "Python Project/main.py", "--debug", value])
assert result.exit_code == 0
lean_runner.run_lean.assert_called_once_with("backtesting",
Path("Python Project/main.py").resolve(),
mock.ANY,
ENGINE_IMAGE,
debugging_method)
@pytest.mark.parametrize("image_option,update_flag,update_check_expected", [(None, True, False),
(None, False, True),
("custom/lean:3", True, False),
("custom/lean:3", False, False),
(DEFAULT_ENGINE_IMAGE, True, False),
(DEFAULT_ENGINE_IMAGE, False, True)])
def test_backtest_checks_for_updates(update_manager_mock: mock.Mock,
image_option: Optional[str],
update_flag: bool,
update_check_expected: bool) -> None:
create_fake_lean_cli_directory()
docker_manager = mock.Mock()
container.docker_manager.override(providers.Object(docker_manager))
lean_runner = mock.Mock()
container.lean_runner.override(providers.Object(lean_runner))
options = []
if image_option is not None:
options.extend(["--image", image_option])
if update_flag:
options.extend(["--update"])
result = CliRunner().invoke(lean, ["backtest", "Python Project", *options])
assert result.exit_code == 0
if update_check_expected:
update_manager_mock.warn_if_docker_image_outdated.assert_called_once_with(ENGINE_IMAGE)
else:
update_manager_mock.warn_if_docker_image_outdated.assert_not_called()
def test_backtest_auto_updates_outdated_python_pycharm_debug_config() -> None:
    """An outdated /LeanCLI remote root in a PyCharm debug config should be rewritten."""
    create_fake_lean_cli_directory()
    workspace_xml_path = Path.cwd() / "Python Project" / ".idea" / "workspace.xml"
    # Fixture: a PyCharm remote-debug configuration that still maps the project
    # to the old /LeanCLI container path.
    _generate_file(workspace_xml_path, """
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
    <component name="RunManager" selected="Python Debug Server.Debug with Lean CLI">
        <configuration name="Debug with Lean CLI" type="PyRemoteDebugConfigurationType" factoryName="Python Remote Debug">
            <module name="LEAN" />
            <option name="PORT" value="6000" />
            <option name="HOST" value="localhost" />
            <PathMappingSettings>
                <option name="pathMappings">
                    <list>
                        <mapping local-root="$PROJECT_DIR$" remote-root="/LeanCLI" />
                    </list>
                </option>
            </PathMappingSettings>
            <option name="REDIRECT_OUTPUT" value="true" />
            <option name="SUSPEND_AFTER_CONNECT" value="true" />
            <method v="2" />
        </configuration>
        <list>
            <item itemvalue="Python Debug Server.Debug with Lean CLI" />
        </list>
    </component>
</project>
    """)
    docker_manager = mock.Mock()
    container.docker_manager.override(providers.Object(docker_manager))
    lean_runner = mock.Mock()
    container.lean_runner.override(providers.Object(lean_runner))
    result = CliRunner().invoke(lean, ["backtest", "Python Project", "--debug", "pycharm"])
    # NOTE(review): the command exits non-zero here -- presumably it aborts after
    # rewriting the outdated config so the user can restart PyCharm. TODO confirm.
    assert result.exit_code == 1
    workspace_xml = ElementTree.fromstring(workspace_xml_path.read_text(encoding="utf-8"))
    # The old /LeanCLI mapping must be gone, replaced by the runtime LEAN path.
    assert workspace_xml.find(".//mapping[@remote-root='/LeanCLI']") is None
    assert workspace_xml.find(".//mapping[@remote-root='/Lean/Launcher/bin/Debug']") is not None
def test_backtest_auto_updates_outdated_python_vscode_debug_config() -> None:
    """An outdated /LeanCLI remoteRoot in a VS Code Python config should be rewritten."""
    create_fake_lean_cli_directory()
    launch_json_path = Path.cwd() / "Python Project" / ".vscode" / "launch.json"
    # Fixture: a ptvsd attach configuration that still maps the workspace to
    # the old /LeanCLI container path.
    _generate_file(launch_json_path, """
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Debug with Lean CLI",
            "type": "python",
            "request": "attach",
            "connect": {
                "host": "localhost",
                "port": 5678
            },
            "pathMappings": [
                {
                    "localRoot": "${workspaceFolder}",
                    "remoteRoot": "/LeanCLI"
                }
            ]
        }
    ]
}
    """)
    docker_manager = mock.Mock()
    container.docker_manager.override(providers.Object(docker_manager))
    lean_runner = mock.Mock()
    container.lean_runner.override(providers.Object(lean_runner))
    result = CliRunner().invoke(lean, ["backtest", "Python Project", "--debug", "ptvsd"])
    assert result.exit_code == 0
    launch_json = json.loads(launch_json_path.read_text(encoding="utf-8"))
    assert len(launch_json["configurations"]) == 1
    # The CLI should have replaced the remote root with the runtime LEAN path.
    assert launch_json["configurations"][0] == {
        "name": "Debug with Lean CLI",
        "type": "python",
        "request": "attach",
        "connect": {
            "host": "localhost",
            "port": 5678
        },
        "pathMappings": [
            {
                "localRoot": "${workspaceFolder}",
                "remoteRoot": "/Lean/Launcher/bin/Debug"
            }
        ]
    }
def test_backtest_auto_updates_outdated_csharp_vscode_debug_config() -> None:
    """An outdated mono attach config should be replaced with the coreclr/vsdbg one."""
    create_fake_lean_cli_directory()
    launch_json_path = Path.cwd() / "CSharp Project" / ".vscode" / "launch.json"
    # Fixture: the old mono-based attach configuration.
    _generate_file(launch_json_path, """
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Debug with Lean CLI",
            "request": "attach",
            "type": "mono",
            "address": "localhost",
            "port": 55556
        }
    ]
}
    """)
    docker_manager = mock.Mock()
    container.docker_manager.override(providers.Object(docker_manager))
    lean_runner = mock.Mock()
    container.lean_runner.override(providers.Object(lean_runner))
    result = CliRunner().invoke(lean, ["backtest", "CSharp Project", "--debug", "vsdbg"])
    assert result.exit_code == 0
    launch_json = json.loads(launch_json_path.read_text(encoding="utf-8"))
    assert len(launch_json["configurations"]) == 1
    # The CLI should have rewritten the config to attach vsdbg inside the container.
    assert launch_json["configurations"][0] == {
        "name": "Debug with Lean CLI",
        "request": "attach",
        "type": "coreclr",
        "processId": "1",
        "pipeTransport": {
            "pipeCwd": "${workspaceRoot}",
            "pipeProgram": "docker",
            "pipeArgs": ["exec", "-i", "lean_cli_vsdbg"],
            "debuggerPath": "/root/vsdbg/vsdbg",
            "quoteArgs": False
        },
        "logging": {
            "moduleLoad": False
        }
    }
def test_backtest_auto_updates_outdated_csharp_rider_debug_config() -> None:
    """Outdated Rider mono-remote run configs should be removed from all .idea dirs."""
    create_fake_lean_cli_directory()
    # Rider may keep its workspace under either of these two directory names.
    for dir_name in [".idea.CSharp Project", ".idea.CSharp Project.dir"]:
        _generate_file(Path.cwd() / "CSharp Project" / ".idea" / dir_name / ".idea" / "workspace.xml", """
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
    <component name="RunManager">
        <configuration name="Debug with Lean CLI" type="ConnectRemote" factoryName="Mono Remote" show_console_on_std_err="false" show_console_on_std_out="false" port="55556" address="localhost">
            <option name="allowRunningInParallel" value="false" />
            <option name="listenPortForConnections" value="false" />
            <option name="selectedOptions">
                <list />
            </option>
            <method v="2" />
        </configuration>
    </component>
</project>
        """)
    docker_manager = mock.Mock()
    container.docker_manager.override(providers.Object(docker_manager))
    lean_runner = mock.Mock()
    container.lean_runner.override(providers.Object(lean_runner))
    result = CliRunner().invoke(lean, ["backtest", "CSharp Project", "--debug", "rider"])
    # NOTE(review): the command exits non-zero here -- presumably it aborts after
    # rewriting the outdated configs so the user can restart Rider. TODO confirm.
    assert result.exit_code == 1
    for dir_name in [".idea.CSharp Project", ".idea.CSharp Project.dir"]:
        workspace_xml_path = Path.cwd() / "CSharp Project" / ".idea" / dir_name / ".idea" / "workspace.xml"
        workspace_xml = ElementTree.fromstring(workspace_xml_path.read_text(encoding="utf-8"))
        # The outdated mono-remote run configuration should be gone.
        assert workspace_xml.find(".//configuration[@name='Debug with Lean CLI']") is None
| 1.945313 | 2 |
kick/device2/general/actions/power_bar.py | CiscoDevNet/firepower-kickstart | 2 | 12758115 | """Perform power-bar options on a device."""
import logging
import re
import telnetlib
import time
try:
from kick.misc.convert_bytes import string_to_bytes, bytes_to_string
except ImportError:
from kick.miscellaneous.convert import string_to_bytes, bytes_to_string
LOGGER = logging.getLogger(__name__)
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(HANDLER)
def power_bar(
        power_server,
        port,
        action='status',
        user='admn',
        pwd='<PASSWORD>'):
    """Telnet to a power bar (PDU) and perform the specified action on one port.

    :param power_server: name or IP address of the power bar
    :param port: port of the device to perform the power action on
    :param action: one of 'status', 'on', 'off' or 'reboot'
    :param user: power-bar login name
    :param pwd: power-bar password (default looks like a redacted placeholder -- TODO confirm)
    :return: for 'status', the reported state of the port; for other actions,
        the 'Command successful' confirmation text
    :raises ValueError: if ``action`` is not one of the supported actions
    """
    actions = ['status', 'on', 'off', 'reboot']
    LOGGER.info('power_bar %s port=%s action=%s' % (power_server, port, action))
    if action.lower() not in actions:
        raise ValueError('action should be one of %s' % str(actions))
    # 'status' output carries the port state in the third column; every other
    # action is confirmed with a 'Command successful' line. Patterns cover the
    # different versions of PDU firmware.
    pattern = r'.*\.?%s +[^ ]+ +([^ ]+).*' % port if action.lower() == 'status' \
        else r'.*(Command successful).*'
    # Fix: initialize before the try block so the finally clause cannot hit an
    # unbound name (previously masked by a bare `except:`) when Telnet() fails.
    session = None
    try:
        # match both 'Switched PDU:' and 'Switched CDU:' menu prompts
        prompt = [string_to_bytes("Switched .*:")]
        session = telnetlib.Telnet(power_server)
        session.read_until(string_to_bytes("Username:"))
        session.write(string_to_bytes(user + '\n'))
        session.read_until(string_to_bytes("Password:"))
        session.write(string_to_bytes(pwd + '\n'))
        session.expect(prompt)
        session.write(string_to_bytes('%s .%s\n' % (action, port)))
        # expect() returns a tuple whose last element is the text read up to
        # and including the match
        result = session.expect(prompt)[2]
        for line in result.splitlines():
            LOGGER.debug(line)
        # NOTE(review): raises AttributeError when the device output does not
        # match the expected pattern -- preserved for backward compatibility.
        return re.search(pattern, bytes_to_string(result), re.IGNORECASE).group(1)
    finally:
        if session is not None:
            try:
                session.close()
            except Exception:  # best-effort cleanup; ignore close failures
                pass
def power_cycle_all_ports(power_bar_server, power_bar_port, power_bar_user, power_bar_pwd):
    """Power off and then power on all given ports.

    :param power_bar_server: comma-separated IP addresses of the PDUs
    :param power_bar_port: comma-separated power ports on the PDUs
    :param power_bar_user: comma-separated usernames for the power-bar servers
    :param power_bar_pwd: comma-separated passwords for the power-bar servers
    :return: truthy if all ports were powered off and on successfully,
        falsy otherwise
    """
    result = True
    power_bar_servers = [server.strip() for server in power_bar_server.split(',')]
    power_bar_ports = [port.strip() for port in power_bar_port.split(',')]
    power_bar_users = [user.strip() for user in power_bar_user.split(',')]
    power_bar_pwds = [pwd.strip() for pwd in power_bar_pwd.split(',')]
    LOGGER.info('->Power off all power ports')
    for server, port, user, pwd in zip(power_bar_servers, power_bar_ports, power_bar_users, power_bar_pwds):
        LOGGER.info('->Power off {} {}'.format(server, port))
        # NOTE(review): `result and ...` short-circuits, so once one action
        # fails the remaining ports are skipped -- presumably intentional.
        result = result and power_bar(server, port, action='off', user=user, pwd=pwd)
        LOGGER.info('->Done ')
        time.sleep(10)
    LOGGER.info("Sleeping for 60 secs..")
    time.sleep(60)
    LOGGER.info('->Power on all power ports')
    for server, port, user, pwd in zip(power_bar_servers, power_bar_ports, power_bar_users, power_bar_pwds):
        LOGGER.info('->Power on {} {}'.format(server, port))
        # Fix: forward the password to power_bar; the original line contained a
        # literal '<PASSWORD>' redaction placeholder, which is a syntax error.
        result = result and power_bar(server, port, action='on', user=user, pwd=pwd)
        LOGGER.info('->Done')
        time.sleep(10)
    return result
| 2.9375 | 3 |
firstApp/models.py | dppeykov/Dj_RAC | 0 | 12758116 | from django.db import models
# Create your models here.
class Employee(models.Model):
    """Simple employee record with an explicit integer primary key, name and salary."""

    # Explicit primary key instead of Django's implicit AutoField.
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=20)
    # Up to 10 digits in total, 3 of them after the decimal point.
    sal = models.DecimalField(max_digits=10, decimal_places=3)

    def __str__(self):
        # Fix: the original return line had extraction junk ('| 2.921875 | 3 |')
        # fused onto its end, which made the file unparseable.
        return f'Employee object with ID: {self.id}, Name: {self.name} and Salary: {self.sal}'
tests/unit/vic_run/test_calc_snow_coverage.py | lingyunan0510/VIC | 1 | 12758117 | from vic.vic import ffi
from vic import lib as vic_lib
def test_calc_snow_coverage_no_change():
    """With identical old and new depth/SWE inputs the coverage must not change."""
    store_snow = ffi.new('_Bool *')
    out_ptrs = [ffi.new('double *') for _ in range(4)]
    max_snow_depth, store_swq, snow_distrib_slope, store_coverage = out_ptrs
    old_coverage = 0.75
    coverage = vic_lib.calc_snow_coverage(
        store_snow, 0.5, old_coverage, 1.25, 1.25, 2.3, 2.3, 0.,
        max_snow_depth, 0., store_swq, snow_distrib_slope, store_coverage)
    assert coverage == old_coverage
def test_calc_snow_coverage_increased():
    """Larger new depth/SWE inputs should yield a coverage above the old value."""
    def _double_ptr(value):
        # Allocate a C double and seed it with the given value.
        ptr = ffi.new('double *')
        ptr[0] = value
        return ptr

    store_snow = ffi.new('_Bool *')
    store_snow[0] = True
    max_snow_depth = _double_ptr(3.)
    store_swq = _double_ptr(0.5)
    snow_distrib_slope = _double_ptr(0.5)
    store_coverage = _double_ptr(0.75)
    old_coverage = 0.75
    coverage = vic_lib.calc_snow_coverage(
        store_snow, 0.5, old_coverage, 1.25, 1.5, 2.3, 3., 0., max_snow_depth,
        0.25, store_swq, snow_distrib_slope, store_coverage)
    assert coverage > old_coverage
backend/app/bug_killer_api_interface/schemas/request/project.py | SeanFitzpatrick0/BugKiller | 0 | 12758118 | from typing import Optional, List
from pydantic import Field, validator
from bug_killer_api_interface.test.test_doubles.default_values import mock_project_title, mock_project_description, \
mock_project_tags, mock_member_id
from bug_killer_app.test.test_doubles.default_values import mock_manager_id
from bug_killer_utils.collections import remove_duplicates_in_list
from bug_killer_utils.model.bk_base_model import BkBaseModel
class CreateProjectPayload(BkBaseModel):
    """Payload used to create a new project."""
    title: str = Field(description='The title of the project to create')
    # Fix: this help text previously duplicated the title field's description.
    description: str = Field(description='The description of the project to create')
    members: List[str] = Field(
        default_factory=list,
        description='List of members to be added to the project. It should be a list of cognito user ids'
    )
    tags: List[str] = Field(default_factory=list, description='List of tags to be added to the project to create')

    @validator('members', 'tags', pre=True)
    def set_values(cls, value: List[str]) -> List[str]:
        # Normalize incoming lists: drop duplicates and sort for stable storage.
        return sorted(remove_duplicates_in_list(value))

    @classmethod
    def test_double(  # type: ignore[override]
            cls, *,
            title: Optional[str] = None,
            description: Optional[str] = None,
            members: Optional[List[str]] = None,
            tags: Optional[List[str]] = None
    ) -> 'CreateProjectPayload':
        """Build a payload pre-filled with mock values for use in tests."""
        return cls(
            title=title or mock_project_title,
            description=description or mock_project_description,
            members=members or [mock_member_id],
            tags=tags or mock_project_tags
        )
class UpdateProjectPayload(BkBaseModel):
    """Payload used to update an existing project."""
    title: Optional[str] = Field(None, description='The new title to set')
    description: Optional[str] = Field(None, description='The new description to set')
    manager: Optional[str] = Field(None, description='The cognito user id of the new manager of the project')
    members: Optional[List[str]] = Field(None, description='The new list of members that the project should have')
    tags: Optional[List[str]] = Field(None, description='The new list of tags to set')

    @validator('members', 'tags')
    def set_values(cls, value: Optional[List[str]]) -> Optional[List[str]]:
        # Leave unset fields untouched; otherwise dedupe and sort.
        return None if value is None else sorted(remove_duplicates_in_list(value))

    @classmethod
    def test_double(  # type: ignore[override]
            cls, *,
            title: Optional[str] = None,
            description: Optional[str] = None,
            manager: Optional[str] = None,
            members: Optional[List[str]] = None,
            tags: Optional[List[str]] = None,
    ) -> 'UpdateProjectPayload':
        """Build a payload pre-filled with mock values for use in tests."""
        defaults = {
            'title': mock_project_title,
            'description': mock_project_description,
            'manager': mock_manager_id,
            'members': [mock_member_id],
            'tags': mock_project_tags,
        }
        supplied = {
            'title': title,
            'description': description,
            'manager': manager,
            'members': members,
            'tags': tags,
        }
        # Falsy supplied values fall back to the mock defaults, matching the
        # `x or default` pattern used elsewhere in this module.
        return cls(**{key: supplied[key] or defaults[key] for key in defaults})
| 2.46875 | 2 |
atrial_model/iNa/scripts/mini_optimize.py | hundlab/iNaCells2021Code | 0 | 12758119 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 12:48:30 2020
@author: grat05
"""
import sys
import os
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir)))
#from iNa_models import Koval_ina, OHaraRudy_INa
import atrial_model
from atrial_model.iNa.models import OHaraRudy_INa, Koval_ina
import atrial_model.run_sims_functions
from atrial_model.run_sims_functions import peakCurr, normalized2val, calcExpTauInact, monoExp,\
calcExpTauAct, triExp, biExp
from atrial_model.run_sims import calc_diff
from atrial_model.iNa.define_sims import sim_fs, datas,\
keys_all, exp_parameters
from atrial_model.iNa.model_setup import model, mp_locs, sub_mps, sub_mp_bounds, model_params_initial
from atrial_model.parse_cmd_args import args
import atrial_model.run_sims_functions
from atrial_model.run_sims import calc_results, SimResults
from atrial_model.iNa.define_sims import sim_fs, datas, keys_all, exp_parameters
from atrial_model.iNa.model_setup import model_params_initial, mp_locs, sub_mps, model
from atrial_model.optimization_functions import lstsq_wrap, save_results
from multiprocessing import Pool
from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt
from functools import partial
import pickle
import datetime
import numpy as np
import pickle
from threading import Timer
from multiprocessing import Manager
from functools import partial
import os
import pyDOE
from scipy.stats import distributions
keys_keep = []
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# # #iv curve
# keys_iin = [
# ('8928874_7', 'Dataset C day 1'), ('8928874_7', 'Dataset C day 3'),
# ('8928874_7', 'Dataset C day 5'), ('8928874_7', 'Dataset C fresh'),
# #('12890054_3', 'Dataset C Control'), ('12890054_3', 'Dataset D Control'),
# #('12890054_5', 'Dataset C Control'), ('12890054_5', 'Dataset D Control'),
# ('1323431_1', 'Dataset B'), ('1323431_3', 'Dataset A 2'),
# ('1323431_3', 'Dataset A 20'), ('1323431_3', 'Dataset A 5'),
# ('1323431_4', 'Dataset B Control'),
# ('21647304_1', 'Dataset B Adults'), ('21647304_1', 'Dataset B Pediatrics')
# ]
# keys_keep += keys_iin
# ##activation normalized to driving force
# keys_iin = [
# ('1323431_2', 'Dataset'),\
# ('8928874_7', 'Dataset D fresh'), ('8928874_7', 'Dataset D day 1'),\
# ('8928874_7', 'Dataset D day 3'), ('8928874_7', 'Dataset D day 5'),\
# ('21647304_3', 'Dataset A Adults'), ('21647304_3', 'Dataset A Pediatrics')
# ]
# keys_keep += keys_iin
# I2/I1 Recovery
# keys_iin = [('1323431_8', 'Dataset A -140'), ('1323431_8', 'Dataset A -120'),\
# ('1323431_8', 'Dataset A -100'),\
# ('21647304_3', 'Dataset C Adults'), ('21647304_3', 'Dataset C Pediatrics'),\
# ('8928874_9', 'Dataset fresh'), ('8928874_9', 'Dataset day 1'),\
# ('8928874_9', 'Dataset day 3'), ('8928874_9', 'Dataset day 5')
# ]
# keys_keep += keys_iin
# # #recovery normalized to preprepulse
# keys_iin = [\
# ('7971163_6', 'Dataset -75'),\
# ('7971163_6', 'Dataset -85'),\
# ('7971163_6', 'Dataset -95'),\
# ('7971163_6', 'Dataset -105'),\
# ('7971163_6', 'Dataset -115'),
# ('7971163_6', 'Dataset -125'),\
# ('7971163_6', 'Dataset -135')
# ]
# keys_keep += keys_iin
#inactivation normalized to no prepulse
keys_iin = [
('7971163_4', 'Dataset 32ms'), ('7971163_4', 'Dataset 64ms'),
('7971163_4', 'Dataset 128ms'), ('7971163_4', 'Dataset 256ms'),
('7971163_4', 'Dataset 512ms'),\
# ('8928874_8', 'Dataset C fresh'), ('8928874_8', 'Dataset C day 1'),\
# ('8928874_8', 'Dataset C day 3'), ('8928874_8', 'Dataset C day 5')
]
#('21647304_3', 'Dataset B Adults'), ('21647304_3', 'Dataset B Pediatrics')
keys_keep += keys_iin
# #inactivation normalized to first
# keys_iin = [('7971163_5', 'Dataset A -65'), ('7971163_5', 'Dataset A -75'),\
# ('7971163_5', 'Dataset A -85'), ('7971163_5', 'Dataset A -95'),\
# ('7971163_5', 'Dataset A -105')
# ]
# keys_keep += keys_iin
#tau inactivation
keys_iin = [('8928874_8', 'Dataset E fresh')#, ('8928874_8', 'Dataset E day 1'),\
# ('8928874_8', 'Dataset E day 3'), ('8928874_8', 'Dataset E day 5')]#,\
# ('1323431_5', 'Dataset B fast'),\
# ('21647304_2', 'Dataset C Adults'), ('21647304_2', 'Dataset C Pediactric')
]
keys_keep += keys_iin
#####tau activation
# keys_iin = [('8928874_8', 'Dataset D fresh'), ('8928874_8', 'Dataset D day 1'),\
# ('8928874_8', 'Dataset D day 3'), ('8928874_8', 'Dataset D day 5'),
# #('7971163_3', 'Dataset C')
# ]
# keys_keep += keys_iin
# #tau inactivation fast & slow
# keys_iin = [('21647304_2', 'Dataset C Adults'), ('21647304_2', 'Dataset D Adults'),\
# ('21647304_2', 'Dataset C Pediactric'), ('21647304_2', 'Dataset D Pediactric')]
# #('1323431_5', 'Dataset B fast'),('1323431_5', 'Dataset B slow'),\
# keys_keep += keys_iin
# #tau inactivation normalized to first
# keys_iin = [('1323431_6', 'Dataset -80'), ('1323431_6', 'Dataset -100')]
# keys_keep += keys_iin
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
atrial_model.run_sims_functions.plot1 = False #sim
atrial_model.run_sims_functions.plot2 = False #diff
atrial_model.run_sims_functions.plot3 = False #tau
keys_keep = set(keys_keep)
sim_fs = {key: sim_f for key, sim_f in sim_fs.items() if key in keys_keep}
datas = {key: data for key, data in datas.items() if key in keys_keep}
if __name__ == '__main__':
with Pool() as proc_pool:
mp_locs = list(set(mp_locs))
sub_mps = model_params_initial[mp_locs]
sub_mp_bounds = np.array(model().param_bounds)[mp_locs]
min_res = []
all_res = []
# accept_test=partial(check_bounds, bounds=sub_mp_bounds))
# minimizer_kwargs = {"method": "BFGS", "options": {"maxiter":100}}
diff_fn = partial(calc_diff, model_parameters_full=model_params_initial,\
mp_locs=mp_locs, sim_func=sim_fs, data=datas,\
pool=proc_pool,ssq=True,\
results=all_res, prnt_err=False)#
minimizer_kwargs = {"method": lstsq_wrap, "options":{"ssq": False}}#"bounds": sub_mp_bounds,
# res = optimize.basinhopping(diff_fn, sub_mps, \
# minimizer_kwargs=minimizer_kwargs,\
# niter=10, T=80,\
# callback=partial(save_results, results=min_res),\
# stepsize=1)#T=80
res = optimize.dual_annealing(diff_fn, bounds=sub_mp_bounds,
no_local_search=True,
local_search_options=minimizer_kwargs,
maxiter=10,maxfun=2000)
print("Optimization done")
# param_intervals = np.squeeze(np.diff(np.array(model.param_bounds)))
# rand_mps = pyDOE.lhs(model.num_params, samples=300)
# rand_mps = distributions.norm(loc=res.x, scale=param_intervals/2).ppf(rand_mps)
# rand_res = []
# diff_fn = partial(calc_diff, model_parameters_full=model_params_initial,\
# mp_locs=mp_locs, sim_func=sim_fs, data=datas,\
# pool=proc_pool,ssq=True,\
# results=rand_res, prnt_err=False)#
# for i in range(rand_mps.shape[0]):
# diff_fn(rand_mps[i])
# res = optimize.least_squares(diff_fn, sub_mps, \
# bounds=np.array(model().param_bounds)[mp_locs].T)
atrial_model.run_sims_functions.plot2 = True
diff_fn(res.x) | 1.476563 | 1 |
test/test_policy.py | Acrosure/acrosure-python-sdk | 0 | 12758120 | <filename>test/test_policy.py
import unittest
from acrosure_sdk import AcrosureClient
from acrosure_sdk.policy import PolicyManager
from .constants import (
TEST_PUBLIC_KEY,
)
import os
API_URL = os.environ.get('API_URL')
class PolicyTestCase(unittest.TestCase):
    """Integration tests for PolicyManager against the Acrosure API.

    NOTE(review): the numbered test names matter -- unittest runs methods in
    alphabetical order, and test_3 reads the POLICIES list that test_2 stores
    on the class. These tests also hit a live API (requires network access and
    a reachable API_URL).
    """

    # Shared across tests: populated by test_2, consumed by test_3.
    POLICIES = []

    def setUp( self ):
        # Fresh client (and its policy manager) for every test.
        self.client = AcrosureClient(TEST_PUBLIC_KEY, API_URL)
        self.policy = self.client.policy

    def test_1_instance_of_acrosure( self ):
        """The client and its policy attribute expose the expected types."""
        client = self.client
        policy = self.policy
        self.assertIsInstance(client, AcrosureClient)
        self.assertIsInstance(policy, PolicyManager)

    def test_2_list_policies( self ):
        """Listing policies returns a non-empty list and caches it for test_3."""
        policy = self.policy
        resp = policy.list()
        self.assertEqual(resp["status"], "ok")
        policies = resp["data"]
        self.assertIsInstance(policies, list)
        self.assertTrue(len(policies) > 0)
        self.__class__.POLICIES = policies

    def test_3_get_policy_detail( self ):
        """Fetching a policy by id returns the matching detail dict."""
        policy = self.policy
        policy_id = self.__class__.POLICIES[0]["id"]
        resp = policy.get(policy_id)
        self.assertEqual(resp["status"], "ok")
        policy_detail = resp["data"]
        self.assertIsInstance(policy_detail, dict)
        self.assertEqual(policy_detail["id"], policy_id)
# Allow running this test module directly with `python test_policy.py`.
if __name__ == '__main__':
    unittest.main()
| 2.796875 | 3 |
tests/commands/test_copy.py | open-contracting/ocds-index | 0 | 12758121 | import os
import traceback
from collections import defaultdict
from click.testing import CliRunner
from ocdsindex.cli.__main__ import main
from tests import elasticsearch, search
def test_copy(tmpdir):
    """Index the fixture data, copy it to a new base URL, and verify both exist.

    NOTE(review): integration test -- requires a reachable Elasticsearch at
    ELASTICSEARCH_URL (default localhost:9200).
    """
    host = os.getenv("ELASTICSEARCH_URL", "localhost:9200")
    runner = CliRunner()
    with elasticsearch(host) as es:
        # Populate the per-language indices from the fixture file.
        result = runner.invoke(main, ["index", host, os.path.join("tests", "fixtures", "data.json")])
        assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
        assert result.output == ""
        # Refresh so the freshly indexed documents are visible to the copy step.
        es.indices.refresh("ocdsindex_en")
        es.indices.refresh("ocdsindex_es")
        source = "https://standard.open-contracting.org/dev/"
        destination = "https://standard.open-contracting.org/copy/"
        result = runner.invoke(main, ["copy", host, source, destination])
        assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
        assert result.output == ""
        # Expected document counts per index (mirrors the fixture contents).
        for index, value in (("ocdsindex_en", 8), ("ocdsindex_es", 1)):
            hits = search(es, index)
            counts = defaultdict(int)
            for hit in hits["hits"]:
                counts[hit["_source"]["base_url"]] += 1
            # Every document should now exist under both the source and the
            # destination base URL.
            assert counts == {
                source: value,
                destination: value,
            }
            assert hits["total"]["value"] == value * 2
| 2.296875 | 2 |
example/views.py | ephes/wagtail_srcset | 11 | 12758122 | from django.views.generic import TemplateView
from wagtail.images.models import Image as WagtailImage
class MainView(TemplateView):
    """Landing page that exposes the first Wagtail image to the template as ``img``."""

    template_name = "index.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["img"] = WagtailImage.objects.first()
        return ctx
| 1.976563 | 2 |
tensorflow_gnn/models/gat_v2/layers.py | tensorflow/gnn | 611 | 12758123 | """Contains a Graph Attention Network v2 and associated layers."""
from typing import Any, Callable, Mapping, Optional, Union
import tensorflow as tf
import tensorflow_gnn as tfgnn
@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")
class GATv2Conv(tfgnn.keras.layers.AnyToAnyConvolutionBase):
  """The multi-head attention from Graph Attention Networks v2 (GATv2).

  GATv2 (https://arxiv.org/abs/2105.14491) improves upon the popular
  GAT architecture (https://arxiv.org/abs/1710.10903) by allowing the network
  to compute a more expressive "dynamic" instead of just "static" attention,
  each of whose heads is described by Equations (7), (3) and (4) in
  https://arxiv.org/abs/2105.14491.

  Example: GATv2-style attention on incoming edges whose result is
  concatenated with the old node state and passed through a Dense layer
  to compute the new node state.

  ```
  dense = tf.keras.layers.Dense
  graph = tfgnn.keras.layers.GraphUpdate(
      node_sets={"paper": tfgnn.keras.layers.NodeSetUpdate(
          {"cites": tfgnn.keras.layers.GATv2Conv(
               message_dim, receiver_tag=tfgnn.TARGET)},
          tfgnn.keras.layers.NextStateFromConcat(dense(node_state_dim)))}
  )(graph)
  ```

  This layer implements the multi-head attention of GATv2 with the following
  generalizations:
    * This implementation of GATv2 attends only to edges that are explicitly
      stored in the input GraphTensor. Attention of a node to itself is
      enabled or disabled by storing or not storing an explicit loop in the
      edge set. The example above uses a separate layer to combine the old
      node state with the attention result to form the new node state.
    * Attention values can be computed from a sender node state that gets
      broadcast onto the edge (see arg `sender_node_feature`), from an edge
      feature (see arg `sender_edge_feature`), or from their concatenation
      (by setting both arguments). This choice is used in place of the sender
      node state $h_j$ in the defining equations cited above.
    * This layer can be used with `receiver_tag=tfgnn.CONTEXT` to perform a
      convolution to the context, with graph components as receivers and the
      containment in graph components used in lieu of edges.
    * An `edge_dropout` option is provided.

  This layer can also be configured to do attention pooling from edges to
  context or to receiver nodes (without regard for source nodes) by setting
  `sender_node_feature=None` and setting `sender_edge_feature=...` to the
  applicable edge feature name (e.g., `tfgnn.DEFAULT_FEATURE_NAME`).

  Like the Keras Dense layer, if the input features have rank greater than 2,
  this layer computes a point-wise attention along the last axis of the inputs.
  For example, if the input features have shape [num_nodes, 2, 4, 1], then it
  will perform an identical computation on each of the num_nodes * 2 * 4 input
  values.

  Init args:
    num_heads: The number of attention heads.
    per_head_channels: The number of channels for each attention head. This
      means that the final output size will be per_head_channels * num_heads.
    receiver_tag: one of `tfgnn.SOURCE`, `tfgnn.TARGET` or `tfgnn.CONTEXT`.
      The results of attention are aggregated for this graph piece.
      If set to `tfgnn.SOURCE` or `tfgnn.TARGET`, the layer can be called for
      an edge set and will aggregate results at the specified endpoint of the
      edges.
      If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or
      node set.
      If left unset for init, the tag must be passed at call time.
    receiver_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME`
      for use as the receiver's input feature to attention. (The attention key
      is derived from this input.)
    sender_node_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME`
      for use as the input feature from sender nodes to attention.
      IMPORTANT: Must be set to `None` for use with `receiver_tag=tfgnn.CONTEXT`
      on an edge set, or for pooling from edges without sender node states.
    sender_edge_feature: Can be set to a feature name of the edge set to select
      it as an input feature. By default, this set to `None`, which disables
      this input.
      IMPORTANT: Must be set for use with `receiver_tag=tfgnn.CONTEXT`
      on an edge set.
    use_bias: If true, a bias term is added to the transformations of query and
      value inputs.
    edge_dropout: Can be set to a dropout rate for edge dropout. (When pooling
      nodes to context, it's the node's membership in a graph component that
      is dropped out.)
    attention_activation: The nonlinearity used on the transformed inputs
      before multiplying with the trained weights of the attention layer.
      This can be specified as a Keras layer, a tf.keras.activations.*
      function, or a string understood by tf.keras.layers.Activation().
      Defaults to "leaky_relu", which in turn defaults to a negative slope
      of alpha=0.2.
    activation: The nonlinearity applied to the final result of attention,
      specified in the same ways as attention_activation.
    kernel_initializer: Can be set to a `kernel_initializer` as understood
      by tf.keras.layers.Dense etc.
  """

  def __init__(self,
               *,
               num_heads: int,
               per_head_channels: int,
               receiver_tag: Optional[tfgnn.IncidentNodeOrContextTag] = None,
               receiver_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
               sender_node_feature: Optional[
                   tfgnn.FieldName] = tfgnn.HIDDEN_STATE,
               sender_edge_feature: Optional[tfgnn.FieldName] = None,
               use_bias: bool = True,
               edge_dropout: float = 0.,
               attention_activation: Union[str,
                                           Callable[..., Any]] = "leaky_relu",
               activation: Union[str, Callable[..., Any]] = "relu",
               kernel_initializer: Union[
                   None, str, tf.keras.initializers.Initializer] = None,
               **kwargs):
    kwargs.setdefault("name", "gat_v2_conv")
    super().__init__(
        receiver_tag=receiver_tag,
        receiver_feature=receiver_feature,
        sender_node_feature=sender_node_feature,
        sender_edge_feature=sender_edge_feature,
        extra_receiver_ops={"softmax": tfgnn.softmax},
        **kwargs)
    # Validate hyperparameters eagerly so misconfiguration fails at
    # construction time, not at first call.
    if not self.takes_receiver_input:
      raise ValueError("Receiver feature cannot be None")
    if num_heads <= 0:
      raise ValueError(f"Number of heads {num_heads} must be greater than 0.")
    self._num_heads = num_heads

    if per_head_channels <= 0:
      raise ValueError(
          f"Per-head channels {per_head_channels} must be greater than 0.")
    self._per_head_channels = per_head_channels

    self._use_bias = use_bias

    if not 0 <= edge_dropout < 1:
      raise ValueError(f"Edge dropout {edge_dropout} must be in [0, 1).")
    self._edge_dropout = edge_dropout

    self._attention_activation = tf.keras.activations.get(attention_activation)
    self._activation = tf.keras.activations.get(activation)
    self._kernel_initializer = kernel_initializer

    # Create the transformations for the query input in all heads.
    self._w_query = tf.keras.layers.Dense(
        per_head_channels * num_heads,
        kernel_initializer=kernel_initializer,
        # This bias gets added to the attention features but not the outputs.
        use_bias=use_bias,
        name="query")

    # Create the transformations for value input from sender nodes and edges.
    if self.takes_sender_node_input:
      self._w_sender_node = tf.keras.layers.Dense(
          per_head_channels * num_heads,
          kernel_initializer=kernel_initializer,
          # This bias gets added to the attention features and the outputs.
          use_bias=use_bias,
          name="value_node")
    else:
      self._w_sender_node = None

    if self.takes_sender_edge_input:
      self._w_sender_edge = tf.keras.layers.Dense(
          per_head_channels * num_heads,
          kernel_initializer=kernel_initializer,
          # This bias would be redundant with self._w_sender_node.
          use_bias=use_bias and self._w_sender_node is None,
          name="value_edge")
    else:
      self._w_sender_edge = None

    if self._w_sender_node is None and self._w_sender_edge is None:
      raise ValueError("GATv2Attention initialized with no inputs.")

    # Create attention logits layers, one for each head. Note that we can't
    # use a single Dense layer that outputs `num_heads` units because we need
    # to apply a different attention function a_k to its corresponding
    # W_k-transformed features.
    self._attention_logits_fn = tf.keras.layers.experimental.EinsumDense(
        "...ik,ki->...i",
        output_shape=(None, num_heads, 1),  # TODO(b/205825425): (num_heads,)
        kernel_initializer=kernel_initializer,
        name="attn_logits")

  def get_config(self):
    # NOTE(review): the activation entries are returned as callables, not as
    # their serialized names (tf.keras.activations.serialize); confirm this
    # round-trips through JSON-based model saving as intended.
    return dict(
        num_heads=self._num_heads,
        per_head_channels=self._per_head_channels,
        use_bias=self._use_bias,
        edge_dropout=self._edge_dropout,
        attention_activation=self._attention_activation,
        activation=self._activation,
        kernel_initializer=self._kernel_initializer,
        **super().get_config())

  def convolve(self, *,
               sender_node_input: Optional[tf.Tensor],
               sender_edge_input: Optional[tf.Tensor],
               receiver_input: Optional[tf.Tensor],
               broadcast_from_sender_node: Callable[[tf.Tensor], tf.Tensor],
               broadcast_from_receiver: Callable[[tf.Tensor], tf.Tensor],
               pool_to_receiver: Callable[..., tf.Tensor],
               extra_receiver_ops: Optional[
                   Mapping[str, Callable[..., Any]]] = None,
               training: bool) -> tf.Tensor:
    """Computes multi-head GATv2 attention and pools messages to receivers."""
    # Form the attention query for each head.
    # [num_items, *extra_dims, num_heads, channels_per_head]
    assert receiver_input is not None, "__init__() should have checked this."
    query = broadcast_from_receiver(self._split_heads(self._w_query(
        receiver_input)))

    # Form the attention value by transforming the configured inputs
    # and adding up the transformed values.
    # [num_items, *extra_dims, num_heads, channels_per_head]
    value_terms = []
    if sender_node_input is not None:
      value_terms.append(broadcast_from_sender_node(
          self._split_heads(self._w_sender_node(sender_node_input))))
    if sender_edge_input is not None:
      value_terms.append(
          self._split_heads(self._w_sender_edge(sender_edge_input)))
    assert value_terms, "Internal error: no values, __init__ should catch this."
    value = tf.add_n(value_terms)

    # Compute the features from which attention logits are computed.
    # [num_items, *extra_dims, num_heads, channels_per_head]
    attention_features = self._attention_activation(query + value)

    # Compute the attention logits and softmax to get the coefficients.
    # [num_items, *extra_dims, num_heads, 1]
    logits = tf.expand_dims(self._attention_logits_fn(attention_features), -1)
    attention_coefficients = extra_receiver_ops["softmax"](logits)

    if training:
      # Apply dropout to the normalized attention coefficients, as is done in
      # the original GAT paper. This should have the same effect as edge
      # dropout. Also, note that tf.nn.dropout upscales the remaining values,
      # which should maintain the sum-up-to-1 per node in expectation.
      # (This branch runs whenever training, even if self._edge_dropout == 0.)
      attention_coefficients = tf.nn.dropout(attention_coefficients,
                                             self._edge_dropout)

    # Apply the attention coefficients to the transformed query.
    # [num_items, *extra_dims, num_heads, per_head_channels]
    messages = value * attention_coefficients
    # Take the sum of the weighted values, which equals the weighted average.
    # Receivers without incoming senders get the empty sum 0.
    # [num_receivers, *extra_dims, num_heads, per_head_channels]
    pooled_messages = pool_to_receiver(messages, reduce_type="sum")
    # Apply the nonlinearity.
    pooled_messages = self._activation(pooled_messages)
    pooled_messages = self._merge_heads(pooled_messages)

    return pooled_messages

  # The following helpers map forth and back between tensors with...
  #  - a separate heads dimension: shape [..., num_heads, channels_per_head],
  #  - all heads concatenated:    shape [..., num_heads * channels_per_head].

  def _split_heads(self, tensor):
    # Reshape [..., H*C] -> [..., H, C]; requires statically known extra dims.
    extra_dims = tensor.shape[1:-1]  # Possibly empty.
    if not extra_dims.is_fully_defined():
      raise ValueError(
          "GATv2Attention requires non-ragged Tensors as inputs, "
          "and GraphTensor requires these to have statically known "
          f"dimensions except the first, but got {tensor.shape}")
    new_shape = (-1, *extra_dims, self._num_heads, self._per_head_channels)
    return tf.reshape(tensor, new_shape)

  def _merge_heads(self, tensor):
    # Reshape [..., H, C] -> [..., H*C], the inverse of _split_heads().
    num_merged = 2
    extra_dims = tensor.shape[1 : -num_merged]  # Possibly empty.
    merged_dims = tensor.shape[-num_merged:]
    if not extra_dims.is_fully_defined() or not merged_dims.is_fully_defined():
      raise ValueError(
          f"Unexpected unknown dimensions in shape {tensor.shape}")
    new_shape = (-1, *extra_dims, merged_dims.num_elements())
    return tf.reshape(tensor, new_shape)
def GATv2EdgePool(*,  # To be called like a class initializer. pylint: disable=invalid-name
                  num_heads: int,
                  per_head_channels: int,
                  receiver_tag: Optional[tfgnn.IncidentNodeOrContextTag] = None,
                  receiver_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
                  sender_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
                  **kwargs):
  """Returns a layer for pooling edges with GATv2-style attention.

  With receiver_tag set to SOURCE or TARGET, calling the returned layer on an
  edge set computes a weighted sum of the edge states at that endpoint, with
  the weights computed by the method of Graph Attention Networks v2 (GATv2) --
  except that the edge states themselves (not node states broadcast from the
  other endpoint) serve as the attention values.

  With receiver_tag set to CONTEXT, calling the returned layer on an edge set
  performs the analogous pooling of edge states into the context.

  NOTE: This layer cannot pool node states. For that, use GATv2Conv.

  Args:
    num_heads: The number of attention heads.
    per_head_channels: The number of channels for each attention head, so the
      final output size will be per_head_channels * num_heads.
    receiver_tag: The graph piece for which the attention results are
      aggregated.
      If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or
      node set.
      If set to an IncidentNodeTag (e.g., `tfgnn.SOURCE` or `tfgnn.TARGET`),
      the layer can be called for an edge set and will aggregate results at
      the specified endpoint of the edges.
      If left unset, the tag must be passed when calling the layer.
    receiver_feature: The receiver feature used to compute the attention
      query; defaults to the receiver's default state feature.
    sender_feature: The edge set feature used to compute the attention
      values; defaults to the edge set's default state feature.
    **kwargs: Any other option for GATv2Conv, except sender_node_feature,
      which is fixed to None here.
  """
  # Reject the one GATv2Conv option that makes no sense for edge pooling.
  sender_node_feature = kwargs.pop("sender_node_feature", None)
  if sender_node_feature is not None:
    raise TypeError("GATv2EdgePool() got an unexpected keyword argument "
                    "'sender_node_feature'. Did you mean GATv2Conv()?")
  kwargs.setdefault("name", "gat_v2_edge_pool")

  return GATv2Conv(
      num_heads=num_heads,
      per_head_channels=per_head_channels,
      receiver_tag=receiver_tag,
      receiver_feature=receiver_feature,
      sender_node_feature=None,
      sender_edge_feature=sender_feature,
      **kwargs)
def GATv2GraphUpdate(*,  # To be called like a class initializer. pylint: disable=invalid-name
                     num_heads: int,
                     per_head_channels: int,
                     edge_set_name: str,
                     feature_name: str = tfgnn.HIDDEN_STATE,
                     name: str = "gat_v2",
                     **kwargs):
  """Returns a GraphUpdater layer with a Graph Attention Network V2 (GATv2).

  The returned layer performs one update step of a Graph Attention Network v2
  (GATv2) from https://arxiv.org/abs/2105.14491 on an edge set of a
  GraphTensor. It is best suited for graphs that have just that one edge set.
  For heterogeneous graphs with multiple node sets and edge sets, users are
  advised to consider a GraphUpdate with one or more GATv2Conv objects
  instead.

  This implementation of GAT attends only to edges that are explicitly stored
  in the input GraphTensor. Attention of a node to itself requires having an
  explicit loop in the edge set.

  Args:
    num_heads: The number of attention heads.
    per_head_channels: The number of channels for each attention head, so the
      final output size will be per_head_channels * num_heads.
    edge_set_name: A GATv2 update happens on this edge set and its incident
      node set(s) of the input GraphTensor.
    feature_name: The feature name of node states; defaults to
      tfgnn.HIDDEN_STATE.
    name: Optionally, a name for the layer returned.
    **kwargs: Any optional arguments to GATv2Conv, see there.
  """
  # Compat logic, remove in late 2021.
  if "output_feature_name" in kwargs:
    raise TypeError("Argument 'output_feature_name' is no longer supported.")

  # Building the GraphUpdate for the target node set of edge_set_name must be
  # deferred until a GraphTensorSpec reveals which node set that is.
  def deferred_init_callback(spec: tfgnn.GraphTensorSpec):
    adjacency_spec = spec.edge_sets_spec[edge_set_name].adjacency_spec
    target_node_set = adjacency_spec.node_set_name(tfgnn.TARGET)
    conv = GATv2Conv(
        num_heads=num_heads, per_head_channels=per_head_channels,
        receiver_tag=tfgnn.TARGET,
        sender_node_feature=feature_name, receiver_feature=feature_name,
        **kwargs)
    update = tfgnn.keras.layers.NodeSetUpdate(
        {edge_set_name: conv},
        next_state=NextStateForNodeSetFromSingleEdgeSetInput(),
        node_input_feature=feature_name)
    return dict(node_sets={target_node_set: update})

  return tfgnn.keras.layers.GraphUpdate(
      deferred_init_callback=deferred_init_callback, name=name)
# For use by GATv2GraphUpdate().
@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")
class NextStateForNodeSetFromSingleEdgeSetInput(tf.keras.layers.Layer):
  """Returns the state of the single incoming edge set as the next node state."""

  def call(self, inputs):
    # The node and context inputs are deliberately ignored.
    _, edge_inputs, _ = inputs
    (next_state,) = edge_inputs.values()  # Exactly one edge set is expected.
    return next_state
| 2.65625 | 3 |
volttron/platform/vip/agent/subsystems/hello.py | bl33m/volttron | 1 | 12758124 | <filename>volttron/platform/vip/agent/subsystems/hello.py<gh_stars>1-10
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import logging
import weakref
from .base import SubsystemBase
from ..errors import VIPError
from ..results import ResultsDictionary
from zmq import ZMQError
from zmq.green import ENOTSOCK
from volttron.platform.vip.socket import Message
__all__ = ['Hello']
_log = logging.getLogger(__name__)
class Hello(SubsystemBase):
    """ The hello subsystem allows an agent to determine its identity.

    The identity is possibly a dynamically generated uuid from which the
    executing agent does not know.  This subsystem allows the agent to be
    able to determine its identity from a peer.  By default that peer is
    the connected router, however this could be another agent.
    """

    def __init__(self, core):
        # Hold the core weakly to avoid a reference cycle; it must be
        # dereferenced with self.core() before use.
        self.core = weakref.ref(core)
        self._results = ResultsDictionary()
        core.register('hello', self._handle_hello, self._handle_error)

    def hello(self, peer=''):
        """ Receives a welcome message from the peer (default to '' router)

        The welcome message will respond with a 3 element list:

         - The vip version (default 1.0)
         - The peer who responded (should be the same as `peer` argument
           to this function.
         - The id of the requester (i.e. this object). This will be the
           identity when the agent connects to the router or the specified
           identity when the `Agent` is constructed.

        :param peer: The peer to receive the response from.
        :return: An async result that resolves to [version, peer, identity]
        """
        _log.info('{0} Requesting hello from peer ({1})'.format(
            self.core().identity, peer))
        result = next(self._results)
        connection = self.core().connection
        if not connection:
            # BUG FIX: the message had no placeholder, so the identity passed
            # to .format() was silently dropped from the log output.
            _log.error("{0}: Connection object not yet created".format(
                self.core().identity))
        else:
            try:
                connection.send_vip(peer, 'hello', args=['hello'], msg_id=result.ident)
            except ZMQError as exc:
                # ENOTSOCK: the underlying zmq socket is closed or was never
                # opened; other zmq errors propagate to the caller.
                if exc.errno == ENOTSOCK:
                    _log.error("Socket send on non socket {}".format(self.core().identity))
        return result

    # Calling the subsystem object directly is equivalent to hello().
    __call__ = hello

    def _handle_hello(self, message):
        """Handles both directions of the hello protocol.

        Responds to an incoming 'hello' request with a 'welcome' message and
        resolves the pending result when a 'welcome' reply arrives.
        """
        _log.info('Handling hello message {}'.format(message))
        try:
            # zmq
            op = message.args[0]
        except IndexError:
            _log.error('missing hello subsystem operation')
            return

        if op == 'hello':
            message.user = ''
            # BUG FIX: self.core is a weakref.ref and must be called to obtain
            # the Core instance; self.core.identity raised AttributeError.
            message.args = ['welcome', '1.0', self.core().identity, message.peer]
            self.core().connection.send_vip_object(message, copy=False)
        elif op == 'welcome':
            try:
                result = self._results.pop(message.id)
            except KeyError:
                # Nobody is waiting on this reply (e.g. the caller gave up).
                return
            result.set(list(message.args[1:]))
        else:
            _log.error('unknown hello subsystem operation')

    def _handle_error(self, sender, message, error, **kwargs):
        """Propagates a VIP error to the caller waiting on the result."""
        try:
            result = self._results.pop(message.id)
        except KeyError:
            return
        result.set_exception(error)
| 1.546875 | 2 |
Day1/FM_AFM_example.py | brittanyyylu/cornerstones_week_2_day_1 | 0 | 12758125 | # Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
******** Ferromagnetic chain ********
1. Try to make an anti-ferromagnetic chain
2. What happens when you add linear biases on either end of a ferromagnetic chain?
3. What happens when you modify the J's so they aren't all the same value?
'''
from dwave.system import EmbeddingComposite, DWaveSampler
import dwave.inspector as inspector

# Modifiable parameters
num_qubits = 10  # Number of qubits in our chain
fm_qubit_bias = [0] * num_qubits  # List of biases to apply to each qubit in our chain
fm_coupler_strength = -1  # The coupling we want to apply to two adjacent qubits

# Ising model parameters: linear biases h and quadratic couplings J along
# the chain (one coupler between each pair of neighboring qubits).
h = fm_qubit_bias
J = {(qubit, qubit + 1): fm_coupler_strength for qubit in range(num_qubits - 1)}

# Submit the problem to the QPU
sampler = EmbeddingComposite(DWaveSampler(solver={'qpu': True}))
sampleset = sampler.sample_ising(h, J, num_reads=10)

inspector.show(sampleset)

print("Ferromagetic QPU response")
print(sampleset)
basic/12_exception.py | onezens/python | 0 | 12758126 | #!/usr/bin/python
#encoding=utf8
# NOTE: This file uses Python 2 syntax ("except Exception, e") and will not
# run under Python 3 ("except Exception as e" is required there).

# General pattern for handling any exception
try:  # code that may raise an exception
    open('123.txt', 'r')
except Exception, e:  # runs if an exception was raised
    print(e)
else:  # runs only if no exception was raised
    pass
finally:  # always runs, whether or not an exception occurred
    pass


# Handling only specific exception types
try:
    print(a)
except (IOError, NameError), errorMsg:
    print("error message: %s" %errorMsg)
| 3.140625 | 3 |
robots/wagtail_hooks.py | zerolab/wagtail-robots | 1 | 12758127 | <reponame>zerolab/wagtail-robots
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from robots.models import Rule
class RuleAdmin(ModelAdmin):
    """Exposes robots.txt Rule objects in the Wagtail settings menu.

    The extra methods below render computed columns for the list view;
    their `short_description` attributes set the column headers.
    """

    model = Rule
    menu_label = 'robots.txt'
    menu_icon = "redirect"
    add_to_settings_menu = True
    list_display = (
        'robot', 'affected_sites', 'allowed_urls',
        'disallowed_urls', 'crawl_delay')

    def affected_sites(self, obj):
        """Comma-separated names of the sites the rule applies to."""
        sites = obj.sites.all()
        if sites:
            return ",".join(s.site_name for s in sites)
        return "All sites."
    affected_sites.short_description = 'sites'

    def allowed_urls(self, obj):
        """Space-separated URL patterns the rule explicitly allows."""
        urls = obj.allowed.all()
        if urls:
            return " ".join(u.pattern for u in urls)
        return None
    allowed_urls.short_description = 'allowed'

    def disallowed_urls(self, obj):
        """Space-separated URL patterns the rule disallows."""
        urls = obj.disallowed.all()
        if urls:
            return " ".join(u.pattern for u in urls)
        return None
    # BUG FIX: this previously re-assigned allowed_urls.short_description,
    # leaving the "disallowed" column with its default auto-generated header.
    disallowed_urls.short_description = 'disallowed'


modeladmin_register(RuleAdmin)
{{ cookiecutter.project_slug }}/{{ cookiecutter.project_slug }}/orm.py | artslob/cookiecutter-python | 0 | 12758128 | from uuid import uuid4
import sqlalchemy
from sqlalchemy import TIMESTAMP, Boolean, Column, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.sql import expression
# Deterministic constraint/index naming convention so that migrations
# (e.g. Alembic) can reference every constraint by a predictable name.
naming_convention = {
    "ix": "ix_%(column_0_N_label)s",
    "uq": "uq_%(table_name)s_%(column_0_N_name)s",
    "ck": "ck_%(table_name)s_%(constraint_name)s",
    "fk": "fk_%(table_name)s_%(column_0_N_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s",
}

metadata = sqlalchemy.MetaData(naming_convention=naming_convention)

# Core-style (non-ORM) table definition for application users.
User = sqlalchemy.Table(
    "user",
    metadata,
    # Surrogate primary key, generated client-side via uuid4.
    Column("uid", UUID(), default=uuid4, primary_key=True),
    Column("username", String(), nullable=False, unique=True),
    # Soft-disable flag; false both as Python-side and DB-side default.
    Column("disabled", Boolean(), nullable=False, default=False, server_default=expression.false()),
    Column("hashed_password", String(), nullable=False),
    Column("is_admin", Boolean(), server_default=expression.false(), nullable=False),
    # Timezone-aware creation timestamp; indexed for time-ordered queries.
    Column("created", TIMESTAMP(timezone=True), nullable=False, index=True),
)
| 2.640625 | 3 |
tests/utils/postprocess/test_math.py | ToucanToco/toucan-data-sdk | 9 | 12758129 | <filename>tests/utils/postprocess/test_math.py
from functools import partial
import pandas as pd
import pytest
from toucan_data_sdk.utils.postprocess import (
absolute_values,
add,
divide,
formula,
multiply,
round_values,
subtract,
)
from toucan_data_sdk.utils.postprocess.math import (
DEPRECATED_COLUMN_QUOTE_CHARS,
LOGGER,
FormulaError,
Token,
_parse_formula,
get_new_syntax_formula,
)
def test_math_operations_with_column():
    """ It should return result for basic math operations with a column name"""
    data = pd.DataFrame([{'value1': 10, 'value2': 20}, {'value1': 17, 'value2': 5}])
    kwargs = {'new_column': 'result', 'column_1': 'value1', 'column_2': 'value2'}

    # Each operation is applied between the two columns row-wise.
    cases = [
        (add, [30, 22]),
        (subtract, [-10, 12]),
        (multiply, [200, 85]),
        (divide, [0.5, 3.4]),
    ]
    for operation, expected in cases:
        res = operation(data, **kwargs)
        assert res['result'].tolist() == expected
def test_math_operations_with_number():
    """ It should return result for basic math operations with a constant number"""
    data = pd.DataFrame([{'value1': 10}, {'value1': 17}])
    kwargs = {'new_column': 'value1', 'column_1': 'value1', 'column_2': 0.25}

    # Constant on the right-hand side, result written back onto 'value1'.
    for operation, expected in [
        (add, [10.25, 17.25]),
        (subtract, [9.75, 16.75]),
        (multiply, [2.5, 4.25]),
        (divide, [40.0, 68.0]),
    ]:
        res = operation(data.copy(), **kwargs)
        assert res['value1'].tolist() == expected

    # Constant on the left-hand side, result in a fresh 'result' column.
    data = pd.DataFrame([{'value1': 10}, {'value1': 25}])
    kwargs = {'new_column': 'result', 'column_1': 2, 'column_2': 'value1'}

    for operation, expected in [(add, [12, 27]), (divide, [0.2, 0.08])]:
        res = operation(data.copy(), **kwargs)
        assert res['result'].tolist() == expected
def test_bad_arg():
    """ It should raise an error when calling a math operation with a bad parameter """
    data = pd.DataFrame([{'value1': 10}, {'value1': 17}])

    # A list is not an accepted operand type.
    with pytest.raises(TypeError) as exc_info:
        add(data.copy(), new_column='value1', column_1='value1', column_2=[1, 2])
    assert str(exc_info.value) == 'column_2 must be a string, an integer or a float'

    # Neither is a dict.
    with pytest.raises(TypeError) as exc_info:
        divide(data.copy(), new_column='value1', column_1={'bad': 'type'}, column_2='value1')
    assert str(exc_info.value) == 'column_1 must be a string, an integer or a float'
def test_token():
    """Token should strip whitespace and honor the quoted flag."""
    blank = Token(' ')
    assert len(blank) == 0
    assert repr(blank) == "''"
    assert blank == ''
    assert blank.get_text() == ''

    quoted = Token(' a ', quoted=True)
    # get_text() wraps quoted tokens in backticks; equality ignores quoting.
    assert quoted.get_text() == '`a`'
    assert quoted == Token('a')
# DEPRECATED: OLD SYNTAX
def test_old_parse_formula():
    """The deprecated quote characters (' and ") should tokenize formulas
    like the current backtick syntax, and an unclosed quote should raise."""
    old_parse_formula = partial(_parse_formula, quote_chars=DEPRECATED_COLUMN_QUOTE_CHARS)
    assert old_parse_formula('a') == ['a']
    assert old_parse_formula('a+b') == ['a', '+', 'b']
    assert old_parse_formula('pika + chuuu') == ['pika', '+', 'chuuu']
    assert old_parse_formula('pika + (chuuu/10)') == ['pika', '+', '(', 'chuuu', '/', '10', ')']
    assert old_parse_formula('pika + (chu uu/10)') == ['pika', '+', '(', 'chu uu', '/', '10', ')']
    assert old_parse_formula('pika + (chu_uu/10)') == ['pika', '+', '(', 'chu_uu', '/', '10', ')']
    assert old_parse_formula('pika + ("chu-uu"/10)') == ['pika', '+', '(', 'chu-uu', '/', '10', ')']
    # Unquoted numbers are split into individual character tokens.
    assert old_parse_formula('a + b*3.1') == ['a', '+', 'b', '*', '3', '.', '1']
    assert old_parse_formula('a + "b*3.1"') == ['a', '+', 'b*3.1']
    assert old_parse_formula('("and-another" - yet_another) / (and - another)') == [
        '(',
        'and-another',
        '-',
        'yet_another',
        ')',
        '/',
        '(',
        'and',
        '-',
        'another',
        ')',
    ]
    # The same formula spelled with each deprecated quote style (the noqa
    # comments keep the deliberate quote-variant duplicates lint-clean).
    assert old_parse_formula("pika + ('chu-uu'/10)") == ['pika', '+', '(', 'chu-uu', '/', '10', ')']
    assert old_parse_formula('pika + (\'chu-uu\'/10)') == [  # noqa: Q0
        'pika',
        '+',
        '(',
        'chu-uu',
        '/',
        '10',
        ')',
    ]
    assert old_parse_formula("pika + (\"chu-uu\"/10)") == [  # noqa: Q0
        'pika',
        '+',
        '(',
        'chu-uu',
        '/',
        '10',
        ')',
    ]
    with pytest.raises(FormulaError) as e:
        old_parse_formula('pika + ("chu-uu/10)')
    assert str(e.value) == 'Missing closing quote in formula'
def test_get_new_syntax_formula():
    """Formulas in the old quote syntax should convert to backtick syntax,
    with every column name wrapped in backticks and whitespace removed."""
    assert get_new_syntax_formula('a') == '`a`'
    assert get_new_syntax_formula('a+b') == '`a`+`b`'
    assert get_new_syntax_formula('pika + chuuu') == '`pika`+`chuuu`'
    assert get_new_syntax_formula('pika + (chuuu/10)') == '`pika`+(`chuuu`/10)'
    assert get_new_syntax_formula('pika + (chu_uu/10)') == '`pika`+(`chu_uu`/10)'
    assert get_new_syntax_formula('pika + ("chu-uu"/10)') == '`pika`+(`chu-uu`/10)'
    assert get_new_syntax_formula('a + b*3.1') == '`a`+`b`*3.1'
    assert get_new_syntax_formula('a + "b*3.1"') == '`a`+`b*3.1`'
    old = '("and-another" - yet_another) / (and - another)'
    assert get_new_syntax_formula(old) == '(`and-another`-`yet_another`)/(`and`-`another`)'
    # Same formula in each deprecated quote style (noqa keeps the deliberate
    # quote-variant duplicates lint-clean).
    assert get_new_syntax_formula("pika + ('chu-uu'/10)") == '`pika`+(`chu-uu`/10)'
    assert get_new_syntax_formula('pika + (\'chu-uu\'/10)') == '`pika`+(`chu-uu`/10)'  # noqa: Q0
    assert get_new_syntax_formula("pika + (\"chu-uu\"/10)") == '`pika`+(`chu-uu`/10)'  # noqa: Q0
# DEPRECATED: OLD SYNTAX
def test_formula_old_syntax(mocker):
    """The deprecated unquoted-column syntax should still evaluate correctly
    and emit a deprecation warning pointing at the backtick equivalent."""
    df = pd.DataFrame(
        {
            'a': [1, 3],
            'b': [2, 4],
            'other col': [3, 5],
            'yet_another': [2, 2],
            'and-another': [2, 2],
        }
    )
    with pytest.raises(FormulaError) as exc_info:
        formula(df, new_column='c', formula='a, + b')
    assert str(exc_info.value) == '"a," is not a valid column name'

    with pytest.raises(FormulaError) as exc_info:
        formula(df, new_column='c', formula='import ipdb')
    assert str(exc_info.value) == '"import ipdb" is not a valid column name'

    # The first use of the old syntax should log a deprecation warning.
    log_warning = mocker.patch.object(LOGGER, 'warning')
    res = formula(df, new_column='c', formula='a + b')
    assert res['c'].tolist() == [3, 7]
    log_warning.assert_called_once_with(
        "DEPRECATED: You should always use ` for your columns. Old syntax: 'a + b', new syntax: '`a`+`b`'"
    )

    res = formula(df, new_column='c', formula='.5*a - b')
    assert res['c'].tolist() == [-1.5, -2.5]
    # Column names containing spaces work unquoted in the old syntax.
    res = formula(df, new_column='c', formula='a + other col')
    assert res['c'].tolist() == [4, 8]
    res = formula(df, new_column='c', formula='a + other col / 2')
    assert res['c'].tolist() == [2.5, 5.5]
    res = formula(df, new_column='c', formula='a + other col // 2')
    assert res['c'].tolist() == [2, 5]
    res = formula(df, new_column='c', formula='(a + other col)/ 2.')
    assert res['c'].tolist() == [2, 4]
    res = formula(df, new_column='c', formula='(a + b ) % 3')
    assert res['c'].tolist() == [0, 1]
    res = formula(df, new_column='c', formula='yet_another + b')
    assert res['c'].tolist() == [4, 6]
    res = formula(df, new_column='c', formula='(yet_another + b ) % 3')
    assert res['c'].tolist() == [1, 0]
    # A dash in an unquoted name is ambiguous with subtraction: must be quoted.
    with pytest.raises(FormulaError):
        formula(df, new_column='c', formula='and-another - yet_another')
    res = formula(df, new_column='c', formula='"and-another" - yet_another')
    assert res['c'].tolist() == [0, 0]
def test_formula():
    """Exercise the backtick-quoted column syntax of ``formula``."""
    df = pd.DataFrame(
        {
            'a': [1, 3],
            'b': [2, 4],
            'other col': [3, 5],
            'yet_another': [2, 2],
            'and-another': [2, 2],
        }
    )
    # Invalid identifiers must be rejected with a clear message.
    with pytest.raises(FormulaError) as exc_info:
        formula(df, new_column='c', formula='`a,` + `b`')
    assert str(exc_info.value) == '"a," is not a valid column name'
    with pytest.raises(FormulaError) as exc_info:
        formula(df, new_column='c', formula='`import ipdb`')
    assert str(exc_info.value) == '"import ipdb" is not a valid column name'
    # Arithmetic expressions and their expected result column, in order.
    expected_results = [
        ('`a` + `b`', [3, 7]),
        ('.5*`a` - `b`', [-1.5, -2.5]),
        ('`a` + `other col`', [4, 8]),
        ('`a` + `other col` / 2', [2.5, 5.5]),
        ('`a` + `other col` // 2', [2, 5]),
        ('(`a` + `other col`)/ 2.', [2, 4]),
        ('(`a` + `b` ) % 3', [0, 1]),
        ('`yet_another` + `b`', [4, 6]),
        ('(`yet_another` + `b` ) % 3', [1, 0]),
    ]
    for expression, expected in expected_results:
        res = formula(df, new_column='c', formula=expression)
        assert res['c'].tolist() == expected
    # A dash outside backticks is subtraction, so `and`/`another` are unknown.
    with pytest.raises(FormulaError):
        formula(df, new_column='c', formula='`and`-`another` - `yet_another`')
    res = formula(df, new_column='c', formula='`and-another` - `yet_another`')
    assert res['c'].tolist() == [0, 0]
def test_formula_number_columns():
    """Numeric column names must be quoted to be treated as columns."""
    df = pd.DataFrame({'2017': [3, 2], '2018': [8, -1]})
    # Unquoted numbers are plain integer literals: 2018 - 2017 == 1 everywhere.
    res = formula(df, new_column='evo', formula='2018 - 2017')
    assert res['evo'].tolist() == [1, 1]
    # Both the deprecated double-quote syntax and the backtick syntax
    # reference the actual columns.
    for quoted_expression in ('"2018" - "2017"', '`2018` - `2017`'):
        res = formula(df, new_column='evo', formula=quoted_expression)
        assert res['evo'].tolist() == [5, -3]
# ~~ round_values & absolute_values ~~~
# Shared fixture: mixed negative/positive/zero values used by the two tests below.
data = pd.DataFrame(
    [
        {'ENTITY': 'A', 'VALUE_1': -1.563, 'VALUE_2': -1.563},
        {'ENTITY': 'A', 'VALUE_1': 0.423, 'VALUE_2': 0.423},
        {'ENTITY': 'A', 'VALUE_1': 0, 'VALUE_2': 0},
        {'ENTITY': 'A', 'VALUE_1': -1.612, 'VALUE_2': 1.612},
    ]
)
def test_round_values():
    """round_values rounds the requested column to the given precision."""
    rounded = round_values(data.copy(), column='VALUE_1', decimals=1)
    assert rounded['VALUE_1'].tolist() == [-1.6, 0.4, 0, -1.6]
def test_absolute_values():
    """absolute_values replaces the column with its absolute values."""
    transformed = absolute_values(data.copy(), column='VALUE_1')
    assert transformed['VALUE_1'].tolist() == [1.563, 0.423, 0, 1.612]
| 2.90625 | 3 |
jorldy/test/core/network/test_rainbow_network.py | zenoengine/JORLDY | 300 | 12758130 | import torch
from core.network.rainbow import Rainbow
def test_rainbow_call():
    """A Rainbow network maps a batch of states to (batch, action, atom) logits."""
    in_dim, out_dim, hidden_dim = 2, 3, 4
    n_atom = 5
    net = Rainbow(
        D_in=in_dim,
        D_out=out_dim,
        N_atom=n_atom,
        noise_type="factorized",
        D_hidden=hidden_dim,
    )
    n_samples = 6
    dummy_state = torch.rand((n_samples, in_dim))
    result = net(dummy_state, is_train=True)
    assert result.shape == (n_samples, out_dim, n_atom)
| 2.265625 | 2 |
binding.gyp | johnbotris/electron-dragdrop-win | 0 | 12758131 | {
'targets': [
{
'target_name': 'electron-dragdrop-win',
'include_dirs': [
'<!(node -e "require(\'nan\')")',
],
'defines': [ 'UNICODE', '_UNICODE'],
'sources': [
],
'conditions': [
['OS=="win"', {
'sources': [
"src/addon.cpp",
"src/Worker.cpp",
"src/v8utils.cpp",
"src/ole/DataObject.cpp",
"src/ole/DropSource.cpp",
"src/ole/EnumFormat.cpp",
"src/ole/Stream.cpp",
"src/ole/ole.cpp"
],
}],
['OS!="win"', {
'sources': [
"src/addon-unsupported-platform.cc"
],
}]
]
}
]
}
| 1.070313 | 1 |
scripts/predict.py | gsGupta11/Review-Analyser | 0 | 12758132 | <reponame>gsGupta11/Review-Analyser<gh_stars>0
from keras.models import load_model,model_from_json
import numpy as np
import tensorflow as tf
import cv2
def mask(test_image):
    """Classify a face image as masked or not using the pre-trained model.

    Args:
        test_image: image array of any size (e.g. as returned by cv2.imread).

    Returns:
        str: "Masked" or "Not Masked".
    """
    # Load the model architecture from JSON and its weights from HDF5.
    # Context manager guarantees the JSON file handle is closed (the original
    # leaked it).
    with open('../model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = tf.keras.models.model_from_json(loaded_model_json)
    loaded_model.load_weights("../model.h5")
    # The network expects 64x64 inputs with a leading batch dimension.
    test_image = cv2.resize(test_image, (64, 64))
    test_image = np.expand_dims(test_image, axis=0)
    result = list(loaded_model.predict(test_image)[0])
    # result[1] > result[0] -> "not masked" class wins -- label order assumed
    # from the original comparison; TODO confirm against training labels.
    if result[1] > result[0]:
        return "Not Masked"
    else:
        return "Masked"
util/png_to_jpeg.py | Jim-Lin/dark-classifier | 2 | 12758133 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
from PIL import Image
from multiprocessing import Pool
from functools import partial
def convert(dir_from_sub, dir_to_sub, file):
    # Convert one PNG file into a JPEG in the destination directory.
    # NOTE: this module uses Python 2 print statements.
    file_from = os.path.join(dir_from_sub, file)
    file_from_size = os.path.getsize(file_from)
    # Skip empty (zero-byte) source files.
    if file_from_size == 0:
        return
    filename = os.path.splitext(file)[0]
    file_to = os.path.join(dir_to_sub, filename + ".jpg")
    # Only convert when the target JPEG is missing, so reruns are resumable.
    if not os.path.exists(file_to):
        im = Image.open(file_from)
        # PNG may carry an alpha channel; JPEG requires plain RGB.
        im.convert('RGB').save(file_to, 'JPEG')
        print file_to
def main(args):
    # Mirror dir_from/<id>/*.png into dir_to/<id>/*.jpg, in parallel.
    dir_from = args.dir_from
    dir_to = args.dir_to
    if not os.path.exists(dir_to):
        os.makedirs(dir_to)
    # Subdirectory names are IDs; skip hidden entries and torch .t7 files.
    ids = [f for f in os.listdir(dir_from) if not f.startswith('.') and f.find('.t7') == -1]
    for actress_id in ids:
        dir_from_sub = os.path.join(dir_from, actress_id)
        files = [f for f in os.listdir(dir_from_sub) if not f.startswith('.')]
        # Only process IDs with at least 40 images (dataset size threshold).
        if len(files) >= 40:
            dir_to_sub = os.path.join(dir_to, actress_id)
            if not os.path.exists(dir_to_sub):
                os.makedirs(dir_to_sub)
            # Bind the fixed directories; pool maps over the file names.
            func = partial(convert, dir_from_sub, dir_to_sub)
            pool = Pool()
            pool.map(func, files)
            pool.close()
            pool.join()
if __name__ == '__main__':
    # CLI: --dir_from <png dir> --dir_to <jpeg dir>
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dir_from',
        type=str,
        help='png dir',
        required=True
    )
    parser.add_argument(
        '--dir_to',
        type=str,
        help='jpeg dir',
        required=True
    )
    args = parser.parse_args()
    main(args)
| 2.6875 | 3 |
app/readers/exceptions.py | capralifecycle/office-games-client | 0 | 12758134 | def format_reader_port_message(message, reader_port, error):
return f'{message} with {reader_port}. Error: {error}'
def format_reader_message(message, vendor_id, product_id, serial_number):
    """Build a human-readable message identifying a reader by its USB attributes."""
    details = 'vendor_id={}, product_id={} and serial_number={}'.format(
        vendor_id, product_id, serial_number
    )
    return '{} with {}'.format(message, details)
class ReaderNotFound(Exception):
    """Raised when no RFID reader matching the given USB attributes exists."""

    def __init__(self, vendor_id, product_id, serial_number):
        message = format_reader_message(
            'No RFID Reader found', vendor_id, product_id, serial_number
        )
        super(ReaderNotFound, self).__init__(message)
class ReaderCouldNotConnect(Exception):
    """Raised when a reader exists on a port but the connection fails."""

    def __init__(self, reader_port, error):
        message = format_reader_port_message(
            'Could not connect to reader', reader_port, error
        )
        super(ReaderCouldNotConnect, self).__init__(message)
| 3.078125 | 3 |
choleskyError.py | nick-terry/Splitting-GP | 1 | 12758135 | <reponame>nick-terry/Splitting-GP
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 16:50:23 2020
@author: pnter
"""
import torch
import gpytorch
import pickle
class GPModel(gpytorch.models.ExactGP):
    """Minimal exact-GP regression model used to reproduce a Cholesky error."""
    def __init__(self, train_x, train_y, likelihood, kernel):
        super(GPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = kernel
        self.likelihood = likelihood
        # Hyperparameters are fitted immediately on construction.
        self.initTraining()
    def forward(self,x):
        # Standard GP prior: constant mean plus kernel covariance.
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
    def initTraining(self):
        # Switch model and likelihood to training mode.
        self.train()
        self.likelihood.train()
        # Adam over all model parameters; exact marginal log-likelihood loss.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.1)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
        # Fixed number of optimisation steps (no convergence check).
        for i in range(200):
            self.optimizer.zero_grad()
            output = self(self.train_inputs[0])
            loss = -mll(output, self.train_targets)
            loss.backward()
            self.optimizer.step()
    def predict(self,x):
        # Switch to eval/prediction mode before computing the posterior.
        self.eval()
        self.likelihood.eval()
        with torch.no_grad(), gpytorch.settings.fast_pred_var():
            prediction = self.likelihood(self(x))
        return prediction
return prediction
# Three 2-D training points (ExactGP takes the inputs wrapped in a list).
train_x = [torch.tensor([[ 0.3878, -0.1837],
        [ 0.3878, -0.1020],
        [ 1.0000, 0.6735]])]
train_y = torch.tensor([0.2710, 0.2042, 0.3384])
# RBF kernel with one lengthscale per input dimension.
kernel = gpytorch.kernels.RBFKernel(ard_num_dims=2)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPModel(train_x, train_y, likelihood, kernel)
# Restore the pickled parameter state that triggers the Cholesky failure.
with open('error_state_dict','rb') as f:
    err_state_dict = pickle.load(f)
model.load_state_dict(err_state_dict)
# Predict on the training inputs, then condition on one extra observation.
model.predict(model.train_inputs[0].unsqueeze(0))
fantasy_x1 = torch.tensor([[ 0.3061, -0.2245]])
fantasy_y1 = torch.tensor([0.2633])
model = model.get_fantasy_model(fantasy_x1,fantasy_y1)
| 2.3125 | 2 |
deployment.py | Frizzles7/dynamic-risk-assessment-system | 0 | 12758136 | <reponame>Frizzles7/dynamic-risk-assessment-system
#!/usr/bin/env python3
"""
Script to copy the model files to deployment directory
Author: <NAME>
Date: October 7, 2021
"""
from flask import Flask, session, jsonify, request
import pandas as pd
import numpy as np
import pickle
import os
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import json
import shutil
# Load config.json and resolve the directory paths used by this step.
with open('config.json','r') as f:
    config = json.load(f)
dataset_csv_path = os.path.join(config['output_folder_path'])
model_path = os.path.join(config['output_model_path'])
prod_deployment_path = os.path.join(config['prod_deployment_path'])
# function for deployment
def store_model_into_pickle():
'''
Copy the model pickle file, the latest score text file, and the text file
listing the data files ingested into the deployment directory
'''
# copy model pickle file
shutil.copy(
os.path.join(os.getcwd(), model_path,'trainedmodel.pkl'),
os.path.join(os.getcwd(), prod_deployment_path,'trainedmodel.pkl')
)
# copy latest score text file
shutil.copy(
os.path.join(os.getcwd(), model_path,'latestscore.txt'),
os.path.join(os.getcwd(), prod_deployment_path,'latestscore.txt')
)
# copy list of ingested data files
shutil.copy(
os.path.join(os.getcwd(), dataset_csv_path,'ingestedfiles.txt'),
os.path.join(os.getcwd(), prod_deployment_path,'ingestedfiles.txt')
)
# Run the deployment step when executed directly.
if __name__ == '__main__':
    store_model_into_pickle()
| 2.203125 | 2 |
record.py | a276me/iris | 1 | 12758137 |
from scipy.io import wavfile
import sounddevice as sd
# import speech_recognition as sr
import os
from pyaudio import PyAudio, paInt16, paFloat32
from scipy.io.wavfile import *
from random import randint
import time
import wave
import base64
import matplotlib.pyplot as plt
import soundfile as sf
# print(o)
#
# exit(122)
# sd.play('./tmp/float32speech.wav')
# sd.wait()
fs=16000  # sampling rate in Hz
# Record 4 seconds of mono 16-bit audio from the default input device.
myrecording = sd.rec(int(4 * fs), samplerate=fs, channels=1, dtype='int16')
sd.wait()  # Wait until recording is finished
print('finished')
# Play the recording back, then persist it as a WAV file.
sd.play(myrecording, fs)
write(f'./tmp/test.wav', fs, myrecording)
# data, fs = sf.read('./tmp/float32speech.wav', dtype='float32')
#
# plt.plot(data)
# plt.show()
# print(sd.wait()fs)
# sd.play(data, fs)
#
# sd.wait()
# import pyaudio
# import wave
#
# chunk = 1024 # Record in chunks of 1024 samples
# sample_format = pyaudio.paInt16 # 16 bits per sample
# channels = 1
# fs = 44100 # Record at 44100 samples per second
# seconds = 3
# filename = "output.wav"
#
# p = pyaudio.PyAudio() # Create an interface to PortAudio
#
# print('Recording')
#
# stream = p.open(format=sample_format,
# channels=channels,
# rate=fs,
# frames_per_buffer=chunk,
# input=True)
#
# frames = [] # Initialize array to store frames
#
# # Store data in chunks for 3 seconds
# for i in range(0, int(fs / chunk * seconds)):
# data = stream.read(chunk)
# frames.append(data)
#
# # Stop and close the stream
# stream.stop_stream()
# stream.close()
# # Terminate the PortAudio interface
# p.terminate()
#
# print('Finished recording')
#
# # Save the recorded data as a WAV file
# wf = wave.open(filename, 'wb')
# wf.setnchannels(channels)
# wf.setsampwidth(p.get_sample_size(sample_format))
# wf.setframerate(fs)
# wf.writeframes(b''.join(frames))
# wf.close()
#
#
#
# fs, data = read('output.wav')
# plt.plot(data)
# plt.show()
#
#
#
# # def save_wave_file(filepath, data):
# # wf = wave.open(filepath, 'wb')
# # wf.setnchannels(1)
# # wf.setsampwidth(2)
# # wf.setframerate(16000)
# # wf.writeframes(b''.join(data))
# # wf.close()
# #
# # pa = PyAudio()
# #
# # stream = pa.open(format=paFloat32, channels=1,
# # rate=16000, input=True, frames_per_buffer=2000, )
# # my_buf = []
# # # count = 0
# # t = time.time()
# # while time.time() < t + 6: # 秒
# # string_audio_data = stream.read(2000)
# # my_buf.append(string_audio_data)
# # # print('录音结束.')
# # save_wave_file('./tmp/static.wav', my_buf)
# # stream.close() | 2.953125 | 3 |
m2cgen/interpreters/ruby/code_generator.py | Symmetry-International/m2cgen | 2,161 | 12758138 | <reponame>Symmetry-International/m2cgen
from contextlib import contextmanager
from m2cgen.interpreters.code_generator import CodeTemplate, ImperativeCodeGenerator
class RubyCodeGenerator(ImperativeCodeGenerator):
    """Emits Ruby source code from the intermediate model representation."""

    tpl_var_declaration = CodeTemplate("")
    tpl_num_value = CodeTemplate("{value}")
    tpl_infix_expression = CodeTemplate("({left}) {op} ({right})")
    tpl_return_statement = tpl_num_value
    tpl_array_index_access = CodeTemplate("{array_name}[{index}]")
    tpl_if_statement = CodeTemplate("if {if_def}")
    tpl_else_statement = CodeTemplate("else")
    tpl_block_termination = CodeTemplate("end")
    tpl_var_assignment = CodeTemplate("{var_name} = {value}")

    def add_function_def(self, name, args):
        # Emit the "def name(a, b)" header line and indent the body.
        arg_list = ", ".join(args)
        self.add_code_line("def {}({})".format(name, arg_list))
        self.increase_indent()

    @contextmanager
    def function_definition(self, name, args):
        # Wrap a function body between its header and the closing "end".
        self.add_function_def(name, args)
        yield
        self.add_block_termination()

    def method_invocation(self, method_name, obj, args):
        arg_list = ", ".join(str(arg) for arg in args)
        return "({}).{}({})".format(obj, method_name, arg_list)

    def vector_init(self, values):
        return "[{}]".format(", ".join(values))
| 2.40625 | 2 |
lib/services/loadbalancer/ncloud_loadbalancer/model/change_load_balancer_instance_configuration_request.py | KidongSohn/ncloud-sdk-py | 0 | 12758139 | # coding: utf-8
"""
loadbalancer
OpenAPI spec version: 2018-06-21T02:19:18Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_loadbalancer.model.load_balancer_rule_parameter import LoadBalancerRuleParameter # noqa: F401,E501
class ChangeLoadBalancerInstanceConfigurationRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'load_balancer_instance_no': 'str',
        'load_balancer_algorithm_type_code': 'str',
        'load_balancer_description': 'str',
        'load_balancer_rule_list': 'list[LoadBalancerRuleParameter]'
    }
    attribute_map = {
        'load_balancer_instance_no': 'loadBalancerInstanceNo',
        'load_balancer_algorithm_type_code': 'loadBalancerAlgorithmTypeCode',
        'load_balancer_description': 'loadBalancerDescription',
        'load_balancer_rule_list': 'loadBalancerRuleList'
    }
    def __init__(self, load_balancer_instance_no=None, load_balancer_algorithm_type_code=None, load_balancer_description=None, load_balancer_rule_list=None):  # noqa: E501
        """ChangeLoadBalancerInstanceConfigurationRequest - a model defined in Swagger"""  # noqa: E501
        self._load_balancer_instance_no = None
        self._load_balancer_algorithm_type_code = None
        self._load_balancer_description = None
        self._load_balancer_rule_list = None
        self.discriminator = None
        # Required fields go through the validating property setters.
        self.load_balancer_instance_no = load_balancer_instance_no
        self.load_balancer_algorithm_type_code = load_balancer_algorithm_type_code
        if load_balancer_description is not None:
            self.load_balancer_description = load_balancer_description
        self.load_balancer_rule_list = load_balancer_rule_list
    @property
    def load_balancer_instance_no(self):
        """Gets the load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        Load balancer instance number  # noqa: E501
        :return: The load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :rtype: str
        """
        return self._load_balancer_instance_no
    @load_balancer_instance_no.setter
    def load_balancer_instance_no(self, load_balancer_instance_no):
        """Sets the load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer instance number  # noqa: E501
        :param load_balancer_instance_no: The load_balancer_instance_no of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :type: str
        """
        if load_balancer_instance_no is None:
            raise ValueError("Invalid value for `load_balancer_instance_no`, must not be `None`")  # noqa: E501
        self._load_balancer_instance_no = load_balancer_instance_no
    @property
    def load_balancer_algorithm_type_code(self):
        """Gets the load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        Load balancer algorithm type code  # noqa: E501
        :return: The load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :rtype: str
        """
        return self._load_balancer_algorithm_type_code
    @load_balancer_algorithm_type_code.setter
    def load_balancer_algorithm_type_code(self, load_balancer_algorithm_type_code):
        """Sets the load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer algorithm type code  # noqa: E501
        :param load_balancer_algorithm_type_code: The load_balancer_algorithm_type_code of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :type: str
        """
        if load_balancer_algorithm_type_code is None:
            raise ValueError("Invalid value for `load_balancer_algorithm_type_code`, must not be `None`")  # noqa: E501
        self._load_balancer_algorithm_type_code = load_balancer_algorithm_type_code
    @property
    def load_balancer_description(self):
        """Gets the load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        Load balancer description  # noqa: E501
        :return: The load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :rtype: str
        """
        return self._load_balancer_description
    @load_balancer_description.setter
    def load_balancer_description(self, load_balancer_description):
        """Sets the load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer description  # noqa: E501
        :param load_balancer_description: The load_balancer_description of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :type: str
        """
        self._load_balancer_description = load_balancer_description
    @property
    def load_balancer_rule_list(self):
        """Gets the load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        Load balancer rule list  # noqa: E501
        :return: The load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :rtype: list[LoadBalancerRuleParameter]
        """
        return self._load_balancer_rule_list
    @load_balancer_rule_list.setter
    def load_balancer_rule_list(self, load_balancer_rule_list):
        """Sets the load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest.
        Load balancer rule list  # noqa: E501
        :param load_balancer_rule_list: The load_balancer_rule_list of this ChangeLoadBalancerInstanceConfigurationRequest.  # noqa: E501
        :type: list[LoadBalancerRuleParameter]
        """
        if load_balancer_rule_list is None:
            raise ValueError("Invalid value for `load_balancer_rule_list`, must not be `None`")  # noqa: E501
        self._load_balancer_rule_list = load_balancer_rule_list
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ChangeLoadBalancerInstanceConfigurationRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 1.53125 | 2 |
grouper/fe/handlers/group_edit.py | aneeq009/merou | 58 | 12758140 | from __future__ import annotations
from typing import TYPE_CHECKING
from grouper.fe.forms import GroupEditForm
from grouper.fe.util import GrouperHandler
from grouper.models.audit_log import AuditLog
from grouper.models.counter import Counter
from grouper.models.group import Group
from grouper.role_user import is_role_user
from grouper.user_group import user_can_manage_group
if TYPE_CHECKING:
from typing import Any
class GroupEdit(GrouperHandler):
    """Handler for rendering and processing the group-edit form."""
    def get(self, *args: Any, **kwargs: Any) -> None:
        # Render the edit form for an existing group the user may manage.
        name = self.get_path_argument("name")
        group = Group.get(self.session, name=name)
        if not group:
            return self.notfound()
        if not user_can_manage_group(self.session, group, self.current_user):
            return self.forbidden()
        form = GroupEditForm(obj=group)
        self.render("group-edit.html", group=group, form=form)
    def post(self, *args: Any, **kwargs: Any) -> None:
        # Validate the submitted form and apply the edits; renames have
        # extra restrictions (no role-user groups, no name collisions).
        name = self.get_path_argument("name")
        group = Group.get(self.session, name=name)
        if not group:
            return self.notfound()
        if not user_can_manage_group(self.session, group, self.current_user):
            return self.forbidden()
        form = GroupEditForm(self.request.arguments, obj=group)
        if not form.validate():
            return self.render(
                "group-edit.html", group=group, form=form, alerts=self.get_form_alerts(form.errors)
            )
        new_name = form.data["groupname"]
        renamed = group.groupname != new_name
        # Service-account (role user) groups must keep their name.
        if renamed and is_role_user(self.session, group=group):
            form.groupname.errors.append("You cannot change the name of service account groups")
            return self.render(
                "group-edit.html", group=group, form=form, alerts=self.get_form_alerts(form.errors)
            )
        # Reject renames that collide with any existing (even disabled) group.
        if renamed and Group.get(self.session, name=new_name):
            message = f"A group named '{new_name}' already exists (possibly disabled)"
            form.groupname.errors.append(message)
            return self.render(
                "group-edit.html", group=group, form=form, alerts=self.get_form_alerts(form.errors)
            )
        # Apply the validated fields and persist.
        group.groupname = new_name
        group.email_address = form.data["email_address"]
        group.description = form.data["description"]
        group.canjoin = form.data["canjoin"]
        group.auto_expire = form.data["auto_expire"]
        group.require_clickthru_tojoin = form.data["require_clickthru_tojoin"]
        Counter.incr(self.session, "updates")
        self.session.commit()
        AuditLog.log(
            self.session, self.current_user.id, "edit_group", "Edited group.", on_group_id=group.id
        )
        url = f"/groups/{group.name}"
        # After a rename, ask the page to refresh cached group data.
        if renamed:
            url += "?refresh=yes"
        self.redirect(url)
| 2.109375 | 2 |
backend/src/classifier/classifier.py | orchardpark/freman | 1 | 12758141 | <gh_stars>1-10
import abc
from enum import Enum
class Classes(Enum):
    """Productivity classification labels for an application/window pair."""
    PRODUCTIVE_HIGH = 1
    PRODUCTIVE_LOW = 2
    UNPRODUCTIVE = 3
    UNKNOWN = 4
class Classifier(metaclass=abc.ABCMeta):
    """Abstract base class for productivity classifiers."""
    @abc.abstractmethod
    def classify(application: str, window_title: str) -> Classes:
        # NOTE(review): declared without ``self`` and without @staticmethod;
        # confirm how concrete subclasses are expected to call/override this.
        pass
| 2.90625 | 3 |
Code/Chapter-2/Chapter-2.13.py | RyzenMe/Beijing-Jiaotong-University-BigData-Homework | 3 | 12758142 | <gh_stars>1-10
import numpy as np
def mean(data):
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Uses the builtin ``sum`` instead of the original manual accumulation
    loop; raises ZeroDivisionError for an empty sequence, same as before.
    """
    return sum(data) / len(data)
def min_max(data, value=20):  # min-max normalisation
    """Print the midrange of *data* and the min-max normalisation of *value*.

    The value to normalise was hard-coded to 20 in the original; it is now a
    keyword parameter with the same default, so existing callers behave
    identically.
    """
    x_max = max(data)
    x_min = min(data)
    print('中列数', (x_max + x_min) / 2)  # midrange of the data
    x_final = (value - x_min) / (x_max - x_min)
    print(x_final)
def x_score(data): # z-score normalisation of the hard-coded value 20
    # NOTE(review): relies on the module-level global ``stock_sd`` that is
    # only defined inside the ``__main__`` block below; calling this before
    # that block runs raises NameError -- verify intended usage.
    x_final = (20-mean(data))/stock_sd
    print(x_final)
def DecimalScaling(data): # decimal-scaling normalisation
    # Find the smallest power of ten that makes x_max // 10**i zero, then
    # scale the hard-coded value 20 by that power.
    # NOTE(review): the ``i+=1`` below is overwritten by the next loop
    # iteration and only matters if the loop never breaks -- confirm intent.
    x_max = max(data)
    for i in range(10):
        if x_max//10**i > 0:
            i+=1
        else:
            break
    x_decimal = 20/10**i
    print(x_decimal)
def skewness(data):
    # Pearson's second skewness coefficient: 3*(mean - median) / std.
    # NOTE(review): uses the module-level globals ``stock_median`` and
    # ``stock_sd`` defined only in the ``__main__`` block below.
    x_skewness = 3*(mean(data)-stock_median)/stock_sd
    print(x_skewness)
if __name__ == '__main__':
    stock = [10, 7, 20, 12, 75, 15, 9, 18, 4, 12, 8, 14]
    mean_stock = mean(stock)
    print(mean_stock)  # mean (manual implementation above)
    stock_ndarray = np.array(stock)  # stock_ndarray is an ndarray
    print(np.mean(stock_ndarray))  # mean (numpy)
    stock_median = np.median(stock_ndarray)
    print(np.median(stock_ndarray))  # median
    counts = np.bincount(stock_ndarray)  # occurrence count of each integer in [0, max]
    print(counts)
    print(np.argmax(counts))  # index of the largest count -> the mode
    stock_sd = np.std(stock_ndarray,ddof = 1)  # ddof=1 -> unbiased (sample) std, divides by n-1
    print(stock_sd)
    min_max(stock)
    x_score(stock)
    DecimalScaling(stock)  # decimal-scaling normalised value of 20
    skewness(stock)  # data skewness
src/Weapon.py | Diomenios/game_jam_3.0 | 0 | 12758143 | <filename>src/Weapon.py
import arcade
import CONST
class Weapon():
    """Default weapon configuration (fire rate and projectile properties)."""
    def __init__(self):
        self.rate = 20  # ticks between consecutive shots -- TODO confirm unit
        self.ammo_vel = CONST.BULLET_INIT_VEL  # initial projectile velocity
        self.ammo_dmg = 1  # damage dealt per projectile
        self.ammo_hit_point = 1  # projectile durability (hits before vanishing)
        self.nb_bullet = 1  # projectiles fired per shot
| 2.546875 | 3 |
Preparation/Basic_Python Program/split.py | jaiswalIT02/pythonprograms | 0 | 12758144 | <gh_stars>0
word="boy"
print(word)
# Reverse the string by repeatedly prepending each character.
reverse=[]
l=list(word)
for i in l:
    reverse=[i]+reverse
reverse="".join(reverse)
print(reverse)
# Same prepend technique, reversing a list of numbers.
l=[1,2,3,4]
print(l)
r=[]
for i in l:
    r=[i]+r
print(r)
aiodeta/__init__.py | leits/aiodeta | 14 | 12758145 | from .client import Deta
__version__ = "0.1.0"
__all__ = (Deta.__name__,)
| 1.015625 | 1 |
utils/lib/externalHandler.py | kicheolkim/multiple_sclerosis_proj | 9 | 12758146 | import pandas as pd
import numpy as np
import itertools
class handlers(object):
    """Static helpers for reading expression tables and building DESeq inputs.

    The original methods were defined without ``self`` and therefore only
    worked when called on the class itself; they are now proper
    ``@staticmethod``s, which is backward compatible for class-level calls
    and additionally makes instance-level calls work.
    """

    @staticmethod
    def get_column(filename_with_path, ext_value, annot='gene_id', header_line=0, sep="\t", opt=0):
        """
        Extract one value column (indexed by an annotation column) from a
        delimited text file, streaming line by line to keep memory low.

        filename_with_path = filepath + basename
        ext_value = column name of the value column to extract
        annot = column name used as the index
        header_line = zero-based line number of the header row
        sep = field separator
        opt = unused; kept for backward compatibility
        """
        index_list = []
        value_list = []
        with open(filename_with_path, 'r') as infile:
            for i, line in enumerate(infile):
                line = line.strip()
                if i == header_line:  # header row: locate both columns
                    header_info = line.split(sep)
                    value_ext_location = header_info.index(ext_value)
                    index_ext_location = header_info.index(annot)
                else:
                    line_list = line.split(sep)
                    index_list.append(str(line_list[index_ext_location]))
                    value_list.append(float(line_list[value_ext_location]))
        return pd.DataFrame(data={ext_value: value_list}, index=index_list)

    @staticmethod
    def get_samplename(filelist):
        """Return the sample ID (text before the first '-') for each basename."""
        return [name.split("-")[0] for name in filelist]

    @staticmethod
    def get_condtionMatrix_by_category(dataframe, sampleColumn, dataColname, conditions: list):
        """
        Transform metadata into a DESeq condition matrix.

        Input
            dataframe: metadata input
            sampleColumn: column holding the sample ID
            dataColname: column holding the category value
            conditions: exactly two category values to keep, list type
        Output
            DataFrame indexed by sampleID with a single 'conditions' column
        """
        assert len(conditions) == 2, "Please make sure that conditions list has 2 elements"
        sampleList = []
        conditionValues = []
        for cond in conditions:
            samples = dataframe[dataframe[dataColname] == cond][sampleColumn].values.tolist()
            sampleList.extend(samples)
            conditionValues.extend([cond] * len(samples))
        return pd.DataFrame(
            data={'sampleID': sampleList, 'conditions': conditionValues}
        ).set_index('sampleID')
| 3.078125 | 3 |
Programming Advanced/File Handling/file_reader.py | antonarnaudov/SoftUniProjects | 0 | 12758147 | path = '08-File-Handling-Lab-Resources/File Reader/numbers.txt'
file = open(path, 'r')
num_sum = 0
for num in file:
num_sum += int(num)
print(num) # read
print(num_sum)
print(file.read()) # .read(n) n = number
| 3.765625 | 4 |
hsx_01/meiduo_mall/meiduo_mall/apps/meiduo_admin/serializers/home_serializers.py | hsx9527/test01 | 0 | 12758148 | from rest_framework import serializers
from goods.models import GoodsVisitCount
# Serializer for daily goods-visit counts per category
class GoodVistiModelSerializer(serializers.ModelSerializer):
    """Serializer for daily goods-visit counts grouped by category."""
    # Render the related category via its string representation (read-only).
    category = serializers.StringRelatedField()
    class Meta:
        model = GoodsVisitCount
        fields = [
            'category',  # foreign-key related field
            'count'
        ]
apps/venn3.py | orionzhou/biolib | 3 | 12758149 | <filename>apps/venn3.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Readlist utilities
"""
import os.path as op
import sys
import re
import logging
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3, venn3_circles
def venn3_coord(args):
    """Compute circle and label coordinates for a 3-way Venn diagram.

    Reads three comma-separated sets (one per line) from ``args.fi``, lays
    out a venn3 diagram, then writes circle centres/radii to ``args.fo1``
    and subset-label positions/texts to ``args.fo2``.
    All files are opened with context managers so the handles are closed
    even on error (the original leaked all three).
    """
    with open(args.fi, 'r') as fhi:
        s1 = fhi.readline().strip().split(",")
        s2 = fhi.readline().strip().split(",")
        s3 = fhi.readline().strip().split(",")
    v = venn3([set(s1), set(s2), set(s3)], ('A', 'B', 'C'))
    # Circle centres (x, y) and radii, tab separated.
    with open(args.fo1, 'w') as fho1:
        for xy, radius in zip(v.centers, v.radii):
            x, y = xy
            fho1.write("%s\t%s\t%s\n" % (x, y, radius))
    # Subset label positions and texts, tab separated.
    with open(args.fo2, 'w') as fho2:
        for label in v.subset_labels:
            x, y = label.get_position()
            fho2.write("%s\t%s\t%s\n" % (x, y, label.get_text()))
def add_stat(args):
    # Annotate a sample list with read counts and average read lengths.
    # NOTE(review): ``iter_fastq`` and ``SummaryStats`` are not defined or
    # imported anywhere in this module -- calling this function as-is raises
    # NameError; confirm where they are meant to come from.
    cvt = {k: int for k in 'Replicate'.split()}
    sl = pd.read_csv(args.fi, sep="\t", header=0, converters=cvt)
    # Average length is estimated from the first N reads only.
    firstN = 10000
    sl['spots'] = [0] * len(sl.index)
    sl['avgLength'] = [0] * len(sl.index)
    for i in range(len(sl)):
        sid = sl['SampleID'][i]
        fq = ''
        # For paired-end samples, read stats from r1 only.
        if sl['paired'][i]:
            r1, r2 = sl['r1'][i], sl['r2'][i]
            fq = r1
        else:
            fq = sl['r0'][i]
        nrcd = 0
        L = []
        for rec in iter_fastq(fq):
            if not rec:
                break
            nrcd += 1
            if nrcd <= firstN:
                L.append(len(rec))
        avgLength = SummaryStats(L).mean
        # Paired-end: count both mates toward the average fragment length.
        if sl['paired'][i]:
            avgLength = avgLength * 2
        print("\t".join(str(x) for x in (sid, nrcd, avgLength)))
        sl.at[i, 'spots'] = nrcd
        sl.at[i, 'avgLength'] = avgLength
    sl.to_csv(args.fo, sep="\t", header=True, index=False)
def main():
    """Parse CLI arguments and dispatch to the selected sub-command."""
    import argparse
    ps = argparse.ArgumentParser(
        formatter_class = argparse.ArgumentDefaultsHelpFormatter,
        description = '3-way venn-diagram'
    )
    sp = ps.add_subparsers(title = 'available commands', dest = 'command')

    sp1 = sp.add_parser('coord', help='compute venn3 coordinates',
            formatter_class = argparse.ArgumentDefaultsHelpFormatter)
    sp1.add_argument('fi', help = 'input file containing sets')
    sp1.add_argument('fo1', help = 'output circle coordinates')
    sp1.add_argument('fo2', help = 'output label coordinates')
    sp1.set_defaults(func = venn3_coord)

    args = ps.parse_args()
    if args.command:
        args.func(args)
    else:
        print('Error: need to specify a sub command\n')
        # BUG FIX: the original called ``parser.print_help()`` but no name
        # ``parser`` exists in this scope (NameError); the parser is ``ps``.
        ps.print_help()

if __name__ == '__main__':
    main()
| 2.59375 | 3 |
main.py | swipswaps/Mash-Up | 29 | 12758150 | from __future__ import division
import librosa, pydub
import numpy as np
from tempfile import TemporaryFile
import pickle, json, os
class mash:
    def __init__(self, json_, cached=False):
        """Run the whole mash-up pipeline and export the result as final.mp3.

        json_: list of song dicts with 'name', 'path' and 'mixin' keys.
        cached: when True, pickle loaded waveforms under cache/ for reuse.
        """
        self.sr = 22050 # new Sampling Rate for the audio files
        self.songs = json_
        self.Yin = []            # waveform of the mix-in song
        self.Yout = []           # waveform(s) of the mix-out candidates
        self.pathIn = []
        self.pathOut = []
        self.beats = {'in': [], 'out': []}   # beat frames per song
        self.tempo = {'in': 0, 'out': 0}     # estimated BPM per song
        self._setup()
        self._load(cached=cached)
        self._extract()
        self._segment()
        # _speedUp and _mix are defined further down in this file.
        self._speedUp()
        out = self._mix()
        print("Exporting...")
        out.export(out_f="final.mp3", format="mp3")
        print("[SUCCESS] Export as `final.mp3`")
def _setup(self):
if not os.path.exists('cache'):
os.makedirs('cache')
    def _load(self, cached=True):
        """Load every song's waveform, using/refreshing the pickle cache.

        Songs flagged 'mixin' populate self.Yin/pathIn; all others are
        appended to self.Yout/pathOut.
        """
        for song in self.songs:
            # Cache hit: unpickle the previously decoded waveform.
            if os.path.exists("cache/%s.pkl"%song['name']):
                print("\nLoading", song['name'], "from cache")
                with open("cache/%s.pkl"%song['name'], 'rb') as f:
                    if song['mixin']:
                        print("Yin=", song['name'])
                        self.Yin = pickle.load(f)
                        self.pathIn = song['path']
                    else:
                        print("Yout=", song['name'])
                        self.Yout.append(pickle.load(f))
                        self.pathOut.append(song['path'])
                continue
            # Cache miss: decode and resample with librosa.
            print("\nLoading", song['name'])
            y, sr = librosa.load(song['path'], sr=self.sr)
            if song['mixin']:
                self.Yin = y
                self.pathIn = song['path']
            else:
                self.Yout.append(y)
                self.pathOut.append(song['path'])
            print("[SUCCESS] Loaded", song['name'])
            # Optionally persist the decoded waveform for the next run.
            if cached:
                try:
                    with open('cache/%s.pkl'%song['name'], 'wb') as f:
                        pickle.dump(y, f)
                    print("[SUCCESS] Cached", song['name'])
                except Exception as e:
                    print("[FAILED] Caching", song['name'])
                    print(e)
def _extract(self):
# TODO: Add cosine distance similarity to choose the best mixout
self.Yout = self.Yout[0] # NOTE: considering 1mixin & 1mixout
self.pathOut = self.pathOut[0]
self.tempo['in'], self.beats['in'] = librosa.beat.beat_track(y=self.Yin, sr=self.sr)
self.tempo['out'], self.beats['out'] = librosa.beat.beat_track(y=self.Yout, sr=self.sr)
print("TempoIn=", self.tempo['in'])
print("TempoOut=", self.tempo['out'])
self._OTAC()
self._crossFadeRegion()
def _OTAC(self): # Optimal Tempo Adjustment Coefficient Computation
C = [-2, -1, 0, 1, 2]
if self.tempo['in'] == self.tempo['out']:
self.tempo['tgt'] = self.tempo['in']
return
Tin_ = [(2**c)*self.tempo['in'] for c in C]
TinIndex_ = np.argmin(np.absolute(Tin_ - self.tempo['out']))
Copt = C[TinIndex_]
Bopt = (2**Copt)*self.tempo['in']
Tlow = min(Bopt, self.tempo['out'])
Thigh = max(Bopt, self.tempo['out'])
a, b = 0.765, 1
Ttgt = (a-b)*Tlow + np.sqrt( ((a-b)**2)*(Tlow**2) + 4*a*b*Thigh*Tlow )
Ttgt = Ttgt/(2*a)
print("FoptIn=", Ttgt/Bopt)
print("FoptOut=", Ttgt/self.tempo['out'])
print("Ttgt=", Ttgt)
self.tempo['tgt'] = Ttgt
def _crossFadeRegion(self): # Computes the cross fade region for the mixed song
Na = self.beats['in'].shape[0]-1
scores = [self._score(i, Na) for i in range(2, int(Na/4))]
noBeats = np.argmax(scores)+2
inDuration = librosa.get_duration(y=self.Yin, sr=self.sr)
fadeInStart = librosa.frames_to_time(self.beats['in'], sr=self.sr)[-int(noBeats/2)]
fadeIn = inDuration - fadeInStart
fadeOut = librosa.frames_to_time(self.beats['out'], sr=self.sr)[int(noBeats/2)]
print("Best Power Corelation Scores=", np.max(scores))
print("Number of beats in cross fade region=", noBeats)
print("fadeInStart=", fadeInStart)
print("fadeOutEnd=", fadeOut)
print("Cross Fade Time=", fadeIn+fadeOut)
self.crossFade = [fadeInStart*1000, fadeOut*1000] # In milliseconds
def _score(self, T, Na):
cr = 0
for i in range(1, T+1):
cr += self.beats['in'][Na-i+1]*self.beats['out'][i]
return cr/T
def _segment(self):
print("Started Segmentation")
sIn = pydub.AudioSegment.from_file(self.pathIn, format="mp3")
sOut = pydub.AudioSegment.from_file(self.pathOut, format="mp3")
print("[SUCCESS] Segmented audio files")
self.segments = {
'in': [ sIn[:self.crossFade[0]], sIn[self.crossFade[0]:] ],
'out': [ sOut[:self.crossFade[1]], sOut[self.crossFade[1]:] ],
}
del sIn, sOut
def _speedUp(self):
s1 = self.segments['in'][1]
s2 = self.segments['out'][0]
speed1 = self.tempo['tgt']/self.tempo['in']
speed2 = self.tempo['tgt']/self.tempo['out']
print("Playback Speed of in end segment=",speed1,'X')
print("Playback Speed of out start segment=",speed2,'X')
s1 = s1.speedup(playback_speed=speed1)
s2 = s1.speedup(playback_speed=speed2)
def _mix(self):
xf = self.segments['in'][1].fade(to_gain=-120, start=0, end=float('inf'))
xf *= self.segments['out'][0].fade(from_gain=-120, start=0, end=float('inf'))
out = TemporaryFile()
out.write(self.segments['in'][0]._data)
out.write(xf._data)
out.write(self.segments['out'][1]._data)
out.seek(0)
print("[SUCCESS] Mixed 4 audio segment to 1")
return self.segments['in'][0]._spawn(data=out)
if __name__ == '__main__':
    # Read the song configuration and run the full mash-up pipeline.
    with open('songs.json', 'r') as config_file:
        song_config = json.load(config_file)
    mixer = mash(song_config, cached=True)
| 2.609375 | 3 |