hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31ecb782be25a483f73139a3d3781cbd65bbb995 | 13,447 | py | Python | tests/matrix_extern_irq_reset/matrix_extern_irq_reset.py | RyusukeYamano/nngen | 9ed1f7fb83908794aa94d70287d89545d45fe875 | [
"Apache-2.0"
] | 207 | 2019-11-12T11:42:25.000Z | 2022-03-20T20:32:17.000Z | tests/matrix_extern_irq_reset/matrix_extern_irq_reset.py | RyusukeYamano/nngen | 9ed1f7fb83908794aa94d70287d89545d45fe875 | [
"Apache-2.0"
] | 31 | 2019-11-25T07:33:30.000Z | 2022-03-17T12:34:34.000Z | tests/matrix_extern_irq_reset/matrix_extern_irq_reset.py | RyusukeYamano/nngen | 9ed1f7fb83908794aa94d70287d89545d45fe875 | [
"Apache-2.0"
] | 29 | 2019-11-07T02:25:48.000Z | 2022-03-12T16:22:57.000Z | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import functools
import math
import numpy as np
if sys.version_info.major < 3:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def run(a_shape=(15, 15), b_shape=(15, 15),
a_dtype=ng.int32, b_dtype=ng.int32, c_dtype=ng.int32,
par=1, axi_datawidth=32, interrupt_name='irq', silent=False,
filename=None, simtype='iverilog', outputfile=None):
# create target hardware
a = ng.placeholder(a_dtype, shape=a_shape, name='a')
b = ng.placeholder(b_dtype, shape=b_shape, name='b')
d = ng.add(a, b, dtype=c_dtype, par=par)
e = ng.add(b, a, dtype=c_dtype, par=par)
# SW returns ng.add(x, y)
f = ng.extern([d, e], shape=a_shape, opcode=0x1,
func=lambda x, y: x + y)
g = ng.sub(f, a)
    # SW returns g as-is
h = ng.extern([g], shape=a_shape, opcode=0x2,
func=lambda x: x)
c = ng.sub(h, b)
targ = ng.to_veriloggen([c], 'matrix_extern', silent=silent,
config={'maxi_datawidth': axi_datawidth,
'interrupt_name': interrupt_name})
# verification data
va = np.arange(a.length, dtype=np.int64).reshape(a.shape) % [16]
vb = np.arange(b.length, dtype=np.int64).reshape(b.shape) % [32] + [16]
eval_outs = ng.eval([c], a=va, b=vb)
vc = eval_outs[0]
# to memory image
size_max = int(math.ceil(max(a.memory_size, b.memory_size, c.memory_size) / 4096)) * 4096
check_addr = max(a.addr, b.addr, c.addr) + size_max
size_check = size_max
tmp_addr = check_addr + size_check
memimg_datawidth = 32
mem = np.zeros([1024 * 1024 * 8 // (memimg_datawidth // 8)], dtype=np.int64)
mem = mem + [100]
axi.set_memory(mem, va, memimg_datawidth,
a_dtype.width, a.addr,
max(int(math.ceil(axi_datawidth / a_dtype.width)), par))
axi.set_memory(mem, vb, memimg_datawidth,
b_dtype.width, b.addr,
max(int(math.ceil(axi_datawidth / b_dtype.width)), par))
axi.set_memory(mem, vc, memimg_datawidth,
c_dtype.width, check_addr,
max(int(math.ceil(axi_datawidth / c_dtype.width)), par))
# test controller
m = Module('test')
params = m.copy_params(targ)
ports = m.copy_sim_ports(targ)
clk = ports['CLK']
resetn = ports['RESETN']
irq = ports[interrupt_name]
rst = m.Wire('RST')
rst.assign(Not(resetn))
# AXI memory model
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
memory = axi.AxiMemoryModel(m, 'memory', clk, rst,
datawidth=axi_datawidth,
memimg_datawidth=memimg_datawidth,
memimg=mem, memimg_name=memimg_name)
memory.connect(ports, 'maxi')
# AXI-Slave controller
_saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)
_saxi.connect(ports, 'saxi')
# timer
time_counter = m.Reg('time_counter', 32, initval=0)
seq = Seq(m, 'seq', clk, rst)
seq(
time_counter.inc()
)
num_rep = functools.reduce(lambda x, y: x * y, c.shape[:-1], 1)
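    # num_rep is the product of all but the innermost dimension of c, i.e.
    # how many rows the software side walks per extern exchange below.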
def irq_join(saxi, irq_bit):
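        # Busy-wait until the external irq line asserts, check the interrupt
        # status register (ISR) for the expected bit, then acknowledge the
        # interrupt by writing that bit back to the IAR.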
while irq == 0:
pass
araddr = ng.control_reg_interrupt_isr * 4
irq_stat = saxi.read(araddr)
if irq_stat != irq_bit:
print('# Unexpected irq signal: %d' % irq_stat)
print('# verify: FAILED')
vthread.finish()
print('# irq stat = %d' % irq_stat)
awaddr = ng.control_reg_interrupt_iar * 4
saxi.write(awaddr, irq_bit)
def ctrl():
for i in range(100):
pass
ng.sim.set_global_addrs(_saxi, tmp_addr)
araddr_ext_snd = ng.control_reg_extern_send * 4
awaddr_ext_rcv = ng.control_reg_extern_recv * 4
awaddr_irq_ier = ng.control_reg_interrupt_ier * 4
araddr_irq_isr = ng.control_reg_interrupt_isr * 4
awaddr_irq_iar = ng.control_reg_interrupt_iar * 4
        _saxi.write(awaddr_irq_ier, 3)  # irq enable
ng.sim.sw_rst(_saxi)
        print('# 0th software reset (during idle)')
for i in range(100):
pass
irq_stat = _saxi.read(araddr_irq_isr)
if irq_stat != 0:
print('# Unexpected irq signal: %d' % irq_stat)
print('# verify: FAILED')
vthread.finish()
        print('# irq stat = %d' % irq_stat)  # a software reset issued while idle must not raise the irq-busy flag
start_time = time_counter.value
ng.sim.start(_saxi)
print('# 1st test start')
# from extern-send
irq_join(_saxi, 2)
v = _saxi.read(araddr_ext_snd)
print('# opcode = %d' % v)
for i in range(num_rep):
for j in range(c.shape[-1]):
x_offset = tmp_addr - d.default_global_addr
y_offset = tmp_addr - e.default_global_addr
z_offset = tmp_addr - f.default_global_addr
x = memory.read_word(i * c.aligned_shape[-1] + j,
d.addr + x_offset, c_dtype.width)
y = memory.read_word(i * c.aligned_shape[-1] + j,
e.addr + y_offset, c_dtype.width)
z = x + y
memory.write_word(i * c.aligned_shape[-1] + j,
f.addr + z_offset, z, c_dtype.width)
# to extern-recv
_saxi.write(awaddr_ext_rcv, 1)
# from extern-send
irq_join(_saxi, 2)
v = _saxi.read(araddr_ext_snd)
print('# opcode = %d' % v)
# software reset
ng.sim.sw_rst(_saxi)
print('# 1st software reset (before resume)')
# from extern-send
irq_join(_saxi, 1)
# restart
ng.sim.start(_saxi)
print('# Restart')
# from extern-send
irq_join(_saxi, 2)
v = _saxi.read(araddr_ext_snd)
print('# opcode = %d' % v)
for i in range(num_rep):
for j in range(c.shape[-1]):
x_offset = tmp_addr - d.default_global_addr
y_offset = tmp_addr - e.default_global_addr
z_offset = tmp_addr - f.default_global_addr
x = memory.read_word(i * c.aligned_shape[-1] + j,
d.addr + x_offset, c_dtype.width)
y = memory.read_word(i * c.aligned_shape[-1] + j,
e.addr + y_offset, c_dtype.width)
z = x + y
memory.write_word(i * c.aligned_shape[-1] + j,
f.addr + z_offset, z, c_dtype.width)
# to extern-recv
_saxi.write(awaddr_ext_rcv, 1)
# from extern-send
irq_join(_saxi, 2)
v = _saxi.read(araddr_ext_snd)
print('# opcode = %d' % v)
for i in range(num_rep):
for j in range(c.shape[-1]):
x_offset = tmp_addr - g.default_global_addr
z_offset = tmp_addr - h.default_global_addr
x = memory.read_word(i * c.aligned_shape[-1] + j,
g.addr + x_offset, c_dtype.width)
z = x
memory.write_word(i * c.aligned_shape[-1] + j,
h.addr + z_offset, z, c_dtype.width)
# to extern-recv
_saxi.write(awaddr_ext_rcv, 1)
# from extern-send
irq_join(_saxi, 1)
#ng.sim.wait(_saxi)
end_time = time_counter.value
print('# end')
print('# execution cycles: %d' % (end_time - start_time))
# verify
ok_1st = True
for i in range(num_rep):
for j in range(c.shape[-1]):
orig = memory.read_word(i * c.aligned_shape[-1] + j,
c.addr, c_dtype.width)
check = memory.read_word(i * c.aligned_shape[-1] + j,
check_addr, c_dtype.width)
if vthread.verilog.NotEql(orig, check):
print('NG', i, j, orig, check)
ok_1st = False
# else:
# print('OK', i, j, orig, check)
# 2nd test
# start
start_time = time_counter.value
ng.sim.start(_saxi)
print('# 2nd test start')
# from extern-send
irq_join(_saxi, 2)
v = _saxi.read(araddr_ext_snd)
print('# opcode = %d' % v)
for i in range(num_rep):
for j in range(c.shape[-1]):
x_offset = tmp_addr - d.default_global_addr
y_offset = tmp_addr - e.default_global_addr
z_offset = tmp_addr - f.default_global_addr
x = memory.read_word(i * c.aligned_shape[-1] + j,
d.addr + x_offset, c_dtype.width)
y = memory.read_word(i * c.aligned_shape[-1] + j,
e.addr + y_offset, c_dtype.width)
z = x + y
memory.write_word(i * c.aligned_shape[-1] + j,
f.addr + z_offset, z, c_dtype.width)
# to extern-recv
_saxi.write(awaddr_ext_rcv, 1)
        while memory.waddr.awvalid == 0:
pass
ng.sim.sw_rst(_saxi)
print('# 2nd software reset (during Master AXI transaction)')
irq_join(_saxi, 1) # irq busy by software reset
# restart
ng.sim.start(_saxi)
print('# Restart')
# from extern-send
irq_join(_saxi, 2)
        v = _saxi.read(araddr_ext_snd)
print('# opcode = %d' % v)
for i in range(num_rep):
for j in range(c.shape[-1]):
x_offset = tmp_addr - d.default_global_addr
y_offset = tmp_addr - e.default_global_addr
z_offset = tmp_addr - f.default_global_addr
x = memory.read_word(i * c.aligned_shape[-1] + j,
d.addr + x_offset, c_dtype.width)
y = memory.read_word(i * c.aligned_shape[-1] + j,
e.addr + y_offset, c_dtype.width)
z = x + y
memory.write_word(i * c.aligned_shape[-1] + j,
f.addr + z_offset, z, c_dtype.width)
# to extern-recv
_saxi.write(awaddr_ext_rcv, 1)
# from extern-send
irq_join(_saxi, 2)
v = _saxi.read(araddr_ext_snd)
print('# opcode = %d' % v)
for i in range(num_rep):
for j in range(c.shape[-1]):
x_offset = tmp_addr - g.default_global_addr
z_offset = tmp_addr - h.default_global_addr
x = memory.read_word(i * c.aligned_shape[-1] + j,
g.addr + x_offset, c_dtype.width)
z = x
memory.write_word(i * c.aligned_shape[-1] + j,
h.addr + z_offset, z, c_dtype.width)
# to extern-recv
_saxi.write(awaddr_ext_rcv, 1)
# termination
irq_join(_saxi, 1)
#ng.sim.wait(_saxi)
end_time = time_counter.value
print('# end')
print('# execution cycles: %d' % (end_time - start_time))
# verify
ok_2nd = True
for i in range(num_rep):
for j in range(c.shape[-1]):
orig = memory.read_word(i * c.aligned_shape[-1] + j,
c.addr, c_dtype.width)
check = memory.read_word(i * c.aligned_shape[-1] + j,
check_addr, c_dtype.width)
if vthread.verilog.NotEql(orig, check):
print('NG', i, j, orig, check)
ok_2nd = False
# else:
# print('OK', i, j, orig, check)
if ok_1st and ok_2nd:
print('# verify: PASSED')
else:
print('# verify: FAILED')
vthread.finish()
th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)
fsm = th.start()
uut = m.Instance(targ, 'uut',
params=m.connect_params(targ),
ports=m.connect_ports(targ))
# simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, resetn, m.make_reset(), period=100, polarity='low')
init.add(
Delay(1000000),
Systask('finish'),
)
# output source code
if filename is not None:
m.to_verilog(filename)
# run simulation
sim = simulation.Simulator(m, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'verilator' and lines[-1].startswith('-'):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(silent=False, filename='tmp.v')
print(rslt)
| 33.039312 | 93 | 0.533428 |
677ca5b0e1c29125ac2883a4cf35b55b7a9ea4d5 | 2,328 | py | Python | roles/models/cta/cta_mixmatch.py | JMFlin/auto-preference-finder | 07dd2e6b2b28398ca9bfb6ad328c578eb9987417 | [
"MIT"
] | null | null | null | roles/models/cta/cta_mixmatch.py | JMFlin/auto-preference-finder | 07dd2e6b2b28398ca9bfb6ad328c578eb9987417 | [
"MIT"
] | 7 | 2020-11-13T17:50:07.000Z | 2022-02-10T02:17:14.000Z | roles/models/cta/cta_mixmatch.py | JMFlin/auto-preference-finder | 07dd2e6b2b28398ca9bfb6ad328c578eb9987417 | [
"MIT"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from absl import app
from absl import flags
from cta.lib.train import CTAClassifySemi
from libml import data, utils
from mixmatch import MixMatch
FLAGS = flags.FLAGS
class CTAMixMatch(MixMatch, CTAClassifySemi):
pass
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.PAIR_DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = CTAMixMatch(
os.path.join(FLAGS.train_dir, dataset.name, CTAMixMatch.cta_name()),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
w_match=FLAGS.w_match,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
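    # train_kimg / report_kimg are counts of kibi-images; the << 10 shifts
    # convert them to raw image counts for model.train.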
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.75, 'Mixup beta distribution.')
flags.DEFINE_float('w_match', 100, 'Weight for distribution matching loss.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('augment', 'd.d.d')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
| 34.235294 | 87 | 0.675687 |
2f008983fdb56afb61f5759e739ffba9951f07f4 | 3,754 | py | Python | tests/util/test_merkle.py | MatthiasLohr/bfebench | baca2e18a9c24282ecda99dccfd134fab4c223b3 | [
"Apache-2.0"
] | null | null | null | tests/util/test_merkle.py | MatthiasLohr/bfebench | baca2e18a9c24282ecda99dccfd134fab4c223b3 | [
"Apache-2.0"
] | null | null | null | tests/util/test_merkle.py | MatthiasLohr/bfebench | baca2e18a9c24282ecda99dccfd134fab4c223b3 | [
"Apache-2.0"
] | null | null | null | # This file is part of the Blockchain-based Fair Exchange Benchmark Tool
# https://gitlab.com/MatthiasLohr/bfebench
#
# Copyright 2021-2022 Matthias Lohr <mail@mlohr.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64decode, b64encode
from math import log2
from unittest import TestCase
from bfebench.protocols.fairswap.util import B032, keccak
from bfebench.utils.bytes import generate_bytes
from bfebench.utils.merkle import (
MerkleTreeLeaf,
MerkleTreeNode,
from_bytes,
mt2obj,
obj2mt,
)
class MerkleTest(TestCase):
EXAMPLE_TREE1 = MerkleTreeNode(
keccak,
MerkleTreeNode(
keccak,
MerkleTreeLeaf(keccak, generate_bytes(32)),
MerkleTreeLeaf(keccak, generate_bytes(32)),
),
MerkleTreeNode(
keccak,
MerkleTreeLeaf(keccak, generate_bytes(32)),
MerkleTreeLeaf(keccak, generate_bytes(32)),
),
)
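    # EXAMPLE_TREE1 is a balanced depth-2 Merkle tree over four random
    # 32-byte leaves; the round-trip tests below serialize it with mt2obj
    # and rebuild it with obj2mt.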
def test_init(self) -> None:
self.assertRaises(
ValueError,
MerkleTreeNode,
keccak,
MerkleTreeLeaf(keccak, B032),
MerkleTreeLeaf(keccak, B032),
MerkleTreeLeaf(keccak, B032),
)
def test_get_proof_and_validate(self) -> None:
for slice_count in [2, 4, 8, 16]:
tree = from_bytes(generate_bytes(32 * slice_count), keccak, slice_count)
for index, leaf in enumerate(tree.leaves):
proof = tree.get_proof(leaf)
self.assertEqual(len(proof), int(log2(slice_count)))
self.assertTrue(MerkleTreeNode.validate_proof(tree.digest, leaf, index, proof, keccak))
def test_mt2obj2mt_plain(self) -> None:
obj = mt2obj(self.EXAMPLE_TREE1)
mt2 = obj2mt(obj, keccak)
self.assertEqual(self.EXAMPLE_TREE1, mt2)
self.assertEqual(self.EXAMPLE_TREE1.digest, mt2.digest)
def test_mt2obj2mt_b64(self) -> None:
obj = mt2obj(self.EXAMPLE_TREE1, b64encode)
mt2 = obj2mt(obj, keccak, b64decode)
self.assertEqual(self.EXAMPLE_TREE1, mt2)
self.assertEqual(self.EXAMPLE_TREE1.digest, mt2.digest)
def test_mt2obj2mt_hex(self) -> None:
obj = mt2obj(self.EXAMPLE_TREE1, encode_func=lambda b: bytes(b).hex())
mt2 = obj2mt(obj, digest_func=keccak, decode_func=lambda s: bytes.fromhex(str(s)))
self.assertEqual(self.EXAMPLE_TREE1, mt2)
self.assertEqual(self.EXAMPLE_TREE1.digest, mt2.digest)
def test_obj2mt_error(self) -> None:
self.assertRaises(ValueError, obj2mt, 3, keccak)
def test_from_bytes_encode_decode(self) -> None:
data = generate_bytes(32 * 2 * 16)
tree_original = from_bytes(data, digest_func=keccak, slice_count=16)
self.assertEqual(b"".join([leaf.data for leaf in tree_original.leaves]), data)
tree_encoded = mt2obj(tree_original, encode_func=lambda b: bytes(b).hex())
tree_decoded = obj2mt(
tree_encoded,
digest_func=keccak,
decode_func=lambda s: bytes.fromhex(str(s)),
)
self.assertEqual(b"".join([leaf.data for leaf in tree_decoded.leaves]), data)
self.assertEqual(tree_original.digest, tree_decoded.digest)
| 35.752381 | 103 | 0.669952 |
19350fafd622afdc51cd5cfffa5ee18ce5f5f27f | 22,151 | py | Python | tensorflow_probability/python/internal/backend/numpy/ops.py | PavanKishore21/probability | 4bad1b796b0e6ed2959205915d42788817620c4c | [
"Apache-2.0"
] | 2 | 2019-10-30T04:45:07.000Z | 2019-10-30T04:45:08.000Z | tensorflow_probability/python/internal/backend/numpy/ops.py | PavanKishore21/probability | 4bad1b796b0e6ed2959205915d42788817620c4c | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/internal/backend/numpy/ops.py | PavanKishore21/probability | 4bad1b796b0e6ed2959205915d42788817620c4c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
# Dependency imports
import numpy as np
import numpy as onp # Avoid JAX rewrite. # pylint: disable=reimported
import six
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
from tensorflow_probability.python.internal.backend.numpy import nest
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
try: # May not be available, not a core dep for TFP.
import wrapt # pylint: disable=g-import-not-at-top
except ImportError:
wrapt = None
__all__ = [
'bitcast',
'broadcast_dynamic_shape',
'broadcast_static_shape',
'broadcast_to',
'cast',
'clip_by_value',
'constant',
'control_dependencies',
'convert_to_tensor',
'custom_gradient',
'device',
'enable_v2_behavior',
'ensure_shape',
'executing_eagerly',
'get_static_value',
'identity',
'init_scope',
'is_tensor',
'name_scope',
'newaxis',
'register_tensor_conversion_function',
'stop_gradient',
'GradientTape',
'Module',
'Tensor',
'Variable',
# 'gradients',
]
JAX_MODE = False
if JAX_MODE:
import jax # pylint: disable=g-import-not-at-top
class _NullContext(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions.
def _broadcast_static_shape(shape_x, shape_y):
"""Reimplements `tf.broadcast_static_shape` in JAX/NumPy."""
if (tensor_shape.TensorShape(shape_x).ndims is None or
tensor_shape.TensorShape(shape_y).ndims is None):
return tensor_shape.TensorShape(None)
shape_x = tuple(tensor_shape.TensorShape(shape_x).as_list())
shape_y = tuple(tensor_shape.TensorShape(shape_y).as_list())
try:
if JAX_MODE:
error_message = 'Incompatible shapes for broadcasting'
return tensor_shape.TensorShape(lax.broadcast_shapes(shape_x, shape_y))
error_message = ('shape mismatch: objects cannot be broadcast to'
' a single shape')
return tensor_shape.TensorShape(
np.broadcast(np.zeros(shape_x), np.zeros(shape_y)).shape)
except ValueError as e:
# Match TF error message
if error_message in str(e):
raise ValueError(
'Incompatible shapes for broadcasting: {} and {}'.format(
shape_x, shape_y))
raise
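# A quick illustration of the rule implemented above (a sketch; the shapes
# follow standard NumPy broadcasting semantics):
#
#   _broadcast_static_shape((5, 1, 3), (4, 3))  # -> TensorShape([5, 4, 3])
#   _broadcast_static_shape((2, 3), (4, 3))     # -> ValueError (2 vs 4)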
def _broadcast_dynamic_shape(shape_x, shape_y):
"""Reimplements `tf.broadcast_dynamic_shape` in JAX/NumPy."""
return convert_to_tensor(_broadcast_static_shape(shape_x, shape_y))
broadcast_shape = _broadcast_static_shape
def _constant(value, dtype=None, shape=None, name='Const'): # pylint: disable=unused-argument
x = convert_to_tensor(value, dtype=dtype)
if shape is None:
return x
if not x.shape:
return np.full(shape, x)
return np.reshape(x, shape)
def _control_dependencies(control_inputs):
if control_inputs:
for control in control_inputs:
if callable(control):
control()
return _NullContext()
tensor_conversion_registry = {}
def register_tensor_conversion_function(base_type, conversion_func):
# No priority system like TensorFlow yet
tensor_conversion_registry[base_type] = conversion_func
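# Usage sketch for the registry above (mirroring the registrations made
# further down in this module; `MyBox` and its `.data` attribute are
# hypothetical):
#
#   def _convert_mybox_to_tensor(value, dtype=None):
#     return convert_to_tensor(value.data, dtype=dtype)
#
#   register_tensor_conversion_function(MyBox, _convert_mybox_to_tensor)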
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None): # pylint: disable=unused-argument
"""Emulates tf.convert_to_tensor."""
dtype = utils.numpy_dtype(dtype)
dtype_hint = utils.numpy_dtype(dtype_hint)
if is_tensor(value) and not isinstance(value, Variable):
# In NumPy mode, we are lenient on the dtype compatibility check because
# some codepaths rely on flexible conversion from int/float64 to 32.
if dtype is not None and value.dtype != dtype:
if JAX_MODE:
raise TypeError(('Tensor conversion requested dtype {} for array with '
'dtype {}: {}').format(dtype, value.dtype, value))
return value.astype(dtype)
return value
conversion_func = tensor_conversion_registry.get(type(value),
_default_convert_to_tensor)
ret = None
if dtype is None and dtype_hint is not None:
try:
ret = conversion_func(value, dtype=dtype_hint)
except (TypeError, ValueError):
pass
if ret is None:
ret = conversion_func(value, dtype=dtype)
return ret
def _infer_dtype(value, default_dtype):
"""Guesses an object's dtype."""
# Need to check for onp type first because onp types are subclasses of Python
# types.
if hasattr(value, 'dtype'):
# Duck-typing onp types
return value.dtype
elif isinstance(value, bool):
return np.bool_
elif isinstance(value, six.integer_types):
return np.int32
elif isinstance(value, float):
return np.float32
elif isinstance(value, complex):
return np.complex128
elif isinstance(value, (tuple, list)):
# Try inferring the type from items in the object if possible.
for v in nest.flatten(value):
if hasattr(v, 'dtype'):
return v.dtype
try: # Finally fall back to raw types (int, bool).
return _infer_dtype(value[0], default_dtype)
except (IndexError, TypeError):
return default_dtype
raise ValueError(('Attempt to convert a value ({})'
' with an unsupported type ({}) to a Tensor.').format(
value, type(value)))
class _Int64ToInt32Error(TypeError):
"""Error thrown when trying to convert an int64 to int32."""
def __init__(self, int_value):
self.int_value = int_value
super(_Int64ToInt32Error, self).__init__('Overflow when casting an int64 to'
' an int32.')
class _FloatToIntError(TypeError):
"""Error thrown when trying to convert a float to an int."""
def _is_int64(value):
return value > onp.iinfo(onp.int32).max or value < onp.iinfo(onp.int32).min
def _default_convert_to_tensor(value, dtype=None):
"""Default tensor conversion function for array, bool, int, float, and complex."""
inferred_dtype = _infer_dtype(value, np.float32)
# When a dtype is provided, we can go ahead and try converting to the dtype
# and force overflow/underflow if an int64 is converted to an int32.
if dtype is not None:
try:
return _default_convert_to_tensor_with_dtype(value, dtype)
except _Int64ToInt32Error as e:
# Force conversion to int32 if requested
return e.int_value
# If no dtype is provided, we try the inferred dtype and fallback to int64 or
# float32 depending on the type of conversion error we see.
try:
return _default_convert_to_tensor_with_dtype(value, inferred_dtype)
  except _Int64ToInt32Error:
    return np.array(value, dtype=np.int64)
  except _FloatToIntError:
    return np.array(value, dtype=np.float32)
class TypeConversionError(TypeError):
def __init__(self, value, dtype):
super(TypeConversionError, self).__init__(
'Cannot convert {} to array of dtype {}'.format(value, dtype))
class MixedTypesError(ValueError):
def __init__(self):
super(MixedTypesError, self).__init__('Can\'t convert Python sequence with'
' mixed types to Tensor.')
def _default_convert_to_tensor_with_dtype(value, dtype,
error_if_mismatch=False):
"""Converts a value to a tensor with a given dtype.
Args:
value: An object to be converted to tensor.
dtype: A NPTF dtype.
error_if_mismatch: Enables a stricter check for use when converting an
iterable from a tensor.
Returns:
A tensor.
Raises:
TypeConversionError: If type conversion fails.
MixedTypesError: If types are mismatched in an iterable context.
ValueError: If object isn't convertible to tensor.
_Int64ToInt32Error: If trying to convert an int64 to an int32.
_FloatToIntError: If trying to convert a float to an int.
"""
is_arraylike = hasattr(value, 'dtype')
if is_arraylike:
# Duck-typed for `onp.array`/`onp.generic`
arr = np.array(value)
if dtype is not None:
# arr.astype(None) forces conversion to float64
return arr.astype(dtype)
return arr
elif isinstance(value, complex):
dtype_compatible = np.issubdtype(dtype, np.complexfloating)
if not dtype_compatible:
if error_if_mismatch:
raise MixedTypesError()
raise TypeConversionError(value, dtype)
elif isinstance(value, bool):
# Bool check needs to happen before int check because bools are instances of
# int.
dtype_compatible = (dtype == np.bool_ or np.issubdtype(dtype, np.integer)
or np.issubdtype(dtype, np.floating))
if not dtype_compatible:
if error_if_mismatch:
raise MixedTypesError()
      raise TypeConversionError(value, dtype)
elif isinstance(value, six.integer_types):
if error_if_mismatch and not (np.issubdtype(dtype, np.integer)
or np.issubdtype(dtype, np.floating)):
raise MixedTypesError()
if dtype == np.int32 and _is_int64(value):
raise _Int64ToInt32Error(np.array(value, dtype=dtype))
if dtype == np.bool_:
# Can't downcast an int to a bool
raise TypeConversionError(value, dtype)
elif isinstance(value, float):
if error_if_mismatch and not (np.issubdtype(dtype, np.integer)
or np.issubdtype(dtype, np.floating)):
raise MixedTypesError()
if np.issubdtype(dtype, np.integer):
raise _FloatToIntError(
'Cannot convert {} to array of dtype {}'.format(value, dtype))
if not (np.issubdtype(dtype, np.floating)
or np.issubdtype(dtype, np.complexfloating)):
raise TypeConversionError(value, dtype)
else:
# Try to iterate through object and throw ValueError if we can't.
if hasattr(value, '__getitem__'):
ret = []
error_in_list = False
for v in value:
ret.append(_default_convert_to_tensor_with_dtype(
v, dtype, error_if_mismatch=error_in_list))
error_in_list = True
value = ret
else:
raise ValueError(
('Attempting to convert a value {} with an'
' unsupported type {} to a Tensor.').format(value, type(value)))
return np.array(value, dtype=dtype)
@contextlib.contextmanager
def _init_scope():
yield
# --- Begin Public Functions --------------------------------------------------
class GradientTape(object):
"""tf.GradientTape stub."""
def __init__(self, persistent=False, watch_accessed_variables=True): # pylint: disable=unused-argument
raise NotImplementedError('GradientTape not currently supported in JAX and '
'NumPy backends.')
def __enter__(self):
return self
def __exit__(self, typ, value, traceback): # pylint: disable=unused-argument
pass
def watch(self, tensor): # pylint: disable=unused-argument
pass
def gradient(self, target, sources, output_gradients=None, # pylint: disable=unused-argument
unconnected_gradients=None): # pylint: disable=unused-argument
raise NotImplementedError
def batch_jacobian(self, target, source, # pylint: disable=unused-argument
unconnected_gradients=None, # pylint: disable=unused-argument
parallel_iterations=None, experimental_use_pfor=True): # pylint: disable=unused-argument
raise NotImplementedError
bitcast = utils.copy_docstring(
'tf.bitcast',
lambda input, type, name=None: convert_to_tensor( # pylint: disable=g-long-lambda
input, dtype_hint=type).view(type))
broadcast_dynamic_shape = utils.copy_docstring(
'tf.broadcast_dynamic_shape', _broadcast_dynamic_shape)
broadcast_static_shape = utils.copy_docstring(
'tf.broadcast_static_shape', _broadcast_static_shape)
broadcast_to = utils.copy_docstring(
'tf.broadcast_to',
lambda input, shape, name=None: np.broadcast_to(input, shape))
def _cast(x, dtype):
x = np.asarray(x)
if (np.issubdtype(x.dtype, np.complexfloating) and
not np.issubdtype(dtype, np.complexfloating)):
x = np.real(x)
return x.astype(dtype)
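# e.g. _cast(np.array([1.5 + 2.j]), np.float32) drops the imaginary part and
# returns [1.5], matching tf.cast's documented behavior for complex-to-real
# casts.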
cast = utils.copy_docstring(
'tf.cast',
lambda x, dtype, name=None: _cast(x, utils.numpy_dtype(dtype)))
clip_by_value = utils.copy_docstring(
'tf.clip_by_value',
lambda t, clip_value_min, clip_value_max, name=None: # pylint: disable=g-long-lambda
np.clip(t, clip_value_min, clip_value_max))
constant = utils.copy_docstring(
'tf.constant',
_constant)
control_dependencies = utils.copy_docstring(
'tf.control_dependencies',
_control_dependencies)
convert_to_tensor = utils.copy_docstring(
'tf.convert_to_tensor',
_convert_to_tensor)
def _custom_gradient(f):
"""JAX implementation of tf.custom_gradient."""
if not JAX_MODE:
# Numpy backend ignores custom gradients, so we do too.
return lambda *args, **kwargs: f(*args, **kwargs)[0]
@jax.custom_gradient
@functools.wraps(f)
def wrapped(*args, **kwargs):
value, vjp = f(*args, **kwargs)
def vjp_(cts_out):
cts_in = vjp(cts_out)
if isinstance(cts_in, list):
cts_in = tuple(cts_in)
return cts_in
return value, vjp_
return wrapped
custom_gradient = utils.copy_docstring(
'tf.custom_gradient', _custom_gradient)
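# Minimal usage sketch (`square` is a hypothetical function; the NumPy
# backend simply ignores the custom gradient, while JAX registers it via
# jax.custom_gradient):
#
#   @custom_gradient
#   def square(x):
#     def vjp(dy):
#       return (2. * x * dy,)
#     return x * x, vjp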
device = lambda _: _NullContext()
def _ensure_shape(x, shape, name=None): # pylint: disable=unused-argument
x_shape = tensor_shape.TensorShape(x.shape)
shape = tensor_shape.TensorShape(shape)
if not shape.is_compatible_with(x_shape):
msg = 'Shape of tensor x {} is not compatible with expected shape {}'
raise ValueError(msg.format(x_shape, shape))
return x
ensure_shape = utils.copy_docstring(
'tf.ensure_shape', _ensure_shape)
executing_eagerly = utils.copy_docstring(
'tf.executing_eagerly',
lambda: True)
def _get_static_value_jax(tensor, partial=False):
"""JAX implementation of tf.get_static_value."""
del partial
if isinstance(tensor, jax.core.Tracer):
return None
if isinstance(tensor, NumpyVariable):
return None
if isinstance(tensor, Module):
return None
if isinstance(tensor, np.ndarray):
return onp.array(tensor)
return tensor
def _get_static_value_numpy(tensor, partial=False):
"""NumPy implementation of tf.get_static_value."""
del partial
if isinstance(tensor, NumpyVariable):
return None
if isinstance(tensor, Module):
return None
return tensor
get_static_value = utils.copy_docstring(
'tf.get_static_value',
_get_static_value_jax if JAX_MODE else _get_static_value_numpy)
identity = utils.copy_docstring(
'tf.identity',
lambda input, name=None: np.array(input))
is_tensor = utils.copy_docstring(
'tf.is_tensor',
lambda x: isinstance(x, Tensor))
init_scope = utils.copy_docstring('tf.init_scope', _init_scope)
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
If the scope name already exists, the name will be made unique by appending
`_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
etc.
"""
@property
def name(self):
return self._name
def __init__(self, name, *args, **kwargs):
del args, kwargs
self._name = name
def __enter__(self):
return self._name
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions.
newaxis = np.newaxis
if JAX_MODE:
from jax import lax # pylint: disable=g-import-not-at-top
stop_gradient = utils.copy_docstring(
'tf.stop_gradient',
lambda input, name=None: lax.stop_gradient(input))
else:
stop_gradient = utils.copy_docstring(
'tf.stop_gradient',
lambda input, name=None: np.array(input))
def _convert_tensorshape_to_tensor(value, dtype=None):
"""Copied from TF's TensorShape conversion."""
if not value.is_fully_defined():
raise ValueError(
'Cannot convert a partially known TensorShape to a Tensor: {}'.format(
value))
value_list = value.as_list()
int64_value = 0
for dim in value_list:
if dim >= 2**31:
int64_value = dim
break
if dtype is not None:
if dtype not in (np.int32, np.int64):
raise TypeConversionError(value, dtype)
if dtype == np.int32 and int64_value:
raise ValueError('Cannot convert a TensorShape to dtype int32; '
'a dimension is too large ({})'.format(int64_value))
else:
dtype = np.int64 if int64_value else np.int32
return convert_to_tensor(value_list, dtype=dtype)
register_tensor_conversion_function(tensor_shape.TensorShape,
_convert_tensorshape_to_tensor)
def _convert_dimension_to_tensor(value, dtype=None):
dtype = dtype or np.int32
if dtype not in (np.int32, np.int64):
raise TypeConversionError(value, dtype)
return convert_to_tensor(tensor_shape.dimension_value(value), dtype=dtype)
register_tensor_conversion_function(tensor_shape.Dimension,
_convert_dimension_to_tensor)
class NumpyVariable(getattr(wrapt, 'ObjectProxy', object)):
"""Stand-in for tf.Variable."""
__slots__ = ('initializer',)
# pylint: disable=unused-argument
def __init__(
self,
initial_value=None,
trainable=True,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
import_scope=None,
constraint=None,
shape=None):
assert constraint is None
v = convert_to_tensor(initial_value)
if dtype is not None:
v = v.astype(utils.numpy_dtype(dtype))
super(NumpyVariable, self).__init__(v)
self._self_name = name
self.initializer = None
# pylint: enable=unused-argument
@property
def name(self):
return self._self_name if self._self_name is not None else str(id(self))
def __array__(self, dtype=None):
if dtype is not None:
dtype = utils.numpy_dtype(dtype)
return self.__wrapped__.__array__(dtype)
# Passing in dtype=None to __array__ has differing behavior in numpy.
    # When an `np.ndarray` has `.__array__(None)` invoked, the array is cast
# to `float64`. Thus we handle this case separately.
return self.__wrapped__.__array__()
def assign(self, value, **_):
super(NumpyVariable, self).__init__(onp.array(value, dtype=self.dtype))
return self
def assign_add(self, value, **_):
super(NumpyVariable, self).__init__(
onp.array(self, dtype=self.dtype) + onp.array(value, dtype=self.dtype))
return self
def assign_sub(self, value, **_):
super(NumpyVariable, self).__init__(
onp.array(self, dtype=self.dtype) - onp.array(value, dtype=self.dtype))
return self
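  # Note: the assign* methods above emulate tf.Variable mutation by
  # re-initializing the wrapt proxy around a fresh onp.ndarray; nothing is
  # updated in place as it would be in TF.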
if JAX_MODE:
jax.interpreters.xla.canonicalize_dtype_handlers[NumpyVariable] = (
jax.interpreters.xla.canonicalize_dtype_handlers[onp.ndarray])
jax.interpreters.xla.pytype_aval_mappings[NumpyVariable] = (
jax.interpreters.xla.pytype_aval_mappings[onp.ndarray])
jax.core.pytype_aval_mappings[NumpyVariable] = (
jax.core.pytype_aval_mappings[onp.ndarray])
def _convert_variable_to_tensor(value, dtype=None):
return convert_to_tensor(value.__wrapped__, dtype=dtype)
register_tensor_conversion_function(NumpyVariable, _convert_variable_to_tensor)
Variable = NumpyVariable
class _TensorMeta(type(np.ndarray)):
@classmethod
def __instancecheck__(cls, instance):
if JAX_MODE:
return isinstance(instance, (jax.xla.DeviceArray,
jax.core.Tracer))
return isinstance(instance, np.ndarray)
class Tensor(six.with_metaclass(_TensorMeta)):
OVERLOADABLE_OPERATORS = frozenset((
# Binary.
'__add__',
'__radd__',
'__sub__',
'__rsub__',
'__mul__',
'__rmul__',
'__truediv__',
'__rtruediv__',
'__floordiv__',
'__rfloordiv__',
'__mod__',
'__rmod__',
'__lt__',
'__le__',
'__gt__',
'__ge__',
'__ne__',
'__eq__',
'__and__',
'__rand__',
'__or__',
'__ror__',
'__xor__',
'__rxor__',
'__getitem__',
'__pow__',
'__rpow__',
# Unary.
'__invert__',
'__neg__',
'__abs__',
'__matmul__',
'__rmatmul__'
))
class Module(object):
"""tf.Module."""
_TF_MODULE_IGNORED_PROPERTIES = frozenset()
def __init__(self, name):
self._name = name
def _no_dependency(self, x):
return x
@property
def trainable_variables(self):
return []
@property
def variables(self):
return []
enable_v2_behavior = lambda: None
| 30.937151 | 110 | 0.687012 |
13dce8f8f3e57ff4a6e01d63174a6da7ae30ef3d | 76,244 | py | Python | lib/galaxy/datatypes/interval.py | igorhollaender/sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | 2 | 2018-10-14T16:42:39.000Z | 2018-10-14T16:42:41.000Z | lib/galaxy/datatypes/interval.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/datatypes/interval.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
] | null | null | null | """
Interval datatypes
"""
import logging
import math
import os
import sys
import tempfile
import urllib
import numpy
from bx.intervals.io import GenomicIntervalReader, ParseError
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import get_headers
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.util.gff_util import parse_gff_attributes
from galaxy.web import url_for
import data
import dataproviders
log = logging.getLogger(__name__)
# Contains the meta columns and the words that map to it; list aliases on the
# right side of the : in decreasing order of priority
alias_spec = {
'chromCol' : [ 'chrom' , 'CHROMOSOME' , 'CHROM', 'Chromosome Name' ],
'startCol' : [ 'start' , 'START', 'chromStart', 'txStart', 'Start Position (bp)' ],
'endCol' : [ 'end' , 'END' , 'STOP', 'chromEnd', 'txEnd', 'End Position (bp)' ],
'strandCol' : [ 'strand', 'STRAND', 'Strand' ],
'nameCol' : [ 'name', 'NAME', 'Name', 'name2', 'NAME2', 'Name2', 'Ensembl Gene ID', 'Ensembl Transcript ID', 'Ensembl Peptide ID' ]
}
# a little faster lookup
alias_helper = {}
for key, value in alias_spec.items():
for elem in value:
alias_helper[elem] = key
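# The reverse mapping built above resolves a recognized header word to its
# metadata column name in one lookup, e.g. (values follow directly from
# alias_spec): alias_helper['chromStart'] -> 'startCol' and
# alias_helper['Strand'] -> 'strandCol'.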
# Constants for configuring viewport generation: If a line is greater than
# VIEWPORT_MAX_READS_PER_LINE * VIEWPORT_READLINE_BUFFER_SIZE bytes in size,
# then we will not generate a viewport for that dataset
VIEWPORT_READLINE_BUFFER_SIZE = 1048576 # 1MB
VIEWPORT_MAX_READS_PER_LINE = 10
@dataproviders.decorators.has_dataproviders
class Interval( Tabular ):
"""Tab delimited data containing interval information"""
edam_format = "format_3475"
file_ext = "interval"
line_class = "region"
track_type = "FeatureTrack"
data_sources = { "data": "tabix", "index": "bigwig" }
"""Add metadata elements"""
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="nameCol", desc="Name/Identifier column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display apps"""
Tabular.__init__(self, **kwd)
self.add_display_app( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def init_meta( self, dataset, copy_from=None ):
Tabular.init_meta( self, dataset, copy_from=copy_from )
def set_meta( self, dataset, overwrite=True, first_line_is_header=False, **kwd ):
"""Tries to guess from the line the location number of the column for the chromosome, region start-end and strand"""
Tabular.set_meta( self, dataset, overwrite=overwrite, skip=0 )
if dataset.has_data():
empty_line_count = 0
            num_check_lines = 100  # only check up to this many non-empty lines
for i, line in enumerate( file( dataset.file_name ) ):
line = line.rstrip( '\r\n' )
if line:
if ( first_line_is_header or line[0] == '#' ):
self.init_meta( dataset )
line = line.strip( '#' )
elems = line.split( '\t' )
for meta_name, header_list in alias_spec.iteritems():
for header_val in header_list:
if header_val in elems:
# found highest priority header to meta_name
setattr( dataset.metadata, meta_name, elems.index( header_val ) + 1 )
break # next meta_name
break # Our metadata is set, so break out of the outer loop
else:
# Header lines in Interval files are optional. For example, BED is Interval but has no header.
# We'll make a best guess at the location of the metadata columns.
metadata_is_set = False
elems = line.split( '\t' )
if len( elems ) > 2:
for str in data.col1_startswith:
if line.lower().startswith( str ):
if overwrite or not dataset.metadata.element_is_set( 'chromCol' ):
dataset.metadata.chromCol = 1
try:
int( elems[1] )
if overwrite or not dataset.metadata.element_is_set( 'startCol' ):
dataset.metadata.startCol = 2
except:
pass # Metadata default will be used
try:
int( elems[2] )
if overwrite or not dataset.metadata.element_is_set( 'endCol' ):
dataset.metadata.endCol = 3
except:
pass # Metadata default will be used
# we no longer want to guess that this column is the 'name', name must now be set manually for interval files
# we will still guess at the strand, as we can make a more educated guess
# if len( elems ) > 3:
# try:
# int( elems[3] )
# except:
# if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
# dataset.metadata.nameCol = 4
if len( elems ) < 6 or elems[5] not in data.valid_strand:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 6
metadata_is_set = True
break
if metadata_is_set or ( i - empty_line_count ) > num_check_lines:
break # Our metadata is set or we examined 100 non-empty lines, so break out of the outer loop
else:
empty_line_count += 1
def displayable( self, dataset ):
try:
return dataset.has_data() \
and dataset.state == dataset.states.OK \
and dataset.metadata.columns > 0 \
and dataset.metadata.data_lines != 0 \
and dataset.metadata.chromCol \
and dataset.metadata.startCol \
and dataset.metadata.endCol
except:
return False
def get_estimated_display_viewport( self, dataset, chrom_col=None, start_col=None, end_col=None ):
"""Return a chrom, start, stop tuple for viewing a file."""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max( viewport_feature_count, 500 ) # maximum number of lines to check; includes comment lines
if not self.displayable( dataset ):
return ( None, None, None )
try:
            # If column indexes were not passed, determine from metadata
if chrom_col is None:
chrom_col = int( dataset.metadata.chromCol ) - 1
if start_col is None:
start_col = int( dataset.metadata.startCol ) - 1
if end_col is None:
end_col = int( dataset.metadata.endCol ) - 1
# Scan lines of file to find a reasonable chromosome and range
chrom = None
start = sys.maxsize
end = 0
max_col = max( chrom_col, start_col, end_col )
fh = open( dataset.file_name )
while True:
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
# Stop if at end of file
if not line:
break
# Skip comment lines
if not line.startswith( '#' ):
try:
fields = line.rstrip().split( '\t' )
if len( fields ) > max_col:
if chrom is None or chrom == fields[ chrom_col ]:
start = min( start, int( fields[ start_col ] ) )
end = max( end, int( fields[ end_col ] ) )
# Set chrom last, in case start and end are not integers
chrom = fields[ chrom_col ]
viewport_feature_count -= 1
except Exception:
# Most likely a non-integer field has been encountered
# for start / stop. Just ignore and make sure we finish
# reading the line and decrementing the counters.
pass
# Make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip( '\n\r' ) == line:
assert readline_count > 0, Exception( 'Viewport readline count exceeded for dataset %s.' % dataset.id )
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if chrom is not None:
return ( chrom, str( start ), str( end ) ) # Necessary to return strings?
except Exception:
# Unexpected error, possibly missing metadata
log.exception( "Exception caught attempting to generate viewport for dataset '%d'", dataset.id )
return ( None, None, None )
def as_ucsc_display_file( self, dataset, **kwd ):
"""Returns file contents with only the bed data"""
fd, temp_name = tempfile.mkstemp()
c, s, e, t, n = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol or 0, dataset.metadata.nameCol or 0
c, s, e, t, n = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1, int(n) - 1
if t >= 0: # strand column (should) exists
for i, elems in enumerate( util.file_iter(dataset.file_name) ):
strand = "+"
name = "region_%i" % i
if n >= 0 and n < len( elems ):
name = elems[n]
if t < len(elems):
strand = elems[t]
tmp = [ elems[c], elems[s], elems[e], name, '0', strand ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
elif n >= 0: # name column (should) exists
for i, elems in enumerate( util.file_iter(dataset.file_name) ):
name = "region_%i" % i
if n >= 0 and n < len( elems ):
name = elems[n]
tmp = [ elems[c], elems[s], elems[e], name ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
else:
for elems in util.file_iter(dataset.file_name):
tmp = [ elems[c], elems[s], elems[e] ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
os.close(fd)
return open(temp_name)
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return Tabular.make_html_table( self, dataset, column_parameter_alias={'chromCol': 'Chrom', 'startCol': 'Start', 'endCol': 'End', 'strandCol': 'Strand', 'nameCol': 'Name'} )
def ucsc_links( self, dataset, type, app, base_url ):
"""
Generate links to UCSC genome browser sites based on the dbkey
and content of dataset.
"""
# Filter UCSC sites to only those that are supported by this build and
# enabled.
valid_sites = [ ( name, url )
for name, url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey )
if name in app.datatypes_registry.get_display_sites('ucsc') ]
if not valid_sites:
return []
# If there are any valid sites, we need to generate the estimated
# viewport
chrom, start, stop = self.get_estimated_display_viewport( dataset )
if chrom is None:
return []
# Accumulate links for valid sites
ret_val = []
for site_name, site_url in valid_sites:
internal_url = url_for( controller='dataset', dataset_id=dataset.id,
action='display_at', filename='ucsc_' + site_name )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, url_for( controller='root' ), dataset.id, type) )
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s"
% (site_url, dataset.dbkey, chrom, start, stop ) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
ret_val.append( ( site_name, link ) )
return ret_val
def validate( self, dataset ):
"""Validate an interval file using the bx GenomicIntervalReader"""
errors = list()
c, s, e, t = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol
c, s, e, t = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1
infile = open(dataset.file_name, "r")
reader = GenomicIntervalReader(
infile,
chrom_col=c,
start_col=s,
end_col=e,
strand_col=t)
while True:
try:
reader.next()
except ParseError as e:
errors.append(e)
except StopIteration:
infile.close()
return errors
def repair_methods( self, dataset ):
"""Return options for removing errors along with a description"""
return [("lines", "Remove erroneous lines")]
def sniff( self, filename ):
"""
Checks for 'intervalness'
        This format is mostly used by Galaxy itself. A valid interval file should
        include a header comment, but in practice this is loosely enforced.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_space.txt' )
>>> Interval().sniff( fname )
False
>>> fname = get_test_fname( 'interval.interval' )
>>> Interval().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
"""
If we got here, we already know the file is_column_based and is not bed,
so we'll just look for some valid data.
"""
for hdr in headers:
if hdr and not hdr[0].startswith( '#' ):
if len(hdr) < 3:
return False
try:
# Assume chrom start and end are in column positions 1 and 2
# respectively ( for 0 based columns )
int( hdr[1] )
int( hdr[2] )
except:
return False
return True
except:
return False
def get_track_window(self, dataset, data, start, end):
"""
Assumes the incoming track data is sorted already.
"""
window = list()
for record in data:
fields = record.rstrip("\n\r").split("\t")
record_chrom = fields[dataset.metadata.chromCol - 1]
record_start = int(fields[dataset.metadata.startCol - 1])
record_end = int(fields[dataset.metadata.endCol - 1])
if record_start < end and record_end > start:
window.append( (record_chrom, record_start, record_end) ) # Yes I did want to use a generator here, but it doesn't work downstream
return window
def get_track_resolution( self, dataset, start, end):
return None
# ------------- Dataproviders
@dataproviders.decorators.dataprovider_factory( 'genomic-region',
dataproviders.dataset.GenomicRegionDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.GenomicRegionDataProvider( dataset, **settings )
@dataproviders.decorators.dataprovider_factory( 'genomic-region-dict',
dataproviders.dataset.GenomicRegionDataProvider.settings )
def genomic_region_dict_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] = True
return self.genomic_region_dataprovider( dataset, **settings )
@dataproviders.decorators.dataprovider_factory( 'interval',
dataproviders.dataset.IntervalDataProvider.settings )
def interval_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.IntervalDataProvider( dataset, **settings )
@dataproviders.decorators.dataprovider_factory( 'interval-dict',
dataproviders.dataset.IntervalDataProvider.settings )
def interval_dict_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] = True
return self.interval_dataprovider( dataset, **settings )
class BedGraph( Interval ):
"""Tab delimited chrom/start/end/datavalue dataset"""
file_ext = "bedgraph"
track_type = "LineTrack"
data_sources = { "data": "bigwig", "index": "bigwig" }
def as_ucsc_display_file( self, dataset, **kwd ):
"""
Returns file contents as is with no modifications.
        TODO: this is a functional stub that should be enhanced to provide fuller bedgraph support.
"""
return open( dataset.file_name )
def get_estimated_display_viewport( self, dataset, chrom_col=0, start_col=1, end_col=2 ):
"""
Set viewport based on dataset's first 100 lines.
"""
return Interval.get_estimated_display_viewport( self, dataset, chrom_col=chrom_col, start_col=start_col, end_col=end_col )
class Bed( Interval ):
"""Tab delimited data in BED format"""
edam_format = "format_3003"
file_ext = "bed"
data_sources = { "data": "tabix", "index": "bigwig", "feature_search": "fli" }
track_type = Interval.track_type
"""Add metadata elements"""
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="viz_filter_cols", desc="Score column for visualization", default=[4], param=metadata.ColumnParameter, optional=True, multiple=True )
    # do we need to repeat these? they are the same and should be inherited from the interval type
def set_meta( self, dataset, overwrite=True, **kwd ):
"""Sets the metadata information for datasets previously determined to be in bed format."""
i = 0
if dataset.has_data():
for i, line in enumerate( file(dataset.file_name) ):
metadata_set = False
line = line.rstrip('\r\n')
if line and not line.startswith('#'):
elems = line.split('\t')
if len(elems) > 2:
for startswith in data.col1_startswith:
if line.lower().startswith( startswith ):
if len( elems ) > 3:
if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
dataset.metadata.nameCol = 4
if len(elems) < 6:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 6
metadata_set = True
break
if metadata_set:
break
Tabular.set_meta( self, dataset, overwrite=overwrite, skip=i )
def as_ucsc_display_file( self, dataset, **kwd ):
"""Returns file contents with only the bed data. If bed 6+, treat as interval."""
for line in open(dataset.file_name):
line = line.strip()
if line == "" or line.startswith("#"):
continue
fields = line.split('\t')
"""check to see if this file doesn't conform to strict genome browser accepted bed"""
try:
if len(fields) > 12:
return Interval.as_ucsc_display_file(self, dataset) # too many fields
if len(fields) > 6:
int(fields[6])
if len(fields) > 7:
int(fields[7])
if len(fields) > 8:
if int(fields[8]) != 0:
return Interval.as_ucsc_display_file(self, dataset)
if len(fields) > 9:
int(fields[9])
if len(fields) > 10:
fields2 = fields[10].rstrip(",").split(",") # remove trailing comma and split on comma
for field in fields2:
int(field)
if len(fields) > 11:
fields2 = fields[11].rstrip(",").split(",") # remove trailing comma and split on comma
for field in fields2:
int(field)
except:
return Interval.as_ucsc_display_file(self, dataset)
# only check first line for proper form
break
try:
return open(dataset.file_name)
except:
return "This item contains no content"
def sniff( self, filename ):
"""
Checks for 'bedness'
BED lines have three required fields and nine additional optional fields.
The number of fields per line must be consistent throughout any single set of data in
an annotation track. The order of the optional fields is binding: lower-numbered
        fields must always be populated if higher-numbered fields are used. The data types of
        the 12 columns are:
1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_tab.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'interval1.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'complete.bed' )
>>> Bed().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if not headers:
return False
for hdr in headers:
if (hdr[0] == '' or hdr[0].startswith( '#' )):
continue
valid_col1 = False
if len(hdr) < 3 or len(hdr) > 12:
return False
                for col1_prefix in data.col1_startswith:
                    if hdr[0].lower().startswith(col1_prefix):
valid_col1 = True
break
if valid_col1:
try:
int( hdr[1] )
int( hdr[2] )
except:
return False
if len( hdr ) > 4:
# hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this.
# hdr[4] is an int, 'score', a score between 0 and 1000.
try:
if int( hdr[4] ) < 0 or int( hdr[4] ) > 1000:
return False
except:
return False
if len( hdr ) > 5:
# hdr[5] is strand
if hdr[5] not in data.valid_strand:
return False
if len( hdr ) > 6:
# hdr[6] is thickStart, the starting position at which the feature is drawn thickly.
try:
int( hdr[6] )
except:
return False
if len( hdr ) > 7:
# hdr[7] is thickEnd, the ending position at which the feature is drawn thickly
try:
int( hdr[7] )
except:
return False
if len( hdr ) > 8:
# hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0)
try:
int( hdr[8] )
except:
try:
hdr[8].split(',')
except:
return False
if len( hdr ) > 9:
# hdr[9] is blockCount, the number of blocks (exons) in the BED line.
try:
block_count = int( hdr[9] )
except:
return False
if len( hdr ) > 10:
# hdr[10] is blockSizes - A comma-separated list of the block sizes.
                        # Sometimes the block_sizes and block_starts lists end in extra commas
try:
block_sizes = hdr[10].rstrip(',').split(',')
except:
return False
if len( hdr ) > 11:
# hdr[11] is blockStarts - A comma-separated list of block starts.
try:
block_starts = hdr[11].rstrip(',').split(',')
except:
return False
if len(block_sizes) != block_count or len(block_starts) != block_count:
return False
else:
return False
return True
except:
return False
class BedStrict( Bed ):
"""Tab delimited data in strict BED format - no non-standard columns allowed"""
file_ext = "bedstrict"
# no user change of datatype allowed
allow_datatype_change = False
# Read only metadata elements
MetadataElement( name="chromCol", default=1, desc="Chrom column", readonly=True, param=metadata.MetadataParameter )
MetadataElement( name="startCol", default=2, desc="Start column", readonly=True, param=metadata.MetadataParameter ) # TODO: start and end should be able to be set to these or the proper thick[start/end]?
MetadataElement( name="endCol", default=3, desc="End column", readonly=True, param=metadata.MetadataParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", readonly=True, param=metadata.MetadataParameter, no_value=0, optional=True )
MetadataElement( name="nameCol", desc="Name/Identifier column (click box & select)", readonly=True, param=metadata.MetadataParameter, no_value=0, optional=True )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def __init__( self, **kwd ):
Tabular.__init__( self, **kwd )
self.clear_display_apps() # only new style display applications for this datatype
def set_meta( self, dataset, overwrite=True, **kwd ):
Tabular.set_meta( self, dataset, overwrite=overwrite, **kwd) # need column count first
if dataset.metadata.columns >= 4:
dataset.metadata.nameCol = 4
if dataset.metadata.columns >= 6:
dataset.metadata.strandCol = 6
def sniff( self, filename ):
return False # NOTE: This would require aggressively validating the entire file
class Bed6( BedStrict ):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 6"""
file_ext = "bed6"
class Bed12( BedStrict ):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 12"""
file_ext = "bed12"
class _RemoteCallMixin:
def _get_remote_call_url( self, redirect_url, site_name, dataset, type, app, base_url ):
"""Retrieve the URL to call out to an external site and retrieve data.
This routes our external URL through a local galaxy instance which makes
the data available, followed by redirecting to the remote site with a
link back to the available information.
"""
internal_url = "%s" % url_for( controller='dataset', dataset_id=dataset.id, action='display_at', filename='%s_%s' % ( type, site_name ) )
base_url = app.config.get( "display_at_callback", base_url )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" %
( base_url, url_for( controller='root' ), dataset.id, type ) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
return link
@dataproviders.decorators.has_dataproviders
class Gff( Tabular, _RemoteCallMixin ):
"""Tab delimited data in Gff format"""
edam_format = "format_2305"
file_ext = "gff"
column_names = [ 'Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Group' ]
data_sources = { "data": "interval_index", "index": "bigwig", "feature_search": "fli" }
track_type = Interval.track_type
"""Add metadata elements"""
MetadataElement( name="columns", default=9, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="column_types", default=['str', 'str', 'str', 'int', 'int', 'int', 'str', 'str', 'str'],
param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
MetadataElement( name="attributes", default=0, desc="Number of attributes", readonly=True, visible=False, no_value=0 )
MetadataElement( name="attribute_types", default={}, desc="Attribute types", param=metadata.DictParameter, readonly=True, visible=False, no_value=[] )
def __init__( self, **kwd ):
"""Initialize datatype, by adding GBrowse display app"""
Tabular.__init__(self, **kwd)
self.add_display_app( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
self.add_display_app( 'gbrowse', 'display in Gbrowse', 'as_gbrowse_display_file', 'gbrowse_links' )
def set_attribute_metadata( self, dataset ):
"""
Sets metadata elements for dataset's attributes.
"""
# Use first N lines to set metadata for dataset attributes. Attributes
# not found in the first N lines will not have metadata.
num_lines = 200
attribute_types = {}
        for i, line in enumerate( open( dataset.file_name ) ):
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
if len( elems ) == 9:
try:
# Loop through attributes to set types.
for name, value in parse_gff_attributes( elems[8] ).items():
# Default type is string.
value_type = "str"
try:
# Try int.
int( value )
value_type = "int"
except:
try:
# Try float.
float( value )
value_type = "float"
except:
pass
attribute_types[ name ] = value_type
except:
pass
if i + 1 == num_lines:
break
# Set attribute metadata and then set additional metadata.
dataset.metadata.attribute_types = attribute_types
dataset.metadata.attributes = len( attribute_types )
def set_meta( self, dataset, overwrite=True, **kwd ):
self.set_attribute_metadata( dataset )
i = 0
        for i, line in enumerate( open( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
if len(elems) == 9:
try:
int( elems[3] )
int( elems[4] )
break
except:
pass
Tabular.set_meta( self, dataset, overwrite=overwrite, skip=i )
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return Tabular.make_html_table( self, dataset, column_names=self.column_names )
def get_estimated_display_viewport( self, dataset ):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
formats. This function should correctly handle both...
"""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max( viewport_feature_count, 500 ) # maximum number of lines to check; includes comment lines
if self.displayable( dataset ):
try:
seqid = None
start = sys.maxsize
stop = 0
fh = open( dataset.file_name )
while True:
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
if not line:
break # EOF
try:
if line.startswith( '##sequence-region' ): # ##sequence-region IV 6000000 6030000
elems = line.rstrip( '\n\r' ).split()
if len( elems ) > 3:
# line looks like:
# sequence-region ctg123 1 1497228
seqid = elems[1] # IV
start = int( elems[2] ) # 6000000
stop = int( elems[3] ) # 6030000
break # use location declared in file
elif len( elems ) == 2 and elems[1].find( '..' ) > 0:
# line looks like this:
# sequence-region X:120000..140000
elems = elems[1].split( ':' )
seqid = elems[0]
start = int( elems[1].split( '..' )[0] )
stop = int( elems[1].split( '..' )[1] )
break # use location declared in file
else:
                                log.warning( "line (%s) uses an unsupported ##sequence-region definition." % str( line ) )
# break #no break, if bad definition, we try another method
elif line.startswith("browser position"):
# Allow UCSC style browser and track info in the GFF file
pos_info = line.split()[-1]
seqid, startend = pos_info.split(":")
start, stop = map( int, startend.split("-") )
break # use location declared in file
elif True not in map( line.startswith, ( '#', 'track', 'browser' ) ): # line.startswith() does not accept iterator in python2.4
viewport_feature_count -= 1
elems = line.rstrip( '\n\r' ).split( '\t' )
if len( elems ) > 3:
if not seqid:
# We can only set the viewport for a single chromosome
seqid = elems[0]
if seqid == elems[0]:
# Make sure we have not spanned chromosomes
start = min( start, int( elems[3] ) )
stop = max( stop, int( elems[4] ) )
except:
# most likely start/stop is not an int or not enough fields
pass
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip( '\n\r' ) == line:
assert readline_count > 0, Exception( 'Viewport readline count exceeded for dataset %s.' % dataset.id )
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if seqid is not None:
return ( seqid, str( start ), str( stop ) ) # Necessary to return strings?
except Exception as e:
# unexpected error
log.exception( str( e ) )
return ( None, None, None ) # could not determine viewport
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport( dataset )
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey ):
if site_name in app.datatypes_registry.get_display_sites('ucsc'):
redirect_url = urllib.quote_plus(
"%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" %
( site_url, dataset.dbkey, seqid, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport( dataset )
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('gbrowse', dataset.dbkey ):
if site_name in app.datatypes_registry.get_display_sites('gbrowse'):
if seqid.startswith( 'chr' ) and len( seqid ) > 3:
seqid = seqid[3:]
redirect_url = urllib.quote_plus( "%s/?q=%s:%s..%s&eurl=%%s" % ( site_url, seqid, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in gff format
GFF lines have nine required fields that must be tab-separated.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'gff_version_3.gff' )
>>> Gff().sniff( fname )
False
>>> fname = get_test_fname( 'test.gff' )
>>> Gff().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '2' ) < 0:
return False
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
int( hdr[4] )
except:
return False
if hdr[5] != '.':
try:
float( hdr[5] )
except:
return False
if hdr[6] not in data.valid_strand:
return False
return True
except:
return False
# ------------- Dataproviders
    # redefined here because the superclass is Tabular
@dataproviders.decorators.dataprovider_factory( 'genomic-region',
dataproviders.dataset.GenomicRegionDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.GenomicRegionDataProvider( dataset, 0, 3, 4, **settings )
@dataproviders.decorators.dataprovider_factory( 'genomic-region-dict',
dataproviders.dataset.GenomicRegionDataProvider.settings )
def genomic_region_dict_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] = True
return self.genomic_region_dataprovider( dataset, **settings )
@dataproviders.decorators.dataprovider_factory( 'interval',
dataproviders.dataset.IntervalDataProvider.settings )
def interval_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.IntervalDataProvider( dataset, 0, 3, 4, 6, 2, **settings )
@dataproviders.decorators.dataprovider_factory( 'interval-dict',
dataproviders.dataset.IntervalDataProvider.settings )
def interval_dict_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] = True
return self.interval_dataprovider( dataset, **settings )
class Gff3( Gff ):
"""Tab delimited data in Gff3 format"""
edam_format = "format_1975"
file_ext = "gff3"
valid_gff3_strand = ['+', '-', '.', '?']
valid_gff3_phase = ['.', '0', '1', '2']
column_names = [ 'Seqid', 'Source', 'Type', 'Start', 'End', 'Score', 'Strand', 'Phase', 'Attributes' ]
track_type = Interval.track_type
"""Add metadata elements"""
MetadataElement( name="column_types", default=['str', 'str', 'str', 'int', 'int', 'float', 'str', 'int', 'list'],
param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Gff.__init__(self, **kwd)
def set_meta( self, dataset, overwrite=True, **kwd ):
self.set_attribute_metadata( dataset )
i = 0
        for i, line in enumerate( open( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
valid_start = False
valid_end = False
if len( elems ) == 9:
try:
start = int( elems[3] )
valid_start = True
except:
if elems[3] == '.':
valid_start = True
try:
end = int( elems[4] )
valid_end = True
except:
if elems[4] == '.':
valid_end = True
strand = elems[6]
phase = elems[7]
if valid_start and valid_end and start < end and strand in self.valid_gff3_strand and phase in self.valid_gff3_phase:
break
Tabular.set_meta( self, dataset, overwrite=overwrite, skip=i )
def sniff( self, filename ):
"""
Determines whether the file is in gff version 3 format
GFF 3 format:
1) adds a mechanism for representing more than one level
of hierarchical grouping of features and subfeatures.
2) separates the ideas of group membership and feature name/id
3) constrains the feature type field to be taken from a controlled
vocabulary.
4) allows a single feature, such as an exon, to belong to more than
one group at a time.
5) provides an explicit convention for pairwise alignments
6) provides an explicit convention for features that occupy disjunct regions
The format consists of 9 columns, separated by tabs (NOT spaces).
Undefined fields are replaced with the "." character, as described in the original GFF spec.
For complete details see http://song.sourceforge.net/gff3.shtml
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test.gff' )
>>> Gff3().sniff( fname )
False
>>> fname = get_test_fname('gff_version_3.gff')
>>> Gff3().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '3' ) >= 0:
return True
elif hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '3' ) < 0:
return False
# Header comments may have been stripped, so inspect the data
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
except:
if hdr[3] != '.':
return False
try:
int( hdr[4] )
except:
if hdr[4] != '.':
return False
if hdr[5] != '.':
try:
float( hdr[5] )
except:
return False
if hdr[6] not in self.valid_gff3_strand:
return False
if hdr[7] not in self.valid_gff3_phase:
return False
return True
except:
return False
class Gtf( Gff ):
"""Tab delimited data in Gtf format"""
edam_format = "format_2306"
file_ext = "gtf"
column_names = [ 'Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Attributes' ]
track_type = Interval.track_type
"""Add metadata elements"""
MetadataElement( name="columns", default=9, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="column_types", default=['str', 'str', 'str', 'int', 'int', 'float', 'str', 'int', 'list'],
param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
def sniff( self, filename ):
"""
Determines whether the file is in gtf format
GTF lines have nine required fields that must be tab-separated. The first eight GTF fields are the same as GFF.
The group field has been expanded into a list of attributes. Each attribute consists of a type/value pair.
Attributes must end in a semi-colon, and be separated from any following attribute by exactly one space.
The attribute list must begin with the two mandatory attributes:
gene_id value - A globally unique identifier for the genomic source of the sequence.
transcript_id value - A globally unique identifier for the predicted transcript.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format4
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( '1.bed' )
>>> Gtf().sniff( fname )
False
>>> fname = get_test_fname( 'test.gff' )
>>> Gtf().sniff( fname )
False
>>> fname = get_test_fname( 'test.gtf' )
>>> Gtf().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '2' ) < 0:
return False
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
int( hdr[4] )
except:
return False
if hdr[5] != '.':
try:
float( hdr[5] )
except:
return False
if hdr[6] not in data.valid_strand:
return False
# Check attributes for gene_id, transcript_id
attributes = parse_gff_attributes( hdr[8] )
if len( attributes ) >= 2:
if 'gene_id' not in attributes:
return False
if 'transcript_id' not in attributes:
return False
else:
return False
return True
except:
return False
@dataproviders.decorators.has_dataproviders
class Wiggle( Tabular, _RemoteCallMixin ):
"""Tab delimited data in wiggle format"""
edam_format = "format_3005"
file_ext = "wig"
track_type = "LineTrack"
data_sources = { "data": "bigwig", "index": "bigwig" }
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def __init__( self, **kwd ):
Tabular.__init__( self, **kwd )
self.add_display_app( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
self.add_display_app( 'gbrowse', 'display in Gbrowse', 'as_gbrowse_display_file', 'gbrowse_links' )
def get_estimated_display_viewport( self, dataset ):
"""Return a chrom, start, stop tuple for viewing a file."""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max( viewport_feature_count, 500 ) # maximum number of lines to check; includes comment lines
if self.displayable( dataset ):
try:
chrom = None
start = sys.maxsize
end = 0
span = 1
step = None
fh = open( dataset.file_name )
while True:
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
if not line:
break # EOF
try:
if line.startswith( "browser" ):
chr_info = line.rstrip( '\n\r' ).split()[-1]
chrom, coords = chr_info.split( ":" )
start, end = map( int, coords.split( "-" ) )
break # use the browser line
# variableStep chrom=chr20
if line and ( line.lower().startswith( "variablestep" ) or line.lower().startswith( "fixedstep" ) ):
if chrom is not None:
break # different chrom or different section of the chrom
chrom = line.rstrip( '\n\r' ).split("chrom=")[1].split()[0]
if 'span=' in line:
span = int( line.rstrip( '\n\r' ).split("span=")[1].split()[0] )
if 'step=' in line:
step = int( line.rstrip( '\n\r' ).split("step=")[1].split()[0] )
start = int( line.rstrip( '\n\r' ).split("start=")[1].split()[0] )
else:
fields = line.rstrip( '\n\r' ).split()
if fields:
if step is not None:
if not end:
end = start + span
else:
end += step
else:
start = min( int( fields[0] ), start )
end = max( end, int( fields[0] ) + span )
viewport_feature_count -= 1
except:
pass
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip( '\n\r' ) == line:
assert readline_count > 0, Exception( 'Viewport readline count exceeded for dataset %s.' % dataset.id )
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if chrom is not None:
return ( chrom, str( start ), str( end ) ) # Necessary to return strings?
except Exception as e:
# unexpected error
log.exception( str( e ) )
return ( None, None, None ) # could not determine viewport
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport( dataset )
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('gbrowse', dataset.dbkey ):
if site_name in app.datatypes_registry.get_display_sites('gbrowse'):
if chrom.startswith( 'chr' ) and len( chrom ) > 3:
chrom = chrom[3:]
redirect_url = urllib.quote_plus( "%s/?q=%s:%s..%s&eurl=%%s" % ( site_url, chrom, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport( dataset )
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey ):
if site_name in app.datatypes_registry.get_display_sites('ucsc'):
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" % ( site_url, dataset.dbkey, chrom, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return Tabular.make_html_table( self, dataset, skipchars=['track', '#'] )
def set_meta( self, dataset, overwrite=True, **kwd ):
max_data_lines = None
i = 0
        for i, line in enumerate( open( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
try:
float( elems[0] ) # "Wiggle track data values can be integer or real, positive or negative values"
break
except:
do_break = False
for col_startswith in data.col1_startswith:
if elems[0].lower().startswith( col_startswith ):
do_break = True
break
if do_break:
break
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
            # We'll arbitrarily only use the first 100 data lines in this wig file to calculate tabular attributes (column types).
            # This should be sufficient, except when we have mixed wig track types (bed, variable, fixed),
            # but those cases are not a single table that would have consistent column definitions.
            # Optional metadata values set in the Tabular class will be 'None'.
max_data_lines = 100
Tabular.set_meta( self, dataset, overwrite=overwrite, skip=i, max_data_lines=max_data_lines )
def sniff( self, filename ):
"""
        Determines whether the file is in wiggle format
        The .wig format is line-oriented. Wiggle data is preceded by a track definition line,
which adds a number of options for controlling the default display of this track.
Following the track definition line is the track data, which can be entered in several
different formats.
The track definition line begins with the word 'track' followed by the track type.
The track type with version is REQUIRED, and it currently must be wiggle_0. For example,
track type=wiggle_0...
For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'interval1.bed' )
>>> Wiggle().sniff( fname )
False
>>> fname = get_test_fname( 'wiggle.wig' )
>>> Wiggle().sniff( fname )
True
"""
headers = get_headers( filename, None )
try:
for hdr in headers:
if len(hdr) > 1 and hdr[0] == 'track' and hdr[1].startswith('type=wiggle'):
return True
return False
except:
return False
def get_track_window(self, dataset, data, start, end):
"""
Assumes we have a numpy file.
"""
        window = end - start  # renamed from `range` to avoid shadowing the builtin
        # Determine appropriate resolution to plot ~1000 points
        resolution = ( 10 ** math.ceil( math.log10( window / 1000 ) ) )
# Restrict to valid range
resolution = min( resolution, 100000 )
resolution = max( resolution, 1 )
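        # Hand-worked check of the formula above: for a 250,000-unit window,
        # 10 ** ceil(log10(250)) == 1000, which yields roughly 250 plotted points.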
        # Memory-map the array so we don't load all the data at once
        data = numpy.load( data, mmap_mode='r' )
# Grab just what we need
        t_start = int( math.floor( start / resolution ) )
        t_end = int( math.ceil( end / resolution ) )
        x = numpy.arange( t_start, t_end ) * resolution
        y = data[ t_start : t_end ]
return zip(x.tolist(), y.tolist())
def get_track_resolution( self, dataset, start, end):
        window = end - start  # renamed from `range` to avoid shadowing the builtin
        # Determine appropriate resolution to plot ~1000 points
        resolution = math.ceil( 10 ** math.ceil( math.log10( window / 1000 ) ) )
# Restrict to valid range
resolution = min( resolution, 100000 )
resolution = max( resolution, 1 )
return resolution
# ------------- Dataproviders
@dataproviders.decorators.dataprovider_factory( 'wiggle', dataproviders.dataset.WiggleDataProvider.settings )
def wiggle_dataprovider( self, dataset, **settings ):
dataset_source = dataproviders.dataset.DatasetDataProvider( dataset )
return dataproviders.dataset.WiggleDataProvider( dataset_source, **settings )
@dataproviders.decorators.dataprovider_factory( 'wiggle-dict', dataproviders.dataset.WiggleDataProvider.settings )
def wiggle_dict_dataprovider( self, dataset, **settings ):
dataset_source = dataproviders.dataset.DatasetDataProvider( dataset )
settings[ 'named_columns' ] = True
return dataproviders.dataset.WiggleDataProvider( dataset_source, **settings )
class CustomTrack ( Tabular ):
"""UCSC CustomTrack"""
file_ext = "customtrack"
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display app"""
Tabular.__init__(self, **kwd)
self.add_display_app( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def set_meta( self, dataset, overwrite=True, **kwd ):
Tabular.set_meta( self, dataset, overwrite=overwrite, skip=1 )
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return Tabular.make_html_table( self, dataset, skipchars=['track', '#'] )
def get_estimated_display_viewport( self, dataset, chrom_col=None, start_col=None, end_col=None ):
"""Return a chrom, start, stop tuple for viewing a file."""
# FIXME: only BED and WIG custom tracks are currently supported
# As per previously existing behavior, viewport will only be over the first intervals
max_line_count = 100 # maximum number of lines to check; includes comment lines
variable_step_wig = False
chrom = None
span = 1
if self.displayable( dataset ):
try:
fh = open( dataset.file_name )
while True:
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
if not line:
break # EOF
if not line.startswith( '#' ):
try:
if variable_step_wig:
fields = line.rstrip().split()
if len( fields ) == 2:
start = int( fields[ 0 ] )
return ( chrom, str( start ), str( start + span ) )
elif line and ( line.lower().startswith( "variablestep" ) or line.lower().startswith( "fixedstep" ) ):
chrom = line.rstrip( '\n\r' ).split("chrom=")[1].split()[0]
if 'span=' in line:
span = int( line.rstrip( '\n\r' ).split("span=")[1].split()[0] )
if 'start=' in line:
start = int( line.rstrip( '\n\r' ).split("start=")[1].split()[0] )
return ( chrom, str( start ), str( start + span ) )
else:
variable_step_wig = True
else:
fields = line.rstrip().split( '\t' )
if len( fields ) >= 3:
chrom = fields[ 0 ]
start = int( fields[ 1 ] )
end = int( fields[ 2 ] )
return ( chrom, str( start ), str( end ) )
except Exception:
# most likely a non-integer field has been encountered for start / stop
continue
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip( '\n\r' ) == line:
assert readline_count > 0, Exception( 'Viewport readline count exceeded for dataset %s.' % dataset.id )
line = fh.readline( VIEWPORT_READLINE_BUFFER_SIZE )
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not max_line_count:
# exceeded viewport or total line count to check
break
except Exception as e:
# unexpected error
log.exception( str( e ) )
return ( None, None, None ) # could not determine viewport
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites('ucsc'):
internal_url = "%s" % url_for( controller='dataset', dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % (base_url, url_for( controller='root' ), dataset.id, type) )
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" % (site_url, dataset.dbkey, chrom, start, stop ) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
ret_val.append( (site_name, link) )
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in customtrack format.
CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking
something like this.
track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'complete.bed' )
>>> CustomTrack().sniff( fname )
False
>>> fname = get_test_fname( 'ucsc.customtrack' )
>>> CustomTrack().sniff( fname )
True
"""
headers = get_headers( filename, None )
first_line = True
for hdr in headers:
if first_line:
first_line = False
try:
if hdr[0].startswith('track'):
color_found = False
visibility_found = False
for elem in hdr[1:]:
if elem.startswith('color'):
color_found = True
if elem.startswith('visibility'):
visibility_found = True
if color_found and visibility_found:
break
if not color_found or not visibility_found:
return False
else:
return False
except:
return False
else:
try:
if hdr[0] and not hdr[0].startswith( '#' ):
if len( hdr ) < 3:
return False
try:
int( hdr[1] )
int( hdr[2] )
except:
return False
except:
return False
return True
class ENCODEPeak( Interval ):
'''
Human ENCODE peak format. There are both broad and narrow peak formats.
Formats are very similar; narrow peak has an additional column, though.
    Broad peak ( http://genome.ucsc.edu/FAQ/FAQformat#format13 ):
    This format is used to provide called regions of signal enrichment based
    on pooled, normalized (interpreted) data. It is a BED6+3 format.
    Narrow peak ( http://genome.ucsc.edu/FAQ/FAQformat#format12 ):
    This format is used to provide called peaks of signal enrichment based on
    pooled, normalized (interpreted) data. It is a BED6+4 format.
'''
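    # A representative narrowPeak (BED6+4) line, tab-separated; values are
    # illustrative:
    # chr1  9356548  9356648  .  0  .  182  5.0945  -1  50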
file_ext = "encodepeak"
column_names = [ 'Chrom', 'Start', 'End', 'Name', 'Score', 'Strand', 'SignalValue', 'pValue', 'qValue', 'Peak' ]
data_sources = { "data": "tabix", "index": "bigwig" }
"""Add metadata elements"""
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def sniff( self, filename ):
return False
class ChromatinInteractions( Interval ):
'''
Chromatin interactions obtained from 3C/5C/Hi-C experiments.
'''
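    # A representative interaction line matching column_names below
    # (tab-separated; hypothetical values):
    # chr1  1000  5000  chr1  2000000  2005000  3.5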
file_ext = "chrint"
track_type = "DiagonalHeatmapTrack"
data_sources = { "data": "tabix", "index": "bigwig" }
column_names = [ 'Chrom1', 'Start1', 'End1', 'Chrom2', 'Start2', 'End2', 'Value' ]
"""Add metadata elements"""
MetadataElement( name="chrom1Col", default=1, desc="Chrom1 column", param=metadata.ColumnParameter )
MetadataElement( name="start1Col", default=2, desc="Start1 column", param=metadata.ColumnParameter )
MetadataElement( name="end1Col", default=3, desc="End1 column", param=metadata.ColumnParameter )
MetadataElement( name="chrom2Col", default=4, desc="Chrom2 column", param=metadata.ColumnParameter )
MetadataElement( name="start2Col", default=5, desc="Start2 column", param=metadata.ColumnParameter )
MetadataElement( name="end2Col", default=6, desc="End2 column", param=metadata.ColumnParameter )
MetadataElement( name="valueCol", default=7, desc="Value column", param=metadata.ColumnParameter )
MetadataElement( name="columns", default=7, desc="Number of columns", readonly=True, visible=False )
def sniff( self, filename ):
return False
class ScIdx(Tabular):
"""
ScIdx files are 1-based and consist of strand-specific coordinate counts.
They always have 5 columns, and the first row is the column labels:
'chrom', 'index', 'forward', 'reverse', 'value'.
Each line following the first consists of data:
chromosome name (type str), peak index (type int), Forward strand peak
count (type int), Reverse strand peak count (type int) and value (type int).
The value of the 5th 'value' column is the sum of the forward and reverse
peak count values.
"""
file_ext = "scidx"
MetadataElement(name="columns", default=0, desc="Number of columns", readonly=True, visible=False)
MetadataElement(name="column_types", default=[], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False, no_value=[])
def __init__(self, **kwd):
"""
Initialize scidx datatype.
"""
Tabular.__init__(self, **kwd)
# Don't set column names since the first
# line of the dataset displays them.
self.column_names = ['chrom', 'index', 'forward', 'reverse', 'value']
def sniff(self, filename):
"""
Checks for 'scidx-ness.'
"""
        fh = None
        try:
            count = 0
            fh = open(filename, "r")
while True:
line = fh.readline()
line = line.strip()
# The first line is always a comment like this:
# 2015-11-23 20:18:56.51;input.bam;READ1
if count == 0:
if line.startswith('#'):
count += 1
continue
else:
return False
if not line:
# EOF
if count > 1:
# The second line is always the labels:
# chrom index forward reverse value
# We need at least the column labels and a data line.
return True
return False
                # Skip the column-labels line.
if count > 1:
items = line.split('\t')
if len(items) != 5:
return False
index = items[1]
if not index.isdigit():
return False
forward = items[2]
if not forward.isdigit():
return False
reverse = items[3]
if not reverse.isdigit():
return False
value = items[4]
if not value.isdigit():
return False
if int(forward) + int(reverse) != int(value):
return False
if count == 100:
return True
count += 1
except:
return False
        finally:
            if fh is not None:
                fh.close()
return False
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules[__name__])
| 48.563057 | 208 | 0.52673 |
be5429303e0356e1730e3ab24c345369c08f6a49 | 614 | py | Python | Lib/site-packages/ghtTests/trivialscript.py | Jriszz/guacamole-python | cf0dfcaaa7d85c3577571954fc5b2b9dcf55ba17 | [
"MIT"
] | 1 | 2021-11-02T06:48:13.000Z | 2021-11-02T06:48:13.000Z | Lib/site-packages/ghtTests/trivialscript.py | Jriszz/guacamole-python | cf0dfcaaa7d85c3577571954fc5b2b9dcf55ba17 | [
"MIT"
] | null | null | null | Lib/site-packages/ghtTests/trivialscript.py | Jriszz/guacamole-python | cf0dfcaaa7d85c3577571954fc5b2b9dcf55ba17 | [
"MIT"
] | null | null | null | from __future__ import print_function
import sys
from twisted.application import reactors
reactors.installReactor('qt4')
from twisted.internet import reactor, task
from twisted.python import log, runtime
log.startLogging(sys.stdout)
def testreactor():
print('tick...')
def doit():
log.msg("reactor module: ", reactor.__module__)
task.LoopingCall(testreactor).start(1.0)
reactor.callLater(5.0, reactor.stop)
log.msg("platform runtime: " + repr(runtime.platform.getType()))
reactor.callWhenRunning(doit)
log.msg('calling reactor.run()')
reactor.run()
log.msg('fell off the bottom?...')
| 21.172414 | 68 | 0.737785 |
77b344a2b70254578540c292396381035e9c4739 | 1,139 | py | Python | setup.py | LuizHNLorena/jgraph | fa658299c07a588badcf4fb2cafa1fc68ac5e72d | [
"MIT"
] | 91 | 2015-12-24T09:11:54.000Z | 2022-01-22T21:18:50.000Z | setup.py | LuizHNLorena/jgraph | fa658299c07a588badcf4fb2cafa1fc68ac5e72d | [
"MIT"
] | 12 | 2015-12-21T21:57:34.000Z | 2019-12-18T17:33:22.000Z | setup.py | LuizHNLorena/jgraph | fa658299c07a588badcf4fb2cafa1fc68ac5e72d | [
"MIT"
] | 28 | 2015-12-25T14:14:56.000Z | 2021-12-10T06:24:35.000Z | from setuptools import setup
from python import __version__
setup(
name="jgraph",
version=__version__,
description="View graph data structures in the IPython notebook.",
url="http://github.com/patrickfuller/jgraph/",
license="MIT",
author="Patrick Fuller",
author_email="patrickfuller@gmail.com",
package_dir={'jgraph': 'python',
'jgraph.js': 'js'},
package_data={'jgraph.js': ['js/build/jgraph.min.js']},
include_package_data=True,
packages=['jgraph', 'jgraph.js'],
install_requires=['ipython'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Framework :: IPython',
'Topic :: Education :: Computer Aided Instruction (CAI)'
]
)
| 34.515152 | 70 | 0.614574 |
3f6aec647fcbf099ae720454868a43da376da8ce | 1,424 | py | Python | versions/paper.py | Advik-B/MC-Server-Installer | a52ed35eac828f220044b5a751c5f8ecf4d82f42 | [
"MIT"
] | 1 | 2021-08-15T11:23:09.000Z | 2021-08-15T11:23:09.000Z | versions/paper.py | Advik-B/Server-Installer | a52ed35eac828f220044b5a751c5f8ecf4d82f42 | [
"MIT"
] | null | null | null | versions/paper.py | Advik-B/Server-Installer | a52ed35eac828f220044b5a751c5f8ecf4d82f42 | [
"MIT"
] | null | null | null | from versions.default import *
class Paper(Server):
    @staticmethod
    def getlink(version_number: str) -> str:
versions = {
'1.10.2':'https://www.mediafire.com/file/1ds9hhiuvhazn5e/paper-1.10.2-916.jar/file',
'1.11.2':'https://www.mediafire.com/file/eeidpu8dcg0h23v/paper-1.11.2-1104.jar/file',
'1.12.2':"https://www.mediafire.com/file/qhop9drado0wwax/paper-1.12.2-1618.jar/file",
'1.13.2':'https://www.mediafire.com/file/c0a19hcqag70mcc/paper-1.13.2-655.jar/file',
'1.14.4':'https://www.mediafire.com/file/818earslka4fjoy/paper-1.14.4-243.jar/file',
'1.15.2':'https://www.mediafire.com/file/zsdkqs8s2ae4z2h/paper-1.15.2-391.jar/file',
'1.16.5':'https://www.mediafire.com/file/pfyj4n1lbc2grm7/paper-1.16.5-786.jar/file',
'1.17.1':'https://www.mediafire.com/file/6dzmfryu6f7lbwk/paper-1.17.1-186.jar/file',
'1.8.9':'https://www.mediafire.com/file/346u26k0t169x7t/paper-1.8.8-443.jar/file',
'1.9.4':'https://www.mediafire.com/file/p4qbs1z59emqsy6/paper-1.9.4-773.jar/file'
}
        if version_number in versions:
            return versions[version_number]
else:
print()
print(f'The version "{version_number}" is not found!')
print()
return "about:blank" | 38.486486 | 97 | 0.606039 |
7968cb2897c532040d9f32f888ffdb6e6422ba4f | 2,706 | py | Python | modules/filter.py | TheHercules/Telegram-UserBot | 62f7abf68eaf729779f9e0c4af4c000445d6136b | [
"Unlicense"
] | 2 | 2018-10-23T09:15:18.000Z | 2019-02-14T11:47:08.000Z | modules/filter.py | pkp1337x/Telegram-UserBot | f139a1b09f01da795a663fc8ff9d147ecc51e8de | [
"Unlicense"
] | null | null | null | modules/filter.py | pkp1337x/Telegram-UserBot | f139a1b09f01da795a663fc8ff9d147ecc51e8de | [
"Unlicense"
] | null | null | null | import sqlite3
@bot.on(events.NewMessage(incoming=True))
@bot.on(events.MessageEdited(incoming=True))
async def filter_incoming_handler(e):
db=sqlite3.connect("filters.db")
cursor=db.cursor()
cursor.execute('''SELECT * FROM FILTER''')
all_rows = cursor.fetchall()
for row in all_rows:
if int(row[0]) == int(e.chat_id):
if str(row[1]) in str(e.text):
await e.reply(row[2])
db.close()
@bot.on(events.NewMessage(outgoing=True, pattern='.filter'))
@bot.on(events.MessageEdited(outgoing=True, pattern='.filter'))
async def add_filter(e):
message=e.text
kek=message.split()
db=sqlite3.connect("filters.db")
cursor=db.cursor()
string=""
for i in range(2,len(kek)):
string=string+" "+str(kek[i])
cursor.execute('''INSERT INTO FILTER VALUES(?,?,?)''', (int(e.chat_id),kek[1],string))
db.commit()
await e.edit("```Added Filter Successfully```")
db.close()
@bot.on(events.NewMessage(outgoing=True, pattern='.nofilter'))
@bot.on(events.MessageEdited(outgoing=True, pattern='.nofilter'))
async def remove_filter(e):
message=e.text
kek=message.split()
db=sqlite3.connect("filters.db")
cursor=db.cursor()
cursor.execute('''DELETE FROM FILTER WHERE chat_id=? AND filter=?''', (int(e.chat_id),kek[1]))
db.commit()
await e.edit("```Removed Filter Successfully```")
db.close()
@bot.on(events.NewMessage(outgoing=True, pattern='.rmfilters'))
@bot.on(events.MessageEdited(outgoing=True, pattern='.rmfilters'))
async def kick_marie_filter(e):
await e.edit("```Will be kicking away all Marie filters.```")
    await asyncio.sleep(3)  # non-blocking sleep; time.sleep() would stall the event loop
r = await e.get_reply_message()
filters = r.text.split('-')[1:]
for filter in filters:
await e.reply('/stop %s' % (filter.strip()))
await asyncio.sleep(0.3)
await e.respond('/filter filters @baalajimaestro kicked them all')
await e.respond("```Successfully cleaned Marie filters yaay!```\n Gimme cookies @baalajimaestro")
if LOGGER:
await bot.send_message(LOGGER_GROUP,"I cleaned all Marie filters at "+str(e.chat_id))
@bot.on(events.NewMessage(outgoing=True, pattern='.get filters'))
@bot.on(events.MessageEdited(outgoing=True, pattern='.get filters'))
async def filters_active(e):
db=sqlite3.connect("filters.db")
cursor=db.cursor()
transact="Filters active on this chat: \n"
cursor.execute('''SELECT * FROM FILTER''')
all_rows = cursor.fetchall()
for row in all_rows:
if int(row[0]) == int(e.chat_id):
transact=transact+"-"+str(row[1])+" : "+str(row[2])+"\n"
db.close()
await e.edit(transact)
| 41 | 101 | 0.642646 |
abf0666002d1b2b7d4794d89adf2a0ee200d4d85 | 1,651 | py | Python | stonesoup/predictor/base.py | riskaware-ltd/Stone-Soup | ef3a2fb7e121c00dcf458e370794db3785b732e3 | [
"MIT"
] | null | null | null | stonesoup/predictor/base.py | riskaware-ltd/Stone-Soup | ef3a2fb7e121c00dcf458e370794db3785b732e3 | [
"MIT"
] | 4 | 2020-03-10T13:51:00.000Z | 2020-03-23T12:38:24.000Z | stonesoup/predictor/base.py | riskaware-ltd/Stone-Soup | ef3a2fb7e121c00dcf458e370794db3785b732e3 | [
"MIT"
] | 1 | 2019-12-09T14:33:09.000Z | 2019-12-09T14:33:09.000Z | # -*- coding: utf-8 -*-
"""Base classes for Stone Soup Predictor interface"""
from abc import abstractmethod
from ..base import Base, Property
from ..models.transition import TransitionModel
from ..models.control import ControlModel
class Predictor(Base):
r"""Predictor base class
A predictor is used to predict a new :class:`~.State` given a prior
:class:`~.State` and a :class:`~.TransitionModel`. In addition, a
:class:`~.ControlModel` may be used to model an external influence on the
state.
.. math::
\mathbf{x}_{k|k-1} = f_k(\mathbf{x}_{k-1}, \mathbf{\nu}_k) +
b_k(\mathbf{u}_k, \mathbf{\eta}_k)
where :math:`\mathbf{x}_{k-1}` is the prior state,
:math:`f_k(\mathbf{x}_{k-1})` is the transition function,
:math:`\mathbf{u}_k` the control vector, :math:`b_k(\mathbf{u}_k)` the
control input and :math:`\mathbf{\nu}_k` and :math:`\mathbf{\eta}_k` the
transition and control model noise respectively.
"""
transition_model = Property(TransitionModel, doc="transition model")
control_model = Property(ControlModel, default=None, doc="control model")
@abstractmethod
def predict(self, prior, timestamp=None, *args, **kwargs):
"""The prediction function itself
Parameters
----------
prior : :class:`~.State`
The prior state
timestamp : :class:`datetime.datetime`, optional
Time at which the prediction is made (used by the transition
model)
Returns
-------
: :class:`~.StatePrediction`
State prediction
"""
raise NotImplementedError
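# A minimal usage sketch (hypothetical subclass, not part of Stone Soup's API;
# StatePrediction would come from stonesoup.types.prediction):
#
#   class DriftOnlyPredictor(Predictor):
#       def predict(self, prior, timestamp=None, **kwargs):
#           # Propagate the prior through the transition model without noise.
#           x = self.transition_model.function(prior, time_interval=...)
#           return StatePrediction(x, timestamp=timestamp)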
| 32.372549 | 77 | 0.632344 |
1ae23d05c78cd40b3775e9824b2fd1f3576cba5a | 404 | py | Python | timeseries/computeError.py | Thanduriel/NeuralTurbulence | 044729c39b77b0acf73071b15c5ddfa79cb1f9cb | [
"MIT"
] | null | null | null | timeseries/computeError.py | Thanduriel/NeuralTurbulence | 044729c39b77b0acf73071b15c5ddfa79cb1f9cb | [
"MIT"
] | null | null | null | timeseries/computeError.py | Thanduriel/NeuralTurbulence | 044729c39b77b0acf73071b15c5ddfa79cb1f9cb | [
"MIT"
] | null | null | null | import numpy as np
import sys
sys.path.append("../utils")
import ioext
# Computes the L2 error of the given numpy arrays.
def error(array1, array2):
dif = np.subtract(array1, array2)
return np.linalg.norm(dif.flatten(), ord=2)
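# Worked example: error(np.zeros(3), np.ones(3)) == sqrt(1 + 1 + 1) ~= 1.732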
densities = ioext.loadData("data/validation32New/density_*.uni")
total = 0
for i in range(0, len(densities)-1):
total += error(densities[i], densities[i+1])
print(total) | 23.764706 | 64 | 0.725248 |
44a3de06c0518aebb55cfa8f2a1bbfd7551df14d | 837 | py | Python | aigpyqt/__init__.py | AIGMix/AIGPYQT | a814758d6a1dbdc75cc4ef794470c4686d28685d | [
"MIT"
] | null | null | null | aigpyqt/__init__.py | AIGMix/AIGPYQT | a814758d6a1dbdc75cc4ef794470c4686d28685d | [
"MIT"
] | null | null | null | aigpyqt/__init__.py | AIGMix/AIGPYQT | a814758d6a1dbdc75cc4ef794470c4686d28685d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : __init__.py
@Date : 2021/05/08
@Author : Yaronzz
@Version : 1.0
@Contact : yaronhuang@foxmail.com
@Desc :
'''
import sys
import aigpy
import aigpyqt.theme
from PyQt5.Qt import *
from aigpyqt.page.loginView import LoginView
loginView = None
def buttonClicked():
account = loginView.getAccountInfo()
username = account['username']
password = account['password']
if aigpy.string.isNull(username) or aigpy.string.isNull(password):
        loginView.showErrMessage('Please check and re-enter the correct username and password')
if __name__ == '__main__':
qss = aigpyqt.theme.getThemeQssContent()
app = QApplication(sys.argv)
app.setStyleSheet(qss)
loginView = LoginView()
loginView.setButtonClickFunction(buttonClicked)
loginView.show()
sys.exit(app.exec_())
| 22.621622 | 70 | 0.691756 |
fab9e5b224f9e9778b3077b5ce0d556705ac6ea4 | 788 | py | Python | recipes/libnsgif/all/test_package/conanfile.py | Speak2Erase/conan-center-index | 36ef6081cd4cd201542bd9790cc654c4eafe7b0f | [
"MIT"
] | null | null | null | recipes/libnsgif/all/test_package/conanfile.py | Speak2Erase/conan-center-index | 36ef6081cd4cd201542bd9790cc654c4eafe7b0f | [
"MIT"
] | null | null | null | recipes/libnsgif/all/test_package/conanfile.py | Speak2Erase/conan-center-index | 36ef6081cd4cd201542bd9790cc654c4eafe7b0f | [
"MIT"
] | null | null | null | import os
from conans import ConanFile, tools
from conan.tools.cmake import CMake
from conan.tools.layout import cmake_layout
class LibnsgifTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualBuildEnv and VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "CMakeDeps", "CMakeToolchain", "VirtualBuildEnv", "VirtualRunEnv"
apply_env = False
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def layout(self):
cmake_layout(self)
def test(self):
if not tools.cross_building(self):
cmd = os.path.join(self.cpp.build.bindirs[0], "example")
self.run(cmd, env="conanrun")
| 29.185185 | 100 | 0.668782 |
a51e8a2285d0ed669d164028ece47abad9db855a | 1,871 | py | Python | speaking_portfolio/migrations/0007_add_coverage_model.py | jacobian/jacobian.org | cacff78aa68cb1585a63c04ff5bd7fc484066730 | [
"Apache-2.0"
] | 8 | 2018-11-12T21:11:18.000Z | 2020-10-20T09:03:54.000Z | speaking_portfolio/migrations/0007_add_coverage_model.py | jacobian/jacobian.org | cacff78aa68cb1585a63c04ff5bd7fc484066730 | [
"Apache-2.0"
] | 5 | 2018-11-28T12:56:57.000Z | 2020-02-05T21:56:48.000Z | speaking_portfolio/migrations/0007_add_coverage_model.py | jacobian/jacobian.org | cacff78aa68cb1585a63c04ff5bd7fc484066730 | [
"Apache-2.0"
] | 5 | 2018-11-19T16:47:15.000Z | 2020-02-14T22:34:26.000Z | # Generated by Django 2.1.3 on 2018-11-16 00:21
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django_postgres_unlimited_varchar
class Migration(migrations.Migration):
dependencies = [
("speaking_portfolio", "0006_drop_conference_fields_from_presentation")
]
operations = [
migrations.CreateModel(
name="Coverage",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"type",
django_postgres_unlimited_varchar.UnlimitedCharField(
choices=[
("video", "Video"),
("slides", "Slides"),
("link", "Link"),
("notes", "Notes"),
("write-up", "Write-Up"),
]
),
),
("url", models.URLField()),
(
"oembed",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, null=True
),
),
(
"presentation",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="coverage",
to="speaking_portfolio.Presentation",
),
),
],
options={"verbose_name_plural": "coverage"},
)
]
| 31.711864 | 79 | 0.403528 |
ebb78a041769cb8b3ecf52fdf5766ac97740e671 | 9,066 | py | Python | classroom/views/teachers.py | jtryan/AssessmentApplication | d7559957fcbce9eac3739dcf2bba366edcd3363b | [
"MIT"
] | null | null | null | classroom/views/teachers.py | jtryan/AssessmentApplication | d7559957fcbce9eac3739dcf2bba366edcd3363b | [
"MIT"
] | null | null | null | classroom/views/teachers.py | jtryan/AssessmentApplication | d7559957fcbce9eac3739dcf2bba366edcd3363b | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Avg, Count
from django.forms import inlineformset_factory
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import (CreateView, DeleteView, DetailView, ListView,
UpdateView)
from ..decorators import teacher_required
from ..forms import BaseAnswerInlineFormSet, QuestionForm, TeacherSignUpForm
from ..models import Answer, Question, Quiz, User, Subject
class TeacherSignUpView(CreateView):
model = User
form_class = TeacherSignUpForm
template_name = 'registration/signup_form.html'
def get_context_data(self, **kwargs):
kwargs['user_type'] = 'teacher'
return super().get_context_data(**kwargs)
def form_valid(self, form):
user = form.save()
login(self.request, user)
return redirect('teachers:quiz_change_list')
@method_decorator([login_required, teacher_required], name='dispatch')
class QuizListView(ListView):
model = Quiz
ordering = ('name', )
context_object_name = 'quizzes'
template_name = 'classroom/teachers/quiz_change_list.html'
def get_queryset(self):
queryset = self.request.user.quizzes \
.select_related('subject') \
.annotate(questions_count=Count('questions', distinct=True)) \
.annotate(taken_count=Count('taken_quizzes', distinct=True))
return queryset
@method_decorator([login_required, teacher_required], name='dispatch')
class QuizCreateView(CreateView):
model = Quiz
fields = ('name', 'subject', 'duration')
template_name = 'classroom/teachers/quiz_add_form.html'
def form_valid(self, form):
quiz = form.save(commit=False)
quiz.owner = self.request.user
quiz.save()
        messages.success(self.request, 'The quiz was created successfully! Go ahead and add some questions now.')
return redirect('teachers:quiz_change', quiz.pk)
@method_decorator([login_required, teacher_required], name='dispatch')
class QuizUpdateView(UpdateView):
model = Quiz
fields = ('name', 'subject','duration' )
context_object_name = 'quiz'
template_name = 'classroom/teachers/quiz_change_form.html'
def get_context_data(self, **kwargs):
kwargs['questions'] = self.get_object().questions.annotate(answers_count=Count('answers'))
return super().get_context_data(**kwargs)
def get_queryset(self):
'''
This method is an implicit object-level permission management
This view will only match the ids of existing quizzes that belongs
to the logged in user.
'''
return self.request.user.quizzes.all()
def get_success_url(self):
return reverse('teachers:quiz_change', kwargs={'pk': self.object.pk})
@method_decorator([login_required, teacher_required], name='dispatch')
class QuizDeleteView(DeleteView):
model = Quiz
context_object_name = 'quiz'
template_name = 'classroom/teachers/quiz_delete_confirm.html'
success_url = reverse_lazy('teachers:quiz_change_list')
def delete(self, request, *args, **kwargs):
quiz = self.get_object()
        messages.success(request, 'The quiz %s was deleted successfully!' % quiz.name)
return super().delete(request, *args, **kwargs)
def get_queryset(self):
return self.request.user.quizzes.all()
@method_decorator([login_required, teacher_required], name='dispatch')
class QuizResultsView(DetailView):
model = Quiz
context_object_name = 'quiz'
template_name = 'classroom/teachers/quiz_results.html'
def get_context_data(self, **kwargs):
quiz = self.get_object()
taken_quizzes = quiz.taken_quizzes.select_related('student__user').order_by('-date')
total_taken_quizzes = taken_quizzes.count()
quiz_score = quiz.taken_quizzes.aggregate(average_score=Avg('score'))
extra_context = {
'taken_quizzes': taken_quizzes,
'total_taken_quizzes': total_taken_quizzes,
'quiz_score': quiz_score
}
kwargs.update(extra_context)
return super().get_context_data(**kwargs)
def get_queryset(self):
return self.request.user.quizzes.all()
@login_required
@teacher_required
def question_add(request, pk):
    # By filtering the quiz by the url keyword argument `pk` and
    # by the owner, which is the logged-in user, we are protecting
    # this view at the object level. Meaning only the owner of the
    # quiz will be able to add questions to it.
quiz = get_object_or_404(Quiz, pk=pk, owner=request.user)
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
question = form.save(commit=False)
question.quiz = quiz
question.save()
messages.success(request, 'You may now add answers/options to the question.')
return redirect('teachers:question_change', quiz.pk, question.pk)
else:
form = QuestionForm()
return render(request, 'classroom/teachers/question_add_form.html', {'quiz': quiz, 'form': form})
@login_required
@teacher_required
def question_change(request, quiz_pk, question_pk):
    # Similar to the `question_add` view, this view is also managing
    # the permissions at object level. By querying both `quiz` and
    # `question` we are making sure only the owner of the quiz can
    # change its details and also only questions that belong to this
    # specific quiz can be changed via this url (in cases where the
    # user might have forged/played with the url params).
quiz = get_object_or_404(Quiz, pk=quiz_pk, owner=request.user)
question = get_object_or_404(Question, pk=question_pk, quiz=quiz)
AnswerFormSet = inlineformset_factory(
Question, # parent model
Answer, # base model
formset=BaseAnswerInlineFormSet,
fields=('text', 'is_correct'),
min_num=2,
validate_min=True,
max_num=10,
validate_max=True
)
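    # With min_num=2/validate_min=True and max_num=10/validate_max=True, the
    # formset rejects submissions that keep fewer than 2 or more than 10
    # answers for the question (standard inlineformset_factory behaviour).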
if request.method == 'POST':
form = QuestionForm(request.POST, instance=question)
formset = AnswerFormSet(request.POST, instance=question)
if form.is_valid() and formset.is_valid():
with transaction.atomic():
form.save()
formset.save()
            messages.success(request, 'Question and answers saved successfully!')
return redirect('teachers:quiz_change', quiz.pk)
else:
form = QuestionForm(instance=question)
formset = AnswerFormSet(instance=question)
return render(request, 'classroom/teachers/question_change_form.html', {
'quiz': quiz,
'question': question,
'form': form,
'formset': formset
})
@method_decorator([login_required, teacher_required], name='dispatch')
class QuestionDeleteView(DeleteView):
model = Question
context_object_name = 'question'
template_name = 'classroom/teachers/question_delete_confirm.html'
pk_url_kwarg = 'question_pk'
def get_context_data(self, **kwargs):
question = self.get_object()
kwargs['quiz'] = question.quiz
return super().get_context_data(**kwargs)
def delete(self, request, *args, **kwargs):
question = self.get_object()
        messages.success(request, 'The question %s was deleted successfully!' % question.text)
return super().delete(request, *args, **kwargs)
def get_queryset(self):
return Question.objects.filter(quiz__owner=self.request.user)
def get_success_url(self):
question = self.get_object()
return reverse('teachers:quiz_change', kwargs={'pk': question.quiz_id})
# For course option
@method_decorator([login_required, teacher_required], name='dispatch')
class CourseListView(ListView):
model = Subject
ordering = ('name', )
context_object_name = 'courses'
template_name = 'classroom/teachers/course_change_list.html'
def get_queryset(self):
queryset = Subject.objects.all()
return queryset
# queryset = self.request.user.select_related('subject') \
# .annotate(questions_count=Count('questions', distinct=True)) \
# .annotate(taken_count=Count('taken_quizzes', distinct=True))
@method_decorator([login_required, teacher_required], name='dispatch')
class CourseCreateView(CreateView):
model = Subject
fields = ('name', 'color', )
template_name = 'classroom/teachers/course_add_form.html'
def form_valid(self, form):
course = form.save(commit=False)
course.owner = self.request.user
course.save()
        messages.success(self.request, 'The class was created successfully! Go ahead and add a quiz now.')
# return redirect('teachers:quiz_add', course.pk)
| 37.155738 | 113 | 0.688396 |
e4237ed0776e04a4dc0f96893d6192a0fbc0bfa2 | 6,265 | py | Python | find_relationships.py | Ernir/icelanders-on-twitter | 1cd08e735bc622d44e96d18e7e04a0a7e0cb9665 | [
"MIT"
] | null | null | null | find_relationships.py | Ernir/icelanders-on-twitter | 1cd08e735bc622d44e96d18e7e04a0a7e0cb9665 | [
"MIT"
] | null | null | null | find_relationships.py | Ernir/icelanders-on-twitter | 1cd08e735bc622d44e96d18e7e04a0a7e0cb9665 | [
"MIT"
] | null | null | null | from pprint import pprint
import os
import json
from time import time
import tweepy
def get_tweepy_instance():
"""
Reads credentials from environment variables and returns a Tweepy instance.
"""
consumer_key = os.environ.get("ERNIRNET_TWITTER_APP_ID")
consumer_secret = os.environ.get("ERNIRNET_TWITTER_APP_SECRET")
access_token = os.environ.get("ERNIRNET_TWITTER_APP_ACCESS_TOKEN")
access_secret = os.environ.get("ERNIRNET_TWITTER_APP_ACCESS_TOKEN_SECRET")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.secure = True
auth.set_access_token(access_token, access_secret)
return tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
def discover_icelanders(api, known_users):
"""
    Searches for tweets in Iceland, and stores those users whose location is also associated with Iceland.
Updates the given list of known_users.
"""
iceland_place_id = "c3932d3da7922986" # Predefined by Twitter
tweets = api.search(q="place:{0}".format(iceland_place_id), count=100)
for tweet in tweets:
if looks_icelandic(tweet.user.location) and tweet.user.id not in known_users:
known_users.append(tweet.user.id)
return known_users
def create_new_users(api, relationships, user_ids, verbose=False):
"""
Adds the given user ids to the relationships dictionary if they are Icelanders.
"""
known_foreigners = get_foreigners()
    curated_ids = [uid for uid in user_ids if uid not in relationships and uid not in known_foreigners]
i = 0
page_size = 100
cache_hits = len(user_ids) - len(curated_ids)
users_created = 0
foreigners = 0
while i < len(curated_ids):
id_subset = curated_ids[i:i+page_size]
if verbose:
print("Creating {} users".format(len(id_subset)))
try:
users_list = api.lookup_users(user_ids=id_subset)
i += page_size
for user in users_list:
user_id = str(user.id)
if looks_icelandic(user.location):
users_created += 1
associate_id_with_name(user_id, user.screen_name)
relationships[user_id] = []
else:
store_foreigner(user_id)
foreigners += 1
except tweepy.error.TweepError as error:
print(str(error) + ", retrying")
if verbose:
print("{0} cache hits, {1} foreigners found, {2} new users created".format(
cache_hits, foreigners, users_created
))
return relationships
def associate_id_with_name(user_id, user_name):
"""
Stores an association between a given user id and an user name.
"""
json_filename = "ids_to_names.json"
with open(json_filename) as association_file:
associations = json.load(association_file)
associations[user_id] = user_name
with open(json_filename, "w") as association_file:
json.dump(associations, association_file, indent=4)
def get_foreigners():
"""
Returns a set of all currently recorded non-Icelanders
"""
json_filename = "foreigners.json"
with open(json_filename) as foreigners_file:
foreigners = json.load(foreigners_file)
return set(foreigners["foreigners"])
def store_foreigner(user_id):
"""
Stores the given user ID in a file, so it can be skipped later on
"""
json_filename = "foreigners.json"
with open(json_filename) as foreigners_file:
foreigners = json.load(foreigners_file)
foreigners["foreigners"].append(int(user_id))
with open(json_filename, "w") as foreigners_file:
json.dump(foreigners, foreigners_file, indent=4)
def discover_followers(api, relationships, user_id, verbose=False):
"""
Uses the given Tweepy API object instance to find the followers of the user with the given user_id.
Updates and returns the given "relationships" dict.
"""
new_relationships = []
try:
for many_users in tweepy.Cursor(api.followers_ids, user_id=user_id).pages():
relationships = create_new_users(api, relationships, many_users, verbose=verbose)
new_relationships.extend(many_users)
except tweepy.error.TweepError as error:
print(str(error) + ", ignored")
pass # The API tends to emit the response "buffering", which Tweepy does not understand. Blissfully ignoring!
relationships[str(user_id)] = new_relationships
return relationships
def looks_icelandic(location_string):
"""
Returns True if the given string is something that could be taken to mean "Iceland", False otherwise.
"""
return any(loc in location_string for loc in ["Iceland", "Ísland", "Island", "iceland", "ísland", "island"])
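# Informal examples (not executed):
#   looks_icelandic("Reykjavik, Iceland")  -> True
#   looks_icelandic("Oslo, Norway")        -> False
# Note the check is substring-based, so a location like "Rhode Island" also
# matches via "Island"; the word list trades precision for recall.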
def main():
# Some hard-coded variables that probably should be parameters
json_filename = "relationships_by_id.json"
verbose = True
    time_limit = 0  # 60 * 60 * 16
start_time = time()
# Read all the connections we have already discovered:
with open(json_filename) as relationship_file:
if verbose:
print("Reading previously recorded relationships")
relationships = json.load(relationship_file)
# Look for users we've never seen before, and record them
api = get_tweepy_instance()
if verbose:
print("Searching for tweets in Iceland")
icelanders = discover_icelanders(api, list(relationships.keys()))
relationships = create_new_users(api, relationships, icelanders, True)
# Find users whose follower data we still haven't stored, and remedy the situation
without_followers = [user_id for user_id in relationships.keys() if len(relationships[user_id]) == 0]
for user_id in without_followers:
if verbose:
print("Looking for followers for {0}".format(user_id))
relationships = discover_followers(api, relationships, user_id, verbose)
print("Time elapsed: {0}".format(time() - start_time))
if time() - start_time > time_limit:
break
# Store the new data
with open(json_filename, "w") as relationship_file:
json.dump(relationships, relationship_file, indent=4)
if __name__ == '__main__':
main()
| 36.005747 | 118 | 0.68332 |
2bb240936067da76ac5445b23514763a9d05014d | 7,773 | py | Python | tests/test_tversky_loss.py | benduffy1/MONAI | 046e625b09262261373d7b8039fb652547201368 | [
"Apache-2.0"
] | 3 | 2020-06-22T20:59:14.000Z | 2021-04-09T21:24:45.000Z | tests/test_tversky_loss.py | Borda/MONAI | e0db5a564225a7cb62e7a23df97267019006302f | [
"Apache-2.0"
] | null | null | null | tests/test_tversky_loss.py | Borda/MONAI | e0db5a564225a7cb62e7a23df97267019006302f | [
"Apache-2.0"
] | 1 | 2020-05-27T12:53:58.000Z | 2020-05-27T12:53:58.000Z | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.losses import TverskyLoss
from tests.utils import SkipIfBeforePyTorchVersion, test_script_save
TEST_CASES = [
[ # shape: (1, 1, 2, 2), (1, 1, 2, 2)
{"include_background": True, "sigmoid": True, "smooth_nr": 1e-6, "smooth_dr": 1e-6},
{"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "target": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
0.307576,
],
[ # shape: (2, 1, 2, 2), (2, 1, 2, 2)
{"include_background": True, "sigmoid": True, "smooth_nr": 1e-4, "smooth_dr": 1e-4},
{
"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]], [[[1.0, -1.0], [-1.0, 1.0]]]]),
"target": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 0.0], [1.0, 0.0]]]]),
},
0.416657,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{"include_background": False, "to_onehot_y": True, "smooth_nr": 0, "smooth_dr": 0},
{
"input": torch.tensor([[[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]),
"target": torch.tensor([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]),
},
0.0,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{"include_background": False, "to_onehot_y": True, "smooth_nr": 0, "smooth_dr": 1e-3},
{
"input": torch.tensor([[[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]),
"target": torch.tensor([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]),
},
0.000999,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{"include_background": True, "to_onehot_y": True, "sigmoid": True, "smooth_nr": 1e-4, "smooth_dr": 1e-4},
{
"input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
"target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
},
0.435050,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{"include_background": True, "to_onehot_y": True, "sigmoid": True, "batch": True},
{
"input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
"target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
},
0.422979,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{
"include_background": True,
"to_onehot_y": True,
"sigmoid": True,
"reduction": "sum",
"smooth_nr": 1e-4,
"smooth_dr": 1e-4,
},
{
"input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
"target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
},
1.74013,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{"include_background": True, "to_onehot_y": True, "softmax": True, "smooth_nr": 1e-4, "smooth_dr": 1e-4},
{
"input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
"target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
},
0.383713,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{
"include_background": True,
"to_onehot_y": True,
"softmax": True,
"reduction": "none",
"smooth_nr": 1e-4,
"smooth_dr": 1e-4,
},
{
"input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
"target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
},
[[0.210961, 0.295339], [0.599952, 0.428547]],
],
[ # shape: (1, 1, 2, 2), (1, 1, 2, 2)
{"include_background": True, "sigmoid": True, "alpha": 0.3, "beta": 0.7, "smooth_nr": 1e-6, "smooth_dr": 1e-6},
{"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "target": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
0.3589,
],
[ # shape: (1, 1, 2, 2), (1, 1, 2, 2)
{"include_background": True, "sigmoid": True, "alpha": 0.7, "beta": 0.3, "smooth_nr": 1e-6, "smooth_dr": 1e-6},
{"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "target": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
0.247366,
],
[ # shape: (2, 1, 2, 2), (2, 1, 2, 2)
{"include_background": True, "other_act": torch.tanh, "smooth_nr": 1e-4, "smooth_dr": 1e-4},
{
"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]], [[[1.0, -1.0], [-1.0, 1.0]]]]),
"target": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 0.0], [1.0, 0.0]]]]),
},
0.999963,
],
[ # shape: (2, 1, 2, 2), (2, 1, 2, 2)
{"include_background": True, "other_act": torch.tanh, "smooth_nr": 0, "smooth_dr": 1e-3, "batch": True},
{
"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]], [[[1.0, -1.0], [-1.0, 1.0]]]]),
"target": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 0.0], [1.0, 0.0]]]]),
},
0.999963,
],
[ # shape: (2, 2, 3), (2, 1, 3)
{
"include_background": True,
"to_onehot_y": True,
"other_act": lambda x: torch.log_softmax(x, dim=1),
"smooth_nr": 1e-4,
"smooth_dr": 1e-4,
},
{
"input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
"target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
},
-8.533317,
],
]
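# Reading aid for the expected values above (a rough reminder, not a
# restatement of MONAI's implementation): the Tversky index generalizes Dice,
#   TI = TP / (TP + alpha * FP + beta * FN)
# and TverskyLoss returns 1 - TI, so alpha = beta = 0.5 reduces to Dice loss.
# The exact numbers also depend on the smooth_nr/smooth_dr terms.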
class TestTverskyLoss(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_shape(self, input_param, input_data, expected_val):
result = TverskyLoss(**input_param).forward(**input_data)
np.testing.assert_allclose(result.detach().cpu().numpy(), expected_val, rtol=1e-4)
def test_ill_shape(self):
loss = TverskyLoss()
with self.assertRaisesRegex(AssertionError, ""):
loss.forward(torch.ones((2, 2, 3)), torch.ones((4, 5, 6)))
chn_input = torch.ones((1, 1, 3))
chn_target = torch.ones((1, 1, 3))
with self.assertRaisesRegex(ValueError, ""):
TverskyLoss(reduction="unknown")(chn_input, chn_target)
with self.assertRaisesRegex(ValueError, ""):
TverskyLoss(reduction=None)(chn_input, chn_target)
def test_input_warnings(self):
chn_input = torch.ones((1, 1, 3))
chn_target = torch.ones((1, 1, 3))
with self.assertWarns(Warning):
loss = TverskyLoss(include_background=False)
loss.forward(chn_input, chn_target)
with self.assertWarns(Warning):
loss = TverskyLoss(softmax=True)
loss.forward(chn_input, chn_target)
with self.assertWarns(Warning):
loss = TverskyLoss(to_onehot_y=True)
loss.forward(chn_input, chn_target)
@SkipIfBeforePyTorchVersion((1, 7, 0))
def test_script(self):
loss = TverskyLoss()
test_input = torch.ones(2, 1, 8, 8)
test_script_save(loss, test_input, test_input)
if __name__ == "__main__":
unittest.main()
| 41.566845 | 119 | 0.487328 |
831a8fbfc0a2088437ce47ecdd01a6351032a489 | 3,630 | py | Python | libetrv/data_struct.py | AdamStrojek/libetrv | 28a48676f15180d71feadca163859d4b2d8832c3 | [
"Apache-2.0"
] | 36 | 2019-06-20T09:00:19.000Z | 2022-01-26T23:36:46.000Z | libetrv/data_struct.py | AdamStrojek/libetrv | 28a48676f15180d71feadca163859d4b2d8832c3 | [
"Apache-2.0"
] | 23 | 2019-05-10T09:47:05.000Z | 2021-09-28T22:19:31.000Z | libetrv/data_struct.py | AdamStrojek/libetrv | 28a48676f15180d71feadca163859d4b2d8832c3 | [
"Apache-2.0"
] | 14 | 2019-10-28T01:29:29.000Z | 2021-02-10T19:54:35.000Z | import enum
from .properties import eTRVData, eTRVSingleData
from .fields import eTRVField, TemperatureField, UTCDateTimeField, LocalDateTimeField, EnumField, \
HexField, TextField, BitField
class BatteryData(eTRVSingleData):
battery = eTRVField(read_only=True)
class Meta:
structure = {
0x10: """
unsigned char battery;
""",
}
use_encoding = False
read_only = True
direct_field = 'battery'
class ScheduleMode(enum.Enum):
MANUAL = 0
SCHEDULED = 1
VACATION = 3
HOLD = 5
class ConfigBits(enum.IntEnum):
ADAPTABLE_REGULATION = 0
VERTICAL_INSTALATION = 2
DISPLAY_FLIP = 3
SLOW_REGULATION = 4
VALVE_INSTALLED = 6
LOCK_CONTROL = 7
class SettingsData(eTRVData):
adaptable_regulation = BitField(name='config_bits', bit_position=ConfigBits.ADAPTABLE_REGULATION)
vertical_instalation = BitField(name='config_bits', bit_position=ConfigBits.VERTICAL_INSTALATION)
display_flip = BitField(name='config_bits', bit_position=ConfigBits.DISPLAY_FLIP)
slow_regulation = BitField(name='config_bits', bit_position=ConfigBits.SLOW_REGULATION)
valve_installed = BitField(name='config_bits', bit_position=ConfigBits.VALVE_INSTALLED)
lock_control = BitField(name='config_bits', bit_position=ConfigBits.LOCK_CONTROL)
temperature_min = TemperatureField()
temperature_max = TemperatureField()
frost_protection_temperature = TemperatureField()
schedule_mode = EnumField(enum_class=ScheduleMode)
vacation_temperature = TemperatureField()
vacation_from = UTCDateTimeField()
vacation_to = UTCDateTimeField()
class Meta:
structure = {
0x2a: """
unsigned char config_bits;
unsigned char temperature_min;
unsigned char temperature_max;
unsigned char frost_protection_temperature;
unsigned char schedule_mode;
unsigned char vacation_temperature;
int vacation_from;
int vacation_to;
unsigned char padding[2];
"""
}
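# A rough illustration of how the config_bits byte decomposes (assuming
# BitField simply reads the bit at `bit_position` of the named byte; this
# sketch is not part of the device protocol documentation):
#   config_bits = 0b01000001
#   -> adaptable_regulation (bit 0) = 1 and valve_installed (bit 6) = 1,
#      with all other flags cleared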
class TemperatureData(eTRVData):
room_temperature = TemperatureField(read_only=True)
set_point_temperature = TemperatureField(auto_save=True)
class Meta:
structure = {
0x2d: """
unsigned char set_point_temperature;
unsigned char room_temperature;
unsigned char padding[6];
"""
}
class NameData(eTRVSingleData):
name = TextField(max_length=16, auto_save=True)
class Meta:
structure = {
0x30: """
char name[16];
"""
}
direct_field = 'name'
class CurrentTimeData(eTRVSingleData):
current_time = LocalDateTimeField('time_local', tz_field='time_offset')
class Meta:
structure = {
0x36: """
int time_local;
int time_offset;
"""
}
direct_field = 'current_time'
class SecretKeyData(eTRVSingleData):
key = HexField(read_only=True)
class Meta:
structure = {
0x3f: """
char key[16];
"""
}
use_encoding = False
direct_field = 'key'
# class DaySchedule(eTRVData):
# __struct__ = """
# unsigned char _data[6];
# """
# class ScheduleStruct(eTRVData):
# __struct__ = """
# unsigned char _home_temperature;
# unsigned char _away_temperature;
# struct DaySchedule _schedule[7];
# """
| 27.709924 | 101 | 0.622865 |
f2d93478688e5faa20126822fc567b09c05fc7fc | 569 | py | Python | test/rules/test_bite_child.py | rileyhazard/SmartVA-Analyze-1 | 0573eeff27d03f54e7506db4f1631c0cd9f54bbb | [
"MIT"
] | 4 | 2019-01-23T12:57:47.000Z | 2020-04-18T17:13:08.000Z | test/rules/test_bite_child.py | rileyhazard/SmartVA-Analyze-1 | 0573eeff27d03f54e7506db4f1631c0cd9f54bbb | [
"MIT"
] | 4 | 2019-01-09T22:10:07.000Z | 2022-02-16T04:57:06.000Z | test/rules/test_bite_child.py | rileyhazard/SmartVA-Analyze-1 | 0573eeff27d03f54e7506db4f1631c0cd9f54bbb | [
"MIT"
] | 11 | 2018-12-11T22:01:13.000Z | 2022-01-07T11:38:02.000Z | from smartva.rules import bite_child as bite
from smartva.data.constants import *
VA = Child
def test_pass():
row = {
VA.BITE: YES,
VA.INJURY_DAYS: 0,
}
assert bite.logic_rule(row) is True
def test_fail_bite():
row = {
VA.BITE: NO,
VA.INJURY_DAYS: 0,
}
assert bite.logic_rule(row) is False
def test_fail_days():
row = {
VA.BITE: YES,
VA.INJURY_DAYS: 31,
}
assert bite.logic_rule(row) is False
def test_fail_no_data():
row = {}
assert bite.logic_rule(row) is False
| 14.973684 | 44 | 0.599297 |
e8adedc05b8e2d3d6f2457179753a4f571f787f9 | 12,446 | py | Python | parlai/tasks/multidogo/build.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 1 | 2022-03-13T21:02:22.000Z | 2022-03-13T21:02:22.000Z | parlai/tasks/multidogo/build.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 1 | 2022-01-18T09:14:27.000Z | 2022-01-18T09:14:27.000Z | parlai/tasks/multidogo/build.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 1 | 2022-01-24T13:22:18.000Z | 2022-01-24T13:22:18.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import parlai.core.build_data as build_data
from parlai.core.build_data import DownloadableFile
import csv
from itertools import islice
from pathlib import Path
import os
import json
import re
import tqdm
DEBUG_MISSING_RAW_CONVERSATIONS = False # Unnecessary once Amazon fixes multidogo
RESOURCE = DownloadableFile(
"https://github.com/awslabs/multi-domain-goal-oriented-dialogues-dataset/archive/master.zip",
"raw_data.zip",
"fb59c7261da2d30d9d24b9af309ebb4bf0e5b39f97d718201a7160e591e76a3c",
zipped=True,
)
RAW_DATA_PREFIX = "multi-domain-goal-oriented-dialogues-dataset-master/data/"
RAW_DATA_ANNOTATED_DATA_PATH = "paper_splits"
RAW_DATA_UNANNOTATED_DATA_PATH = "unannotated"
TURN_INTENT = "turn"
SENTENCE_INTENT = "sentence"
TURN_AND_SENTENCE_INTENT = "both"
RAW_DATA_SENTENCE_INTENT_PATH = "splits_annotated_at_sentence_level"
RAW_DATA_TURN_INTENT_PATH = "splits_annotated_at_turn_level"
RAW_DATA_INTENT_BY_TYPE_PATH = {
TURN_INTENT: RAW_DATA_TURN_INTENT_PATH,
SENTENCE_INTENT: RAW_DATA_SENTENCE_INTENT_PATH,
}
DOMAINS = ["airline", "fastfood", "finance", "insurance", "media", "software"]
DATATYPE_TO_RAW_DATA_FILE_NAME = {
"test": "test.tsv",
"train": "train.tsv",
"valid": "dev.tsv",
}
PROCESSED = "processed/"
def _preprocess(opt, datapath, datatype, version):
"""
    MultiDoGo conversations take place between an "agent" and a "customer". Labeled
customer data is stored in one set of files while the agent data is in another.
There is a common conversation ID between the two, but the conversations are not
listed in a consistent way between the documents. Since we'll have to do work to
associate the data between the files anyway, we might as well process the data into
a new file that'll be easier to deal with.
    Stores the data as <multidogo_data_path>/processed/<domain>/<intent_type>/<datatype>/<N>.json.
    Will skip preprocessing if this output has already been built.
"""
domains = opt.get("domains", DOMAINS)
intent_type = opt.get("intent_type", TURN_INTENT)
for domain in domains:
out_dir = get_processed_multidogo_folder(
datapath, domain, datatype, intent_type
)
if build_data.built(out_dir, version):
continue
print(
f" Preprocessing '{domain}' data for '{datatype}' with '{intent_type}' intent labels."
)
Path(out_dir).mkdir(parents=True, exist_ok=True)
# The agent responses for *all* datatypes are in one file.
# We need to iterate through the datatype file to know which lines
        # we'll actually need... so build a quick lookup table of which
        # lines in the tsv file we care about, so we're not scanning
        # through the whole thing repeatedly.
unannotated_id_map = _build_conversation_span_map(
_get_unannotated_tsv_data(datapath, domain)
)
        # Actually do the work of collating all of the conversations + annotations.
        # For turn + sentence intent labels, we do two passes, one for sentence
        # and then one for turn, so that we do not add two sets of labels for the
        # same conversation ID. The same two-pass structure also covers the
        # turn-only and sentence-only intent categories. We also do a bit of
        # chunking.
file_idx = 0
seen_conversations_set = set()
if intent_type == TURN_AND_SENTENCE_INTENT or intent_type == SENTENCE_INTENT:
file_idx, seen_conversations_set = _aggregate_and_write_conversations(
intent_type,
SENTENCE_INTENT,
datapath,
domain,
datatype,
unannotated_id_map,
start_file_idx=file_idx,
skip_ids=set(),
)
if intent_type == TURN_AND_SENTENCE_INTENT or intent_type == TURN_INTENT:
_, _ = _aggregate_and_write_conversations(
intent_type,
TURN_INTENT,
datapath,
domain,
datatype,
unannotated_id_map,
start_file_idx=file_idx,
skip_ids=seen_conversations_set,
)
# mark that we've built this combinations
build_data.mark_done(out_dir, version_string=version)
def get_processed_multidogo_folder(datapath, domain, datatype, intent_type):
return os.path.join(datapath, PROCESSED, domain, intent_type, datatype)
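# For example (the values here are illustrative):
#   get_processed_multidogo_folder(dp, "airline", "train", "turn")
#     -> <dp>/processed/airline/turn/train
# and _aggregate_and_write_conversations() then writes chunked output files
# named 0.json, 1.json, ... inside that folder.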
# unannotated data is RAW_DATA_PREFIX + RAW_DATA_UNANNOTATED_DATA_PATH + <domain> + '.tsv'
# annotated data is RAW_DATA_PREFIX + RAW_DATA_ANNOTATED_DATA_PATH + <annotation type> + <domain> + '/' + <datatype> + '.tsv'
def _get_unannotated_tsv_data(datapath, domain):
file_name = os.path.join(
datapath, RAW_DATA_PREFIX, RAW_DATA_UNANNOTATED_DATA_PATH, domain + ".tsv"
)
return csv.reader(open(file_name, "r"), delimiter=",") # comma-separated tsv, lol
def _get_annotated_tsv_data(datapath, domain, datatype, annotation_type):
file_name = os.path.join(
datapath,
RAW_DATA_PREFIX,
RAW_DATA_ANNOTATED_DATA_PATH,
RAW_DATA_INTENT_BY_TYPE_PATH[annotation_type],
domain,
DATATYPE_TO_RAW_DATA_FILE_NAME[datatype],
)
return csv.reader(open(file_name, "r"), delimiter="\t")
def _get_annotated_tsv_data_size(datapath, domain, datatype, annotation_type):
file_name = os.path.join(
datapath,
RAW_DATA_PREFIX,
RAW_DATA_ANNOTATED_DATA_PATH,
RAW_DATA_INTENT_BY_TYPE_PATH[annotation_type],
domain,
DATATYPE_TO_RAW_DATA_FILE_NAME[datatype],
)
    with open(file_name, "r") as f:
        return sum(1 for _ in f)
def _build_conversation_span_map(unannotated_tsv_object):
result = {} # conversationId to (start line, length) map
start = 0
prev_conversation_id = ""
length = 0
for i, row in enumerate(unannotated_tsv_object):
conversation_id = row[0][
4:-2
        ]  # take a substring because the conversationId has extra filler in the unannotated file
if conversation_id != prev_conversation_id:
result[prev_conversation_id] = (start, length)
start = i
prev_conversation_id = conversation_id
length = 0
length += 1
result[conversation_id] = (start, length)
return result
def _get_slots_map(utterance, slot_string):
values = slot_string.split(" ")
cleaned = re.sub(r"[^\w\s]", "", utterance)
words = cleaned.split(" ")
result = {}
for i in range(len(words)):
if values[i] != "O":
result[values[i]] = words[i]
return result
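# Illustrative example (the utterance is hypothetical, not from the dataset):
#   _get_slots_map("fly to boston", "O O B-city")
#     -> {"B-city": "boston"}
# Note that a repeated slot tag overwrites the earlier word, since the result
# is keyed by tag, and punctuation is stripped from the utterance before the
# word/label alignment.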
def _aggregate_and_write_conversations(
raw_intent_type,
fetch_intent_type,
datapath,
domain,
datatype,
unannotated_id_map,
skip_ids,
start_file_idx=0,
):
conversations_to_write = {} # conversationId -> list of turns
seen_conversations = set()
out_dir = get_processed_multidogo_folder(
datapath, domain, datatype, raw_intent_type
)
file_idx = start_file_idx
intent_tsv = _get_annotated_tsv_data(datapath, domain, datatype, fetch_intent_type)
next(intent_tsv) # don't need the header in the first line
print(f"Processing for {domain}, {fetch_intent_type}, {datatype}")
for labeled_line in tqdm.tqdm(
intent_tsv,
total=_get_annotated_tsv_data_size(
datapath, domain, datatype, fetch_intent_type
)
- 1,
):
conversation_id = labeled_line[0]
if conversation_id in skip_ids:
continue
if conversation_id not in seen_conversations:
# new conversation, add text of conversation to conversations_to_write
conversations_to_write[conversation_id] = {}
found_raw_conversation = _add_utterances(
unannotated_id_map,
conversation_id,
conversations_to_write,
datapath,
domain,
)
seen_conversations.add(conversation_id)
if not found_raw_conversation:
if DEBUG_MISSING_RAW_CONVERSATIONS:
print(f"Could not find raw conversations for {conversation_id}")
skip_ids.add(conversation_id)
conversations_to_write.pop(conversation_id, None)
continue
if fetch_intent_type == SENTENCE_INTENT:
_get_sentence_labels_and_slots_map(labeled_line, conversations_to_write)
elif fetch_intent_type == TURN_INTENT:
_get_turn_labels_and_slots_map(labeled_line, conversations_to_write)
else:
raise KeyError(
"Invalid `fetch_intent_type`. This case should never be hit. Something is broken in the `build.py` file."
)
# Don't forget to dump out last file
with open(f"{out_dir}/{file_idx}.json", "w+") as out_file:
json.dump(conversations_to_write, out_file, indent=4)
file_idx += 1
# Return necessary outputs for next pass
return file_idx, seen_conversations
def _add_utterances(
unannotated_id_map, conversation_id, conversations_to_write, datapath, domain
):
try:
start, length = unannotated_id_map[conversation_id]
except KeyError:
return False
conversation_text = islice(
_get_unannotated_tsv_data(datapath, domain), start, start + length
)
for line in conversation_text:
# Format of unannotated: conversationId,turnNumber,utteranceId,utterance,authorRole
conversations_to_write[conversation_id] = {
**conversations_to_write[conversation_id],
int(line[1]): {"text": line[3], "role": line[4]},
}
return True
def _get_sentence_labels_and_slots_map(labeled_line, output):
# Sentence tsv format: conversationId turnNumber sentenceNumber utteranceId utterance slot-labels intent
conversation_id = labeled_line[0]
    turn_number = int(float(labeled_line[1]))  # because a few got saved as floats.
if conversation_id not in output:
raise RuntimeError("Should never happen; raw conversation text should be here")
if turn_number not in output[conversation_id]:
output[conversation_id][turn_number] = {}
output[conversation_id][turn_number] = {
**output[conversation_id][turn_number],
"slots": _get_slots_map(labeled_line[4], labeled_line[5]),
}
if "intents" not in output[conversation_id][turn_number]:
output[conversation_id][turn_number]["intents"] = []
output[conversation_id][turn_number]["intents"].append(labeled_line[6])
def _get_turn_labels_and_slots_map(labeled_line, output):
# Turn tsv format: conversationId turnNumber utteranceId utterance slot-labels intent
conversation_id = labeled_line[0]
    turn_number = int(float(labeled_line[1]))  # because a few got saved as floats
if conversation_id not in output:
raise RuntimeError("Should never happen; raw conversation text should be here")
if turn_number not in output[conversation_id]:
output[conversation_id][turn_number] = {}
output[conversation_id][turn_number] = {
**output[conversation_id][turn_number],
"slots": _get_slots_map(labeled_line[3], labeled_line[4]),
"intents": [labeled_line[5]],
}
def build(opt):
# get path to data directory
datapath = os.path.join(opt["datapath"], "multidogo")
# define version if any
version = "v1.1"
# check if data had been previously downloaded
if not build_data.built(datapath, version_string=version):
print("[building data: " + datapath + "]")
# make a clean directory if needed
if build_data.built(datapath):
# an older version exists, so remove these outdated files.
build_data.remove_dir(datapath)
build_data.make_dir(datapath)
# Download the data.
RESOURCE.download_file(datapath)
# mark the data as built
build_data.mark_done(datapath, version_string=version)
    # Do preprocessing on the data to put it into FBDialogueData format. There is
    # a lot of it, so combinations that have already been built are skipped.
for fold in ["train", "valid", "test"]:
_preprocess(opt, datapath, fold, version)
| 37.375375 | 121 | 0.676764 |
1fbfef5b5f45c87385bf1b1860ed8a395724d17a | 8,022 | py | Python | pex/vendor/__init__.py | jneuff/pex | f7eb554ecdbfdb9cd24a793b4dda358035e26855 | [
"Apache-2.0"
] | null | null | null | pex/vendor/__init__.py | jneuff/pex | f7eb554ecdbfdb9cd24a793b4dda358035e26855 | [
"Apache-2.0"
] | null | null | null | pex/vendor/__init__.py | jneuff/pex | f7eb554ecdbfdb9cd24a793b4dda358035e26855 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import collections
import os
from pex.common import filter_pyc_dirs, filter_pyc_files, touch
from pex.compatibility import urlparse
from pex.tracer import TRACER
_PACKAGE_COMPONENTS = __name__.split(".")
def _root():
path = os.path.dirname(os.path.abspath(__file__))
for _ in _PACKAGE_COMPONENTS:
path = os.path.dirname(path)
return path
class VendorSpec(
collections.namedtuple("VendorSpec", ["key", "requirement", "rewrite", "constrain"])
):
"""Represents a vendored distribution.
:field str key: The distribution requirement key; e.g.: for a requirement of
requests[security]==2.22.0 the key is 'requests'.
:field str requirement: The distribution requirement string; e.g.: requests[security]==2.22.0.
:field bool rewrite: Whether to re-write the distribution's imports for use with the
`pex.third_party` importer.
:field bool constrain: Whether to attempt to constrain the requirement via pip's --constraint
mechanism.
NB: Vendored distributions should comply with the host distribution platform constraints. In the
case of pex, which is a py2.py3 platform agnostic wheel, vendored libraries should be as well.
"""
ROOT = _root()
_VENDOR_DIR = "_vendored"
@classmethod
def vendor_root(cls):
return os.path.join(cls.ROOT, *(_PACKAGE_COMPONENTS + [cls._VENDOR_DIR]))
@classmethod
def pinned(cls, key, version, rewrite=True):
return cls(
key=key, requirement="{}=={}".format(key, version), rewrite=rewrite, constrain=True
)
@classmethod
def vcs(cls, url, rewrite=True):
result = urlparse.urlparse(url)
fragment_params = urlparse.parse_qs(result.fragment)
values = fragment_params.get("egg")
if not values or len(values) != 1:
raise ValueError(
"Expected the vcs requirement url to have an #egg=<name> fragment. "
"Got: {}".format(url)
)
# N.B.: Constraints do not work for vcs urls.
return cls(key=values[0], requirement=url, rewrite=rewrite, constrain=False)
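    # For example (a sketch; the pip URL below mirrors the one used in
    # iter_vendor_specs()):
    #   VendorSpec.vcs("git+https://github.com/pantsbuild/pip@f9dde7cb6bab#egg=pip")
    # yields key == "pip". A missing or repeated #egg=<name> fragment raises
    # ValueError, and constrain is False because pip constraints cannot pin
    # vcs requirements.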
@property
def _subpath_components(self):
return [self._VENDOR_DIR, self.key]
@property
def relpath(self):
return os.path.join(*(_PACKAGE_COMPONENTS + self._subpath_components))
@property
def target_dir(self):
return os.path.join(self.ROOT, self.relpath)
def create_packages(self):
"""Create missing packages joining the vendor root to the base of the vendored distribution.
For example, given a root at ``/home/jake/dev/pantsbuild/pex`` and a vendored distribution at
``pex/vendor/_vendored/requests`` this method would create the following package files::
pex/vendor/_vendored/__init__.py
pex/vendor/_vendored/requests/__init__.py
These package files allow for standard python importers to find vendored code via re-directs
from a `PEP-302 <https://www.python.org/dev/peps/pep-0302/>`_ importer like
:class:`pex.third_party.VendorImporter`.
"""
if not self.rewrite:
# The extra package structure is only required for vendored code used via import rewrites.
return
for index, _ in enumerate(self._subpath_components):
relpath = _PACKAGE_COMPONENTS + self._subpath_components[: index + 1] + ["__init__.py"]
touch(os.path.join(self.ROOT, *relpath))
def iter_vendor_specs():
"""Iterate specifications for code vendored by pex.
:return: An iterator over specs of all vendored code.
:rtype: :class:`collection.Iterator` of :class:`VendorSpec`
"""
# We use this via pex.third_party at runtime to check for compatible wheel tags and at build
# time to implement resolving distributions from a PEX repository.
yield VendorSpec.pinned("packaging", "20.4")
# We shell out to pip at buildtime to resolve and install dependencies.
# N.B.: This is pip 20.0.dev0 with a patch to support foreign download targets more fully.
yield VendorSpec.vcs(
"git+https://github.com/pantsbuild/pip@f9dde7cb6bab#egg=pip", rewrite=False
)
# We expose this to pip at buildtime for legacy builds, but we also use pkg_resources via
# pex.third_party at runtime in various ways.
# N.B.: 44.0.0 is the last setuptools version compatible with Python 2.
yield VendorSpec.pinned("setuptools", "44.0.0")
# We expose this to pip at buildtime for legacy builds.
yield VendorSpec.pinned("wheel", "0.35.1", rewrite=False)
def vendor_runtime(chroot, dest_basedir, label, root_module_names):
"""Includes portions of vendored distributions in a chroot.
    The portion to include is selected by root module name. If the module is a file, just that
    file is included. If the module represents a package, the package and all its sub-packages
    are added recursively.
:param chroot: The chroot to add vendored code to.
:type chroot: :class:`pex.common.Chroot`
:param str dest_basedir: The prefix to store the vendored code under in the ``chroot``.
:param str label: The chroot label for the vendored code fileset.
:param root_module_names: The names of the root vendored modules to include in the chroot.
:type root_module_names: :class:`collections.Iterable` of str
:raise: :class:`ValueError` if any of the given ``root_module_names`` could not be found amongst
the vendored code and added to the chroot.
"""
vendor_module_names = {root_module_name: False for root_module_name in root_module_names}
for spec in iter_vendor_specs():
for root, dirs, files in os.walk(spec.target_dir):
if root == spec.target_dir:
dirs[:] = [pkg_name for pkg_name in dirs if pkg_name in vendor_module_names]
files[:] = [mod_name for mod_name in files if mod_name[:-3] in vendor_module_names]
vendored_names = dirs + [filename[:-3] for filename in files]
if vendored_names:
pkg_path = ""
for pkg in spec.relpath.split(os.sep):
pkg_path = os.path.join(pkg_path, pkg)
pkg_file = os.path.join(pkg_path, "__init__.py")
src = os.path.join(VendorSpec.ROOT, pkg_file)
dest = os.path.join(dest_basedir, pkg_file)
if os.path.exists(src):
chroot.copy(src, dest, label)
else:
# We delete `pex/vendor/_vendored/<dist>/__init__.py` when isolating third_party.
chroot.touch(dest, label)
for name in vendored_names:
vendor_module_names[name] = True
TRACER.log(
"Vendoring {} from {} @ {}".format(name, spec, spec.target_dir), V=3
)
# We copy over sources and data only; no pyc files.
dirs[:] = filter_pyc_dirs(dirs)
for filename in filter_pyc_files(files):
src = os.path.join(root, filename)
dest = os.path.join(
dest_basedir, spec.relpath, os.path.relpath(src, spec.target_dir)
)
chroot.copy(src, dest, label)
if not all(vendor_module_names.values()):
raise ValueError(
"Failed to extract {module_names} from:\n\t{specs}".format(
module_names=", ".join(
module for module, written in vendor_module_names.items() if not written
),
specs="\n\t".join(
"{} @ {}".format(spec, spec.target_dir) for spec in iter_vendor_specs()
),
)
)
| 42.670213 | 109 | 0.639865 |
05bd693ed23d6ac36953df0e3331778102f52fde | 17,416 | py | Python | google/cloud/gaming_v1/services/realms_service/transports/grpc.py | renovate-bot/python-game-servers | d60305505e0afbed940a2a7f0c1a6fc0e6eca56d | [
"Apache-2.0"
] | null | null | null | google/cloud/gaming_v1/services/realms_service/transports/grpc.py | renovate-bot/python-game-servers | d60305505e0afbed940a2a7f0c1a6fc0e6eca56d | [
"Apache-2.0"
] | null | null | null | google/cloud/gaming_v1/services/realms_service/transports/grpc.py | renovate-bot/python-game-servers | d60305505e0afbed940a2a7f0c1a6fc0e6eca56d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.gaming_v1.types import realms
from google.longrunning import operations_pb2 # type: ignore
from .base import RealmsServiceTransport, DEFAULT_CLIENT_INFO
class RealmsServiceGrpcTransport(RealmsServiceTransport):
"""gRPC backend transport for RealmsService.
A realm is a grouping of game server clusters that are
considered interchangeable.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "gameservices.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "gameservices.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def list_realms(
self,
) -> Callable[[realms.ListRealmsRequest], realms.ListRealmsResponse]:
r"""Return a callable for the list realms method over gRPC.
Lists realms in a given project and location.
Returns:
Callable[[~.ListRealmsRequest],
~.ListRealmsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_realms" not in self._stubs:
self._stubs["list_realms"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.RealmsService/ListRealms",
request_serializer=realms.ListRealmsRequest.serialize,
response_deserializer=realms.ListRealmsResponse.deserialize,
)
return self._stubs["list_realms"]
@property
def get_realm(self) -> Callable[[realms.GetRealmRequest], realms.Realm]:
r"""Return a callable for the get realm method over gRPC.
Gets details of a single realm.
Returns:
Callable[[~.GetRealmRequest],
~.Realm]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_realm" not in self._stubs:
self._stubs["get_realm"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.RealmsService/GetRealm",
request_serializer=realms.GetRealmRequest.serialize,
response_deserializer=realms.Realm.deserialize,
)
return self._stubs["get_realm"]
@property
def create_realm(
self,
) -> Callable[[realms.CreateRealmRequest], operations_pb2.Operation]:
r"""Return a callable for the create realm method over gRPC.
Creates a new realm in a given project and location.
Returns:
Callable[[~.CreateRealmRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_realm" not in self._stubs:
self._stubs["create_realm"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.RealmsService/CreateRealm",
request_serializer=realms.CreateRealmRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_realm"]
@property
def delete_realm(
self,
) -> Callable[[realms.DeleteRealmRequest], operations_pb2.Operation]:
r"""Return a callable for the delete realm method over gRPC.
Deletes a single realm.
Returns:
Callable[[~.DeleteRealmRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_realm" not in self._stubs:
self._stubs["delete_realm"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.RealmsService/DeleteRealm",
request_serializer=realms.DeleteRealmRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_realm"]
@property
def update_realm(
self,
) -> Callable[[realms.UpdateRealmRequest], operations_pb2.Operation]:
r"""Return a callable for the update realm method over gRPC.
Patches a single realm.
Returns:
Callable[[~.UpdateRealmRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_realm" not in self._stubs:
self._stubs["update_realm"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.RealmsService/UpdateRealm",
request_serializer=realms.UpdateRealmRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_realm"]
@property
def preview_realm_update(
self,
) -> Callable[
[realms.PreviewRealmUpdateRequest], realms.PreviewRealmUpdateResponse
]:
r"""Return a callable for the preview realm update method over gRPC.
Previews patches to a single realm.
Returns:
Callable[[~.PreviewRealmUpdateRequest],
~.PreviewRealmUpdateResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "preview_realm_update" not in self._stubs:
self._stubs["preview_realm_update"] = self.grpc_channel.unary_unary(
"/google.cloud.gaming.v1.RealmsService/PreviewRealmUpdate",
request_serializer=realms.PreviewRealmUpdateRequest.serialize,
response_deserializer=realms.PreviewRealmUpdateResponse.deserialize,
)
return self._stubs["preview_realm_update"]
def close(self):
self.grpc_channel.close()
__all__ = ("RealmsServiceGrpcTransport",)
| 42.791155 | 87 | 0.631833 |
e26490a446b4843739b32641e1a95211864cb58f | 8,781 | py | Python | test/units/modules/network/f5/test_bigip_command.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | test/units/modules/network/f5/test_bigip_command.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | test/units/modules/network/f5/test_bigip_command.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.compat.tests.mock import Mock
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_command import Parameters
from library.modules.bigip_command import ModuleManager
from library.modules.bigip_command import V1Manager
from library.modules.bigip_command import V2Manager
from library.modules.bigip_command import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_command import Parameters
from ansible.modules.network.f5.bigip_command import ModuleManager
from ansible.modules.network.f5.bigip_command import V1Manager
from ansible.modules.network.f5.bigip_command import V2Manager
from ansible.modules.network.f5.bigip_command import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
commands=[
"tmsh show sys version"
],
server='localhost',
user='admin',
password='password'
)
p = Parameters(params=args)
assert len(p.commands) == 1
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_run_single_command(self, *args):
set_module_args(dict(
commands=[
"tmsh show sys version"
],
server='localhost',
user='admin',
password='password'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
m1 = V2Manager(module=module)
m1.execute_on_device = Mock(return_value=['resp1', 'resp2'])
mm = ModuleManager(module=module)
mm._run_commands = Mock(return_value=[])
mm.get_manager = Mock(return_value=m1)
results = mm.exec_module()
assert results['changed'] is False
assert mm._run_commands.call_count == 0
assert m1.execute_on_device.call_count == 2
def test_run_single_modification_command(self, *args):
set_module_args(dict(
commands=[
"tmsh create ltm virtual foo"
],
server='localhost',
user='admin',
password='password'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
m1 = V2Manager(module=module)
m1.execute_on_device = Mock(return_value=['resp1', 'resp2'])
mm = ModuleManager(module=module)
mm._run_commands = Mock(return_value=[])
mm.get_manager = Mock(return_value=m1)
results = mm.exec_module()
assert results['changed'] is True
assert mm._run_commands.call_count == 0
assert m1.execute_on_device.call_count == 2
def test_cli_command(self, *args):
set_module_args(dict(
commands=[
"show sys version"
],
server='localhost',
user='admin',
password='password',
transport='cli'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
m1 = V1Manager(module=module)
m1.execute_on_device = Mock(return_value=['resp1', 'resp2', 'resp3'])
mm = ModuleManager(module=module)
mm._run_commands = Mock(return_value=[])
mm.get_manager = Mock(return_value=m1)
results = mm.exec_module()
assert results['changed'] is False
        # The call count is greater than one on CLI transport because we must
        # first determine whether the remote CLI is in tmsh mode or advanced
        # shell (bash) mode before running the command in the "commands" list
        # above; the assertion below simply matches the three mocked
        # responses.
        #
        # Can we change this in the future by making the terminal plugin
        # find this out ahead of time?
assert m1.execute_on_device.call_count == 3
def test_command_with_commas(self, *args):
set_module_args(dict(
commands="""
tmsh create /auth ldap system-auth {bind-dn uid=binduser,
cn=users,dc=domain,dc=com bind-pw $ENCRYPTEDPW check-roles-group
enabled search-base-dn cn=users,dc=domain,dc=com servers add {
ldap.server.com } }
""",
server='localhost',
user='admin',
password='password'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
m1 = V2Manager(module=module)
m1.execute_on_device = Mock(return_value=['resp1', 'resp2'])
mm = ModuleManager(module=module)
mm.get_manager = Mock(return_value=m1)
results = mm.exec_module()
assert results['changed'] is True
assert m1.execute_on_device.call_count == 2
def test_normalizing_command_show(self, *args):
args = dict(
commands=[
"show sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'show sys version'
def test_normalizing_command_delete(self, *args):
args = dict(
commands=[
"delete sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'delete sys version'
def test_normalizing_command_modify(self, *args):
args = dict(
commands=[
"modify sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'modify sys version'
def test_normalizing_command_list(self, *args):
args = dict(
commands=[
"list sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'list sys version'
def test_normalizing_command_tmsh_show(self, *args):
args = dict(
commands=[
"tmsh show sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'show sys version'
def test_normalizing_command_tmsh_delete(self, *args):
args = dict(
commands=[
"tmsh delete sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'delete sys version'
def test_normalizing_command_tmsh_modify(self, *args):
args = dict(
commands=[
"tmsh modify sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'modify sys version'
def test_normalizing_command_tmsh_list(self, *args):
args = dict(
commands=[
"tmsh list sys version"
],
)
result = V2Manager.normalize_commands(args['commands'])
assert result[0] == 'list sys version'
| 30.071918 | 91 | 0.613028 |
64a60980f86d283938f5b5efaa137005cf7c282d | 3,103 | py | Python | setup.py | xiaomengy/moolib | d46826b4dead28e7a9be6545f46bcf159c77ca17 | [
"MIT"
] | 2 | 2022-02-08T02:42:22.000Z | 2022-02-10T03:41:54.000Z | setup.py | xiaomengy/moolib | d46826b4dead28e7a9be6545f46bcf159c77ca17 | [
"MIT"
] | null | null | null | setup.py | xiaomengy/moolib | d46826b4dead28e7a9be6545f46bcf159c77ca17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# To install: pip install .
#
# For debug builds: python setup.py build --debug install
#
# The environment variable USE_CUDA can be set to "OFF" (or 0).
#
import os
import pathlib
import subprocess
import sys
import setuptools
from setuptools.command import build_ext
from distutils import spawn
class CMakeBuild(build_ext.build_ext):
def run(self): # Necessary for pip install -e.
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
source_path = pathlib.Path(__file__).parent.resolve()
output_path = pathlib.Path(self.get_ext_fullpath(ext.name)).parent.absolute()
os.makedirs(self.build_temp, exist_ok=True)
build_type = "Debug" if self.debug else "RelWithDebInfo"
generator = "Ninja" if spawn.find_executable("ninja") else "Unix Makefiles"
cmake_cmd = [
"cmake",
str(source_path),
"-G%s" % generator,
"-DCMAKE_BUILD_TYPE=%s" % build_type,
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=%s" % output_path,
]
        # USE_CUDA defaults to on; per the header comment it may be set to
        # "OFF" (or 0). Parse it as a string so unexpected values such as
        # "ON" cannot crash the int() conversion the previous logic used.
        use_cuda = str(os.environ.get("USE_CUDA", "ON")).strip().upper()
        if use_cuda in ("OFF", "0", "FALSE"):
            cmake_cmd.append("-DUSE_CUDA=OFF")
build_cmd = ["cmake", "--build", ".", "--parallel"]
# pip install (but not python setup.py install) runs with a modified PYTHONPATH.
# This can prevent cmake from finding the torch libraries.
env = os.environ.copy()
if "PYTHONPATH" in env:
del env["PYTHONPATH"]
try:
subprocess.check_call(cmake_cmd, cwd=self.build_temp, env=env)
subprocess.check_call(build_cmd, cwd=self.build_temp, env=env)
except subprocess.CalledProcessError:
# Don't obscure the error with a setuptools backtrace.
sys.exit(1)
def main():
with open("README.md") as f:
long_description = f.read()
setuptools.setup(
name="moolib",
version="0.0.9",
description=("A library for distributed ML training with PyTorch"),
long_description=long_description,
long_description_content_type="text/markdown",
author="tscmoo & the moolib dev team",
url="https://github.com/facebookresearch/moolib",
classifiers=[
"Programming Language :: C++",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Environment :: GPU :: NVIDIA CUDA",
],
packages=["moolib", "moolib.examples.common", "moolib.examples.vtrace"],
package_dir={"": "py", "moolib.examples": "examples"},
ext_modules=[setuptools.Extension("moolib._C", sources=[])],
install_requires=["torch>=1.6.0"],
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
)
if __name__ == "__main__":
main()
| 31.989691 | 88 | 0.614567 |
b0fc72a05a6d7210e5155a3f940d34eb99842c4d | 8,645 | py | Python | tests/api/benchmark/test_permission_group.py | baitsanape/saleor | 9594b40fb23bb6e665ba207c8ef436043e372bba | [
"CC-BY-4.0"
] | 2 | 2020-05-28T19:29:33.000Z | 2020-05-28T19:29:39.000Z | tests/api/benchmark/test_permission_group.py | baitsanape/saleor | 9594b40fb23bb6e665ba207c8ef436043e372bba | [
"CC-BY-4.0"
] | 13 | 2021-03-19T02:54:47.000Z | 2022-03-12T00:36:26.000Z | tests/api/benchmark/test_permission_group.py | baitsanape/saleor | 9594b40fb23bb6e665ba207c8ef436043e372bba | [
"CC-BY-4.0"
] | 1 | 2020-05-15T18:18:52.000Z | 2020-05-15T18:18:52.000Z | import graphene
import pytest
from django.contrib.auth.models import Group
from saleor.core.permissions import AccountPermissions
from tests.api.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_permission_group_create(
staff_user,
permission_manage_staff,
staff_api_client,
permission_manage_users,
permission_manage_apps,
count_queries,
):
staff_user.user_permissions.add(permission_manage_users, permission_manage_apps)
query = """
mutation PermissionGroupCreate(
$input: PermissionGroupCreateInput!) {
permissionGroupCreate(
input: $input)
{
group{
id
name
permissions {
name
code
}
users {
email
}
}
permissionGroupErrors{
field
code
permissions
users
message
}
}
}
"""
group_count = Group.objects.count()
variables = {
"input": {
"name": "New permission group",
"addPermissions": [
AccountPermissions.MANAGE_USERS.name,
AccountPermissions.MANAGE_SERVICE_ACCOUNTS.name,
],
"addUsers": [graphene.Node.to_global_id("User", staff_user.id)],
}
}
response = staff_api_client.post_graphql(
query, variables, permissions=(permission_manage_staff,)
)
content = get_graphql_content(response)
data = content["data"]["permissionGroupCreate"]
groups = Group.objects.all()
assert data["permissionGroupErrors"] == []
assert len(groups) == group_count + 1
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_permission_group_update(
permission_group_manage_users,
staff_users,
permission_manage_staff,
staff_api_client,
permission_manage_apps,
permission_manage_users,
count_queries,
):
query = """
mutation PermissionGroupUpdate(
$id: ID!, $input: PermissionGroupUpdateInput!) {
permissionGroupUpdate(
id: $id, input: $input)
{
group{
id
name
permissions {
name
code
}
}
permissionGroupErrors{
field
code
permissions
users
message
}
}
}
"""
staff_user = staff_users[0]
staff_user.user_permissions.add(permission_manage_apps, permission_manage_users)
group1, group2 = Group.objects.bulk_create(
[Group(name="manage users"), Group(name="manage staff and users")]
)
group1.permissions.add(permission_manage_users)
group2.permissions.add(permission_manage_users, permission_manage_staff)
group1_user = staff_users[1]
group1.user_set.add(group1_user)
group2.user_set.add(staff_user)
group_count = Group.objects.count()
variables = {
"id": graphene.Node.to_global_id("Group", group1.id),
"input": {
"name": "New permission group",
"addPermissions": [AccountPermissions.MANAGE_SERVICE_ACCOUNTS.name],
"removePermissions": [AccountPermissions.MANAGE_USERS.name],
"addUsers": [graphene.Node.to_global_id("User", staff_user.pk)],
"removeUsers": [
graphene.Node.to_global_id("User", group1.user_set.first().pk)
],
},
}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["permissionGroupUpdate"]
groups = Group.objects.all()
assert data["permissionGroupErrors"] == []
assert len(groups) == group_count
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_permission_group_update_remove_users_with_manage_staff(
permission_group_manage_users,
staff_users,
permission_manage_staff,
staff_api_client,
permission_manage_apps,
permission_manage_users,
permission_manage_orders,
count_queries,
):
query = """
mutation PermissionGroupUpdate(
$id: ID!, $input: PermissionGroupUpdateInput!) {
permissionGroupUpdate(
id: $id, input: $input)
{
group{
id
name
permissions {
name
code
}
users {
email
}
}
permissionGroupErrors{
field
code
permissions
users
message
}
}
}
"""
staff_user, staff_user1, staff_user2 = staff_users
groups = Group.objects.bulk_create(
[Group(name="manage users"), Group(name="manage staff, order and users")]
)
group1, group2 = groups
group1.permissions.add(permission_manage_staff, permission_manage_users)
group2.permissions.add(
permission_manage_staff, permission_manage_orders, permission_manage_users
)
group1.user_set.add(staff_user1, staff_user2)
group2.user_set.add(staff_user2)
staff_user.user_permissions.add(permission_manage_users, permission_manage_orders)
variables = {
"id": graphene.Node.to_global_id("Group", group1.id),
"input": {
"removeUsers": [
graphene.Node.to_global_id("User", user.id)
for user in [staff_user1, staff_user2]
],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=(permission_manage_staff,)
)
content = get_graphql_content(response)
data = content["data"]["permissionGroupUpdate"]
assert len(data["group"]["users"]) == 0
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_permission_group_delete(
staff_users,
permission_manage_staff,
permission_manage_orders,
permission_manage_products,
staff_api_client,
count_queries,
):
query = """
mutation PermissionGroupDelete($id: ID!) {
permissionGroupDelete(
id: $id)
{
group{
id
name
permissions {
name
code
}
}
permissionGroupErrors{
field
code
users
permissions
message
}
}
}
"""
staff_user1, staff_user2, _ = staff_users
staff_user1.user_permissions.add(
permission_manage_orders, permission_manage_products
)
groups = Group.objects.bulk_create(
[Group(name="manage orders"), Group(name="manage orders and products")]
)
group1, group2 = groups
group1.permissions.add(permission_manage_orders, permission_manage_staff)
group2.permissions.add(
permission_manage_orders, permission_manage_products, permission_manage_staff
)
staff_user2.groups.add(group1, group2)
group_count = Group.objects.count()
variables = {"id": graphene.Node.to_global_id("Group", group1.id)}
response = staff_api_client.post_graphql(
query, variables, permissions=(permission_manage_staff,)
)
content = get_graphql_content(response)
data = content["data"]["permissionGroupDelete"]
assert data
assert Group.objects.count() == group_count - 1
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_permission_group_query(
permission_group_manage_users,
staff_user,
permission_manage_staff,
permission_manage_users,
staff_api_client,
count_queries,
):
staff_user.user_permissions.add(permission_manage_staff, permission_manage_users)
group = permission_group_manage_users
query = """
query ($id: ID!){
permissionGroup(id: $id){
id
name
permissions {
name
code
}
users{
email
}
userCanManage
}
}
"""
variables = {"id": graphene.Node.to_global_id("Group", group.id)}
response = staff_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
data = content["data"]["permissionGroup"]
assert data
| 27.797428 | 86 | 0.595604 |
facb1ab8b0e67ee945169f2cf225a64e4303e93b | 4,784 | py | Python | pypureclient/flasharray/FA_2_6/models/session_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_6/models/session_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_6/models/session_get_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_6 import models
class SessionGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[Session]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.Session]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[Session])
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SessionGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SessionGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SessionGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
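# Illustrative usage sketch (not part of the generated client); the field
# values below are invented for demonstration:
def _demo_session_get_response():
    resp = SessionGetResponse(more_items_remaining=False,
                              total_item_count=0, items=[])
    return resp.to_dict()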
| 36.8 | 524 | 0.60556 |
ce4b29e97a1dc1db0ab9476748a50f68188b3598 | 25,644 | py | Python | code_summary/onmt/translate/translator.py | Nrgeup/review_assistant | bf03d62773501b84069afcc8b3da66d6d7829218 | [
"Apache-2.0"
] | 1 | 2020-01-17T00:41:51.000Z | 2020-01-17T00:41:51.000Z | code_summary/onmt/translate/translator.py | Nrgeup/review_assistant | bf03d62773501b84069afcc8b3da66d6d7829218 | [
"Apache-2.0"
] | null | null | null | code_summary/onmt/translate/translator.py | Nrgeup/review_assistant | bf03d62773501b84069afcc8b3da66d6d7829218 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import codecs
import os
import math
import time
from itertools import count
import torch
import onmt.model_builder
import onmt.inputters as inputters
import onmt.decoders.ensemble
from onmt.translate.beam_search import BeamSearch
from onmt.translate.greedy_search import GreedySearch
from onmt.utils.misc import set_random_seed
from onmt.modules.copy_generator import collapse_copy_scores
def build_translator(opt, report_score=True, logger=None, out_file=None):
if out_file is None:
out_file = codecs.open(opt.output, 'w+', 'utf-8')
load_test_model = onmt.decoders.ensemble.load_test_model \
if len(opt.models) > 1 else onmt.model_builder.load_test_model
fields, model, model_opt = load_test_model(opt)
scorer = onmt.translate.GNMTGlobalScorer.from_opt(opt)
translator = Translator.from_opt(
model,
fields,
opt,
model_opt,
global_scorer=scorer,
out_file=out_file,
report_score=report_score,
logger=logger
)
return translator
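# Usage note (illustrative, not part of the original module): `opt` is the
# argparse.Namespace produced by OpenNMT-py's translate option parser and must
# at least carry the fields read above (opt.models, opt.output, opt.gpu, ...):
#   translator = build_translator(opt)
#   scores, predictions = translator.translate(src=..., batch_size=32)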
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
# max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
src_elements = count * max_src_in_batch
return src_elements
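# Usage note (illustrative): max_tok_len is meant to be passed as the
# `batch_size_fn` of a torchtext-style iterator, so that `batch_size` caps the
# number of source tokens per batch rather than the number of sentences; see
# the OrderedIterator construction in Translator.translate() below, which
# selects it when batch_type == "tokens".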
class Translator(object):
"""Translate a batch of sentences with a saved model.
Args:
model (onmt.modules.NMTModel): NMT model to use for translation
fields (dict[str, torchtext.data.Field]): A dict
mapping each side to its list of name-Field pairs.
src_reader (onmt.inputters.DataReaderBase): Source reader.
tgt_reader (onmt.inputters.TextDataReader): Target reader.
gpu (int): GPU device. Set to negative for no GPU.
n_best (int): How many beams to wait for.
min_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
max_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
beam_size (int): Number of beams.
random_sampling_topk (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
random_sampling_temp (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
stepwise_penalty (bool): Whether coverage penalty is applied every step
or not.
dump_beam (bool): Debugging option.
block_ngram_repeat (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
ignore_when_blocking (set or frozenset): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
replace_unk (bool): Replace unknown token.
data_type (str): Source data type.
verbose (bool): Print/log every translation.
report_bleu (bool): Print/log Bleu metric.
report_rouge (bool): Print/log Rouge metric.
report_time (bool): Print/log total time/frequency.
copy_attn (bool): Use copy attention.
global_scorer (onmt.translate.GNMTGlobalScorer): Translation
scoring/reranking object.
out_file (TextIO or codecs.StreamReaderWriter): Output file.
report_score (bool) : Whether to report scores
logger (logging.Logger or NoneType): Logger.
"""
def __init__(
self,
model,
fields,
src_reader,
tgt_reader,
gpu=-1,
n_best=1,
min_length=0,
max_length=100,
ratio=0.,
beam_size=30,
random_sampling_topk=1,
random_sampling_temp=1,
stepwise_penalty=None,
dump_beam=False,
block_ngram_repeat=0,
ignore_when_blocking=frozenset(),
replace_unk=False,
phrase_table="",
data_type="text",
verbose=False,
report_bleu=False,
report_rouge=False,
report_time=False,
copy_attn=False,
global_scorer=None,
out_file=None,
report_score=True,
logger=None,
seed=-1):
self.model = model
self.fields = fields
tgt_field = dict(self.fields)["tgt"].base_field
self._tgt_vocab = tgt_field.vocab
self._tgt_eos_idx = self._tgt_vocab.stoi[tgt_field.eos_token]
self._tgt_pad_idx = self._tgt_vocab.stoi[tgt_field.pad_token]
self._tgt_bos_idx = self._tgt_vocab.stoi[tgt_field.init_token]
self._tgt_unk_idx = self._tgt_vocab.stoi[tgt_field.unk_token]
self._tgt_vocab_len = len(self._tgt_vocab)
self._gpu = gpu
self._use_cuda = gpu > -1
self._dev = torch.device("cuda", self._gpu) \
if self._use_cuda else torch.device("cpu")
self.n_best = n_best
self.max_length = max_length
self.beam_size = beam_size
self.random_sampling_temp = random_sampling_temp
self.sample_from_topk = random_sampling_topk
self.min_length = min_length
self.ratio = ratio
self.stepwise_penalty = stepwise_penalty
self.dump_beam = dump_beam
self.block_ngram_repeat = block_ngram_repeat
self.ignore_when_blocking = ignore_when_blocking
self._exclusion_idxs = {
self._tgt_vocab.stoi[t] for t in self.ignore_when_blocking}
self.src_reader = src_reader
self.tgt_reader = tgt_reader
self.replace_unk = replace_unk
if self.replace_unk and not self.model.decoder.attentional:
raise ValueError(
"replace_unk requires an attentional decoder.")
self.phrase_table = phrase_table
self.data_type = data_type
self.verbose = verbose
self.report_bleu = report_bleu
self.report_rouge = report_rouge
self.report_time = report_time
self.copy_attn = copy_attn
self.global_scorer = global_scorer
if self.global_scorer.has_cov_pen and \
not self.model.decoder.attentional:
raise ValueError(
"Coverage penalty requires an attentional decoder.")
self.out_file = out_file
self.report_score = report_score
self.logger = logger
self.use_filter_pred = False
self._filter_pred = None
# for debugging
self.beam_trace = self.dump_beam != ""
self.beam_accum = None
if self.beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
set_random_seed(seed, self._use_cuda)
@classmethod
def from_opt(
cls,
model,
fields,
opt,
model_opt,
global_scorer=None,
out_file=None,
report_score=True,
logger=None):
"""Alternate constructor.
Args:
model (onmt.modules.NMTModel): See :func:`__init__()`.
fields (dict[str, torchtext.data.Field]): See
:func:`__init__()`.
opt (argparse.Namespace): Command line options
model_opt (argparse.Namespace): Command line options saved with
the model checkpoint.
global_scorer (onmt.translate.GNMTGlobalScorer): See
:func:`__init__()`..
out_file (TextIO or codecs.StreamReaderWriter): See
:func:`__init__()`.
report_score (bool) : See :func:`__init__()`.
logger (logging.Logger or NoneType): See :func:`__init__()`.
"""
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader["text"].from_opt(opt)
return cls(
model,
fields,
src_reader,
tgt_reader,
gpu=opt.gpu,
n_best=opt.n_best,
min_length=opt.min_length,
max_length=opt.max_length,
ratio=opt.ratio,
beam_size=opt.beam_size,
random_sampling_topk=opt.random_sampling_topk,
random_sampling_temp=opt.random_sampling_temp,
stepwise_penalty=opt.stepwise_penalty,
dump_beam=opt.dump_beam,
block_ngram_repeat=opt.block_ngram_repeat,
ignore_when_blocking=set(opt.ignore_when_blocking),
replace_unk=opt.replace_unk,
phrase_table=opt.phrase_table,
data_type=opt.data_type,
verbose=opt.verbose,
report_bleu=opt.report_bleu,
report_rouge=opt.report_rouge,
report_time=opt.report_time,
copy_attn=model_opt.copy_attn,
global_scorer=global_scorer,
out_file=out_file,
report_score=report_score,
logger=logger,
seed=opt.seed)
def _log(self, msg):
if self.logger:
self.logger.info(msg)
else:
# print(msg)
pass
def _gold_score(self, batch, memory_bank, src_lengths, src_vocabs,
use_src_map, enc_states, batch_size, src):
if "tgt" in batch.__dict__:
gs = self._score_target(
batch, memory_bank, src_lengths, src_vocabs,
batch.src_map if use_src_map else None)
self.model.decoder.init_state(src, memory_bank, enc_states)
else:
gs = [0] * batch_size
return gs
def translate(
self,
src,
tgt=None,
src_dir=None,
batch_size=None,
batch_type="sents",
attn_debug=False,
phrase_table=""):
"""Translate content of ``src`` and get gold scores from ``tgt``.
Args:
src: See :func:`self.src_reader.read()`.
tgt: See :func:`self.tgt_reader.read()`.
src_dir: See :func:`self.src_reader.read()` (only relevant
for certain types of data).
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions
"""
if batch_size is None:
raise ValueError("batch_size must be set")
data = inputters.Dataset(
self.fields,
readers=([self.src_reader, self.tgt_reader]
if tgt else [self.src_reader]),
data=[("src", src), ("tgt", tgt)] if tgt else [("src", src)],
dirs=[src_dir, None] if tgt else [src_dir],
sort_key=inputters.str2sortkey[self.data_type],
filter_pred=self._filter_pred
)
data_iter = inputters.OrderedIterator(
dataset=data,
device=self._dev,
batch_size=batch_size,
batch_size_fn=max_tok_len if batch_type == "tokens" else None,
train=False,
sort=False,
sort_within_batch=True,
shuffle=False
)
xlation_builder = onmt.translate.TranslationBuilder(
data, self.fields, self.n_best, self.replace_unk, tgt,
self.phrase_table
)
# Statistics
counter = count(1)
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
all_scores = []
all_predictions = []
start_time = time.time()
for batch in data_iter:
batch_data = self.translate_batch(
batch, data.src_vocabs, attn_debug
)
translations = xlation_builder.from_batch(batch_data)
for trans in translations:
all_scores += [trans.pred_scores[:self.n_best]]
pred_score_total += trans.pred_scores[0]
pred_words_total += len(trans.pred_sents[0])
if tgt is not None:
gold_score_total += trans.gold_score
gold_words_total += len(trans.gold_sent) + 1
n_best_preds = [" ".join(pred)
for pred in trans.pred_sents[:self.n_best]]
all_predictions += [n_best_preds]
self.out_file.write('\n'.join(n_best_preds) + '\n')
self.out_file.flush()
if self.verbose:
sent_number = next(counter)
output = trans.log(sent_number)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if attn_debug:
preds = trans.pred_sents[0]
preds.append('</s>')
attns = trans.attns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(attns[0]))]
header_format = "{:>10.10} " + "{:>10.7} " * len(srcs)
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
output = header_format.format("", *srcs) + '\n'
for word, row in zip(preds, attns):
max_index = row.index(max(row))
row_format = row_format.replace(
"{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
row_format = row_format.replace(
"{:*>10.7f} ", "{:>10.7f} ", max_index)
output += row_format.format(word, *row) + '\n'
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
end_time = time.time()
if self.report_score:
msg = self._report_score('PRED', pred_score_total,
pred_words_total)
self._log(msg)
if tgt is not None:
msg = self._report_score('GOLD', gold_score_total,
gold_words_total)
self._log(msg)
if self.report_bleu:
msg = self._report_bleu(tgt)
self._log(msg)
if self.report_rouge:
msg = self._report_rouge(tgt)
self._log(msg)
if self.report_time:
total_time = end_time - start_time
self._log("Total translation time (s): %f" % total_time)
self._log("Average translation time (s): %f" % (
total_time / len(all_predictions)))
self._log("Tokens per second: %f" % (
pred_words_total / total_time))
if self.dump_beam:
import json
json.dump(self.translator.beam_accum,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions
def translate_batch(self, batch, src_vocabs, attn_debug):
"""Translate a batch of sentences."""
with torch.no_grad():
if self.beam_size == 1:
decode_strategy = GreedySearch(
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
batch_size=batch.batch_size,
min_length=self.min_length, max_length=self.max_length,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
return_attention=attn_debug or self.replace_unk,
sampling_temp=self.random_sampling_temp,
keep_topk=self.sample_from_topk)
else:
# TODO: support these blacklisted features
assert not self.dump_beam
decode_strategy = BeamSearch(
self.beam_size,
batch_size=batch.batch_size,
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
n_best=self.n_best,
global_scorer=self.global_scorer,
min_length=self.min_length, max_length=self.max_length,
return_attention=attn_debug or self.replace_unk,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
stepwise_penalty=self.stepwise_penalty,
ratio=self.ratio)
return self._translate_batch_with_strategy(batch, src_vocabs,
decode_strategy)
def _run_encoder(self, batch):
src, src_lengths = batch.src if isinstance(batch.src, tuple) \
else (batch.src, None)
enc_states, memory_bank, src_lengths = self.model.encoder(
src, src_lengths)
if src_lengths is None:
assert not isinstance(memory_bank, tuple), \
'Ensemble decoding only supported for text data'
src_lengths = torch.Tensor(batch.batch_size) \
.type_as(memory_bank) \
.long() \
.fill_(memory_bank.size(0))
return src, enc_states, memory_bank, src_lengths
def _decode_and_generate(
self,
decoder_in,
memory_bank,
batch,
src_vocabs,
memory_lengths,
src_map=None,
step=None,
batch_offset=None):
if self.copy_attn:
# Turn any copied words into UNKs.
decoder_in = decoder_in.masked_fill(
decoder_in.gt(self._tgt_vocab_len - 1), self._tgt_unk_idx
)
# Decoder forward, takes [tgt_len, batch, nfeats] as input
# and [src_len, batch, hidden] as memory_bank
# in case of inference tgt_len = 1, batch = beam times batch_size
# in case of Gold Scoring tgt_len = actual length, batch = 1 batch
dec_out, dec_attn = self.model.decoder(
decoder_in, memory_bank, memory_lengths=memory_lengths, step=step
)
# Generator forward.
if not self.copy_attn:
if "std" in dec_attn:
attn = dec_attn["std"]
else:
attn = None
log_probs = self.model.generator(dec_out.squeeze(0))
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
else:
attn = dec_attn["copy"]
scores = self.model.generator(dec_out.view(-1, dec_out.size(2)),
attn.view(-1, attn.size(2)),
src_map)
# here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]
if batch_offset is None:
scores = scores.view(-1, batch.batch_size, scores.size(-1))
scores = scores.transpose(0, 1).contiguous()
else:
scores = scores.view(-1, self.beam_size, scores.size(-1))
scores = collapse_copy_scores(
scores,
batch,
self._tgt_vocab,
src_vocabs,
batch_dim=0,
batch_offset=batch_offset
)
scores = scores.view(decoder_in.size(0), -1, scores.size(-1))
log_probs = scores.squeeze(0).log()
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
return log_probs, attn
def _translate_batch_with_strategy(
self,
batch,
src_vocabs,
decode_strategy):
"""Translate a batch of sentences step by step using cache.
Args:
batch: a batch of sentences, yield by data iterator.
src_vocabs (list): list of torchtext.data.Vocab if can_copy.
decode_strategy (DecodeStrategy): A decode strategy to use for
generate translation step by step.
Returns:
results (dict): The translation results.
"""
# (0) Prep the components of the search.
use_src_map = self.copy_attn
parallel_paths = decode_strategy.parallel_paths # beam_size
batch_size = batch.batch_size
# (1) Run the encoder on the src.
src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)
self.model.decoder.init_state(src, memory_bank, enc_states)
results = {
"predictions": None,
"scores": None,
"attention": None,
"batch": batch,
"gold_score": self._gold_score(
batch, memory_bank, src_lengths, src_vocabs, use_src_map,
enc_states, batch_size, src)}
# (2) prep decode_strategy. Possibly repeat src objects.
src_map = batch.src_map if use_src_map else None
fn_map_state, memory_bank, memory_lengths, src_map = \
decode_strategy.initialize(memory_bank, src_lengths, src_map)
if fn_map_state is not None:
self.model.decoder.map_state(fn_map_state)
# (3) Begin decoding step by step:
for step in range(decode_strategy.max_length):
decoder_input = decode_strategy.current_predictions.view(1, -1, 1)
log_probs, attn = self._decode_and_generate(
decoder_input,
memory_bank,
batch,
src_vocabs,
memory_lengths=memory_lengths,
src_map=src_map,
step=step,
batch_offset=decode_strategy.batch_offset)
decode_strategy.advance(log_probs, attn)
any_finished = decode_strategy.is_finished.any()
if any_finished:
decode_strategy.update_finished()
if decode_strategy.done:
break
select_indices = decode_strategy.select_indices
if any_finished:
# Reorder states.
if isinstance(memory_bank, tuple):
memory_bank = tuple(x.index_select(1, select_indices)
for x in memory_bank)
else:
memory_bank = memory_bank.index_select(1, select_indices)
memory_lengths = memory_lengths.index_select(0, select_indices)
if src_map is not None:
src_map = src_map.index_select(1, select_indices)
if parallel_paths > 1 or any_finished:
self.model.decoder.map_state(
lambda state, dim: state.index_select(dim, select_indices))
results["scores"] = decode_strategy.scores
results["predictions"] = decode_strategy.predictions
results["attention"] = decode_strategy.attention
return results
def _score_target(self, batch, memory_bank, src_lengths,
src_vocabs, src_map):
tgt = batch.tgt
tgt_in = tgt[:-1]
log_probs, attn = self._decode_and_generate(
tgt_in, memory_bank, batch, src_vocabs,
memory_lengths=src_lengths, src_map=src_map)
log_probs[:, :, self._tgt_pad_idx] = 0
gold = tgt[1:]
gold_scores = log_probs.gather(2, gold)
gold_scores = gold_scores.sum(dim=0).view(-1)
return gold_scores
def _report_score(self, name, score_total, words_total):
if words_total == 0:
msg = "%s No words predicted" % (name,)
else:
msg = ("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / words_total,
name, math.exp(-score_total / words_total)))
return msg
def _report_bleu(self, tgt_path):
import subprocess
base_dir = os.path.abspath(__file__ + "/../../..")
# Rollback pointer to the beginning.
self.out_file.seek(0)
print()
res = subprocess.check_output(
"perl %s/tools/multi-bleu.perl %s" % (base_dir, tgt_path),
stdin=self.out_file, shell=True
).decode("utf-8")
msg = ">> " + res.strip()
return msg
def _report_rouge(self, tgt_path):
import subprocess
path = os.path.split(os.path.realpath(__file__))[0]
msg = subprocess.check_output(
"python %s/tools/test_rouge.py -r %s -c STDIN" % (path, tgt_path),
shell=True, stdin=self.out_file
).decode("utf-8").strip()
return msg
| 38.047478 | 79 | 0.563368 |
9b3deadc900c6a8d3c71ad795785a21ab7b044da | 2,747 | py | Python | src/garage/tf/distributions/bernoulli.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | src/garage/tf/distributions/bernoulli.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | src/garage/tf/distributions/bernoulli.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from garage.tf.distributions.base import Distribution
TINY = 1e-8
class Bernoulli(Distribution):
def __init__(self, dim, name='Bernoulli'):
self._name = name
self._dim = dim
@property
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name='kl_sym'):
with tf.name_scope(name):
old_p = old_dist_info_vars['p']
new_p = new_dist_info_vars['p']
kl = (old_p *
(tf.math.log(old_p + TINY) - tf.math.log(new_p + TINY)) +
(1 - old_p) * (tf.math.log(1 - old_p + TINY) -
tf.math.log(1 - new_p + TINY)))
ndims = kl.get_shape().ndims
return tf.reduce_sum(kl, axis=ndims - 1)
    def kl(self, old_dist_info, new_dist_info):
        """Closed-form KL(p_old || p_new) for independent Bernoullis:
        sum_i [p_old*log(p_old/p_new) + (1-p_old)*log((1-p_old)/(1-p_new))].
        """
old_p = old_dist_info['p']
new_p = new_dist_info['p']
kl = old_p * (np.log(old_p + TINY) - np.log(new_p + TINY)) \
+ (1 - old_p) \
* (np.log(1 - old_p + TINY) - np.log(1 - new_p + TINY))
return np.sum(kl, axis=-1)
def sample(self, dist_info):
p = np.asarray(dist_info['p'])
return np.cast['int'](
np.random.uniform(low=0., high=1., size=p.shape) < p)
def likelihood_ratio_sym(self,
x_var,
old_dist_info_vars,
new_dist_info_vars,
name='likelihood_ratio_sym'):
with tf.name_scope(name):
old_p = old_dist_info_vars['p']
new_p = new_dist_info_vars['p']
ndims = old_p.get_shape().ndims
return tf.reduce_prod(x_var * new_p / (old_p + TINY) +
(1 - x_var) * (1 - new_p) /
(1 - old_p + TINY),
axis=ndims - 1)
def log_likelihood_sym(self, x_var, dist_info_vars, name='log_likelihood_sym'):
with tf.name_scope(name):
p = dist_info_vars['p']
ndims = p.get_shape().ndims
return tf.reduce_sum(x_var * tf.math.log(p + TINY) +
(1 - x_var) * tf.math.log(1 - p + TINY),
axis=ndims - 1)
def log_likelihood(self, xs, dist_info):
p = dist_info['p']
return np.sum(xs * np.log(p + TINY) + (1 - xs) * np.log(1 - p + TINY),
axis=-1)
def entropy(self, dist_info):
p = dist_info['p']
return np.sum(-p * np.log(p + TINY) - (1 - p) * np.log(1 - p + TINY),
axis=-1)
@property
def dist_info_keys(self):
return ['p']
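# Minimal usage sketch (illustrative, not part of the original module). A
# Bernoulli distribution over `dim` binary variables is parameterized by a
# dict {'p': per-dimension success probabilities}; the numbers are invented.
def _demo_bernoulli():
    dist = Bernoulli(dim=3)
    dist_info = {'p': np.array([[0.1, 0.5, 0.9]])}
    xs = dist.sample(dist_info)  # binary array with the same shape as p
    return xs, dist.log_likelihood(xs, dist_info), dist.entropy(dist_info)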
| 35.217949 | 83 | 0.495086 |
92fb8180b972de53bb253c06e1b38359376ef11e | 1,180 | py | Python | clay/markdown_ext/md_superscript.py | TuxCoder/Clay | 04f15b4d742b14d09df9049dd91cfa4386cba66e | [
"MIT"
] | null | null | null | clay/markdown_ext/md_superscript.py | TuxCoder/Clay | 04f15b4d742b14d09df9049dd91cfa4386cba66e | [
"MIT"
] | null | null | null | clay/markdown_ext/md_superscript.py | TuxCoder/Clay | 04f15b4d742b14d09df9049dd91cfa4386cba66e | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Superscipt extension for Markdown
==================================
Examples:
>>> import markdown
>>> md = markdown.Markdown(extensions=[SuperscriptExtension()])
>>> md.convert('lorem ipsum^1 sit.')
u'<p>lorem ipsum<sup>1</sup> sit.</p>'
>>> md.convert('6.02 x 10^23')
u'<p>6.02 x 10<sup>23</sup></p>'
>>> md.convert('10^(2x + 3).')
u'<p>10<sup>2x + 3</sup>.</p>'
"""
import markdown
from markdown.inlinepatterns import Pattern
from markdown.util import etree, AtomicString
SUPER_RE = r'\^(?:([^\(\s]+)|\(([^\n\)]+)\))'
class SuperscriptPattern(Pattern):
""" Return a superscript Element (`word^2^`). """
def handleMatch(self, m):
supr = m.group(2) or m.group(3)
text = supr
el = etree.Element("sup")
el.text = AtomicString(text)
return el
class SuperscriptExtension(markdown.Extension):
""" Superscript Extension for Python-Markdown.
"""
def extendMarkdown(self, md, md_globals):
""" Replace superscript with SuperscriptPattern """
md.inlinePatterns['superscript'] = SuperscriptPattern(SUPER_RE, md)
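# Conventional module-level entry point (added here as a sketch so the
# extension can also be loaded by dotted path, e.g.
# markdown.Markdown(extensions=['clay.markdown_ext.md_superscript']); this
# mirrors the Python-Markdown 2.x convention, and exact kwargs handling
# depends on the installed markdown version):
def makeExtension(*args, **kwargs):
    return SuperscriptExtension(*args, **kwargs)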
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23.137255 | 75 | 0.616102 |
c0c91e4430ccbb59fc5f0b146c18d4971a58198b | 1,282 | py | Python | dict_read.py | liaocm/LLHTranslate | c3e62f91b9f8b3594ce07601e123f0757efe63f3 | [
"Apache-2.0"
] | null | null | null | dict_read.py | liaocm/LLHTranslate | c3e62f91b9f8b3594ce07601e123f0757efe63f3 | [
"Apache-2.0"
] | null | null | null | dict_read.py | liaocm/LLHTranslate | c3e62f91b9f8b3594ce07601e123f0757efe63f3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import io, sys
def verify_line(line):
    """Classify a raw line: -1 on EOF, 1 on a blank separator line, 0 otherwise."""
    if not line:
        return -1
    if line == '\n':
        return 1
    return 0
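# File format assumed by read_dict below (inferred from its parsing loop):
# entries are pairs of lines -- a source phrase followed on the next line by
# its translation -- and the table is terminated by the first blank line.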
def read_dict(fname, show_warning=True):
content = dict()
    try:
        fd = io.open(fname, 'r')
    except OSError:
        sys.exit("Unable to open the dictionary: {0}".format(fname))
line_num = 1
key_line = ""
val_line = ""
while True:
key_line = fd.readline()
check = verify_line(key_line)
if check == -1:
sys.exit("ERROR: Unexpected EOF at line {0}.".format(line_num))
elif check == 1:
break
line_num += 1
key_line = key_line[:-1]
val_line = fd.readline()
check = verify_line(val_line)
if check == -1:
sys.exit("ERROR: Unexpected EOF at line {0}.".format(line_num))
elif check == 1 and show_warning:
print("Warning: No translation found for {0} at line {1}.".format(key_line, line_num))
line_num += 1
val_line = val_line[:-1]
if key_line in content and show_warning:
print("Warning: Collision for {0} at line {1}. Overriding.".format(key_line, line_num - 1))
content[key_line] = val_line
    key_line = fd.readline()
    if key_line and show_warning:
        print("Warning: Extra content found after the terminating blank line at line {0}.".format(line_num))
fd.close()
return content | 26.708333 | 97 | 0.641186 |
02465a2f656014272a2fa4896a41ebcd98e6c5d8 | 1,463 | py | Python | ooobuild/lo/configuration/backend/merge_importer.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/configuration/backend/merge_importer.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/configuration/backend/merge_importer.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.configuration.backend
from .importer import Importer as Importer_6e7810c8
class MergeImporter(Importer_6e7810c8):
"""
Service Class
imports data into a configuration layer by merging with existing data.
No named arguments to com.sun.star.lang.XInitialization.initialize() are supported.
**since**
OOo 1.1.2
See Also:
`API MergeImporter <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1configuration_1_1backend_1_1MergeImporter.html>`_
"""
__ooo_ns__: str = 'com.sun.star.configuration.backend'
__ooo_full_ns__: str = 'com.sun.star.configuration.backend.MergeImporter'
__ooo_type_name__: str = 'service'
__all__ = ['MergeImporter']
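# Illustrative usage sketch (assumes the standard UNO service-manager
# pattern; not verified against a live office instance):
#   importer = ctx.ServiceManager.createInstanceWithContext(
#       MergeImporter.__ooo_full_ns__, ctx)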
| 31.804348 | 147 | 0.74026 |
d201c51658bb23fef026f46026d60002b900ac20 | 1,730 | py | Python | plotly/validators/candlestick/hoverlabel/_font.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/candlestick/hoverlabel/_font.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | null | null | null | plotly/validators/candlestick/hoverlabel/_font.py | fcollonval/plotly.py | 5c7f100db1af8c82bb740a38ef684955a8ed6d0e | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='font',
parent_name='candlestick.hoverlabel',
**kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Font',
data_docs="""
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
""",
**kwargs
)
| 35.306122 | 68 | 0.537572 |
6fe02521649d88d87fe829b3712574c90316d3f1 | 5,486 | py | Python | detection/dota15_demo.py | chandlerbing65nm/PVT | e171519b2a1a44e36ebdf0732f274a190b50ce29 | [
"Apache-2.0"
] | null | null | null | detection/dota15_demo.py | chandlerbing65nm/PVT | e171519b2a1a44e36ebdf0732f274a190b50ce29 | [
"Apache-2.0"
] | null | null | null | detection/dota15_demo.py | chandlerbing65nm/PVT | e171519b2a1a44e36ebdf0732f274a190b50ce29 | [
"Apache-2.0"
] | null | null | null | import cv2
import math
import mmcv
import numpy as np
import os
import pdb
from mmcv import Config
from tqdm import tqdm
from argparse import ArgumentParser
import DOTA_devkit.polyiou as polyiou
from mmdet.apis import init_detector, inference_detector, draw_poly_detections
from mmdet.datasets import get_dataset
dota15_colormap = [
(54, 67, 244),
(99, 30, 233),
(176, 39, 156),
(183, 58, 103),
(181, 81, 63),
(243, 150, 33),
(212, 188, 0),
(136, 150, 0),
(80, 175, 76),
(74, 195, 139),
(57, 220, 205),
(59, 235, 255),
(0, 152, 255),
(34, 87, 255),
(72, 85, 121),
(139, 125, 96)]
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('--out', help='output image')
args = parser.parse_args()
return args
def py_cpu_nms_poly_fast_np(dets, thresh):
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
        # Leftover NaN-debugging guard: `ovr` is always empty at this point,
        # so the lookup raises IndexError and the check can never fire; the
        # values that could actually be NaN live in `hbb_ovr`. Behavior is
        # preserved, but the bare except is narrowed.
        try:
            if math.isnan(ovr[0]):
                pdb.set_trace()
        except Exception:
            pass
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
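# Illustrative sketch (not part of the original script): the helper above
# expects an (N, 9) array of 8 polygon vertex coordinates (x1, y1, ..., x4,
# y4) followed by a confidence score. The sample boxes below are invented.
def _demo_poly_nms():
    dets = np.array([
        [0., 0., 10., 0., 10., 10., 0., 10., 0.9],      # kept (highest score)
        [1., 1., 11., 1., 11., 11., 1., 11., 0.8],      # overlaps box 0 heavily
        [50., 50., 60., 50., 60., 60., 50., 60., 0.7],  # disjoint, kept
    ])
    return py_cpu_nms_poly_fast_np(dets, thresh=0.1)  # expected: [0, 2]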
class DetectorModel():
def __init__(self,
config_file,
checkpoint_file):
# init RoITransformer
self.config_file = config_file
self.checkpoint_file = checkpoint_file
self.cfg = Config.fromfile(self.config_file)
self.data_test = self.cfg.data['test']
self.dataset = get_dataset(self.data_test)
self.classnames = self.dataset.CLASSES
self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
def inference_single(self, imagname, slide_size, chip_size):
img = mmcv.imread(imagname)
height, width, channel = img.shape
slide_h, slide_w = slide_size
hn, wn = chip_size
# TODO: check the corner case
# import pdb; pdb.set_trace()
total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]
        for i in tqdm(range(int(width / slide_w) + 1)):
for j in range(int(height / slide_h) + 1):
subimg = np.zeros((hn, wn, channel))
# print('i: ', i, 'j: ', j)
chip = img[j * slide_h:j * slide_h + hn, i * slide_w:i * slide_w + wn, :3]
subimg[:chip.shape[0], :chip.shape[1], :] = chip
chip_detections = inference_detector(self.model, subimg)
# print('result: ', result)
for cls_id, name in enumerate(self.classnames):
chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w
chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h
# import pdb;pdb.set_trace()
try:
total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id]))
except:
import pdb;
pdb.set_trace()
# nms
for i in range(len(self.classnames)):
keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1)
total_detections[i] = total_detections[i][keep]
return total_detections
def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size):
detections = self.inference_single(srcpath, slide_size, chip_size)
img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.2,
colormap=dota15_colormap)
cv2.imwrite(dstpath, img)
if __name__ == '__main__':
args = parse_args()
model = DetectorModel(args.config, args.checkpoint)
img_dir = args.img
out_dir = args.out
img_names = os.listdir(img_dir)
for img_name in img_names:
print(img_name)
img_path = os.path.join(img_dir, img_name)
out_path = os.path.join(out_dir, img_name)
model.inference_single_vis(img_path, out_path, (512, 512), (1024, 1024))
| 35.166667 | 118 | 0.556325 |
53c67ddc4d522fcc2aa592daeb46c15301b5636a | 7,276 | py | Python | mealpy/dummy/PIO.py | JokerHB/mealpy | 4bd00f47ed575d01f246d5fd0ef306d7c1fa5a5f | [
"MIT"
] | 162 | 2020-08-31T10:13:06.000Z | 2022-03-31T09:38:19.000Z | mealpy/dummy/PIO.py | JokerHB/mealpy | 4bd00f47ed575d01f246d5fd0ef306d7c1fa5a5f | [
"MIT"
] | 51 | 2020-09-13T10:46:31.000Z | 2022-03-30T06:12:08.000Z | mealpy/dummy/PIO.py | JokerHB/mealpy | 4bd00f47ed575d01f246d5fd0ef306d7c1fa5a5f | [
"MIT"
] | 58 | 2020-09-12T13:29:18.000Z | 2022-03-31T09:38:21.000Z | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 11:41, 08/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform
from numpy import exp, sum
from mealpy.optimizer import Root
class BasePIO(Root):
"""
My improved version of: Pigeon-Inspired Optimization (PIO)
(Pigeon-inspired optimization: a new swarm intelligence optimizer for air robot path planning)
Link:
+ DOI: 10.1108/IJICC-02-2014-0005
Noted:
        + The paper is very unclear about most of the parameters and the flow of the algorithm (some points are even wrong)
        + This is my version; I changed almost everything, even the parameters and the flow of the algorithm
        + Also, the personal best is not needed in this version (so it is now much different from PSO)
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, R=0.2, n_switch=0.75, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch # Nc1 + Nc2
self.pop_size = pop_size # Np
self.R = R
if n_switch < 1:
self.n_switch = int(self.epoch * n_switch)
else:
self.n_switch = int(n_switch) # Represent Nc1 and Nc2 in the paper
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
list_velocity = uniform(self.lb, self.ub, (self.pop_size, self.problem_size))
n_p = int(self.pop_size / 2)
for epoch in range(0, self.epoch):
if epoch < self.n_switch: # Map and compass operations
for i in range(0, self.pop_size):
v_new = list_velocity[i] * exp(-self.R * (epoch + 1)) + uniform() * (g_best[self.ID_POS] - pop[i][self.ID_POS])
x_new = pop[i][self.ID_POS] + v_new
x_new = self.amend_position_random(x_new)
fit = self.get_fitness_position(x_new)
if fit < pop[i][self.ID_FIT]:
pop[i] = [x_new, fit]
list_velocity[i] = v_new
else: # Landmark operations
pop = sorted(pop, key=lambda item: item[self.ID_FIT])
list_fit = [pop[i][self.ID_FIT] for i in range(0, n_p)]
                # Fix: collect positions (ID_POS), not fitness values, so the
                # fitness-weighted landmark center x_c below is a real position.
                list_pos = [pop[i][self.ID_POS] for i in range(0, n_p)]
frac_up = sum([list_fit[i] * list_pos[i] for i in range(0, n_p)], axis=0)
frac_down = n_p * sum(list_fit)
x_c = frac_up / frac_down
## Move all pigeon based on target x_c
for i in range(0, self.pop_size):
x_new = pop[i][self.ID_POS] + uniform() * (x_c - pop[i][self.ID_POS])
fit_new = self.get_fitness_position(x_new)
if fit_new < pop[i][self.ID_FIT]:
pop[i] = [x_new, fit_new]
# Update the global best
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
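# Illustrative usage sketch (not part of the original module); the sphere
# objective and bounds are invented, and it assumes the mealpy Root base
# class accepts plain Python lists for lb/ub:
def _demo_base_pio():
    def sphere(solution):
        return sum(solution ** 2)  # numpy sum imported at module top
    model = BasePIO(obj_func=sphere, lb=[-10] * 10, ub=[10] * 10,
                    verbose=False, epoch=100, pop_size=50)
    best_pos, best_fit, loss_history = model.train()
    return best_fit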
class LevyPIO(BasePIO):
"""
My levy version of: Pigeon-Inspired Optimization (PIO)
(Pigeon-inspired optimization: a new swarm intelligence optimizer for air robot path planning)
Noted:
        + The paper is very unclear about most of the parameters and the flow of the algorithm (some points are even wrong)
        + This is my version; I changed almost everything, even the parameters and the flow of the algorithm
        + Also, the personal best is not needed in this version (so it is now much different from PSO)
        + I applied the levy-flight here for more robustness
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, R=0.2, n_switch=0.75, **kwargs):
BasePIO.__init__(self, obj_func, lb, ub, verbose, epoch, pop_size, R, n_switch, kwargs = kwargs)
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
list_velocity = uniform(self.lb, self.ub, (self.pop_size, self.problem_size))
n_p = int(self.pop_size / 2)
for epoch in range(0, self.epoch):
if epoch < self.n_switch: # Map and compass operations
for i in range(0, self.pop_size):
v_new = list_velocity[i] * exp(-self.R * (epoch + 1)) + uniform() * (g_best[self.ID_POS] - pop[i][self.ID_POS])
x_new = pop[i][self.ID_POS] + v_new
x_new = self.amend_position_random(x_new)
fit_new = self.get_fitness_position(x_new)
if fit_new < pop[i][self.ID_FIT]:
pop[i] = [x_new, fit_new]
list_velocity[i] = v_new
else: # Landmark operations
pop = sorted(pop, key=lambda item: item[self.ID_FIT])
list_fit = [pop[i][self.ID_FIT] for i in range(0, n_p)]
                # Fix: collect positions (ID_POS), not fitness values, so the
                # fitness-weighted landmark center x_c below is a real position.
                list_pos = [pop[i][self.ID_POS] for i in range(0, n_p)]
frac_up = sum([list_fit[i] * list_pos[i] for i in range(0, n_p)], axis=0)
frac_down = n_p * sum(list_fit)
x_c = frac_up / frac_down
## Move all pigeon based on target x_c
for i in range(0, self.pop_size):
if uniform() < 0.5:
x_new = pop[i][self.ID_POS] + uniform() * (x_c - pop[i][self.ID_POS])
else:
x_new = self.levy_flight(epoch, pop[i][self.ID_POS], g_best[self.ID_POS])
fit_new = self.get_fitness_position(x_new)
if fit_new < pop[i][self.ID_FIT]:
pop[i] = [x_new, fit_new]
# Update the global best
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
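# Illustrative usage only (not part of the original module): a minimal
# sketch, assuming the shared Root base class accepts plain callables and
# list bounds as shown; `sphere` is a hypothetical objective function.
if __name__ == "__main__":
    def sphere(solution):
        return sum([x ** 2 for x in solution])
    model = LevyPIO(obj_func=sphere, lb=[-10] * 5, ub=[10] * 5,
                    verbose=False, epoch=100, pop_size=50)
    best_pos, best_fit, loss_history = model.train()
    print(best_fit)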
| 53.5 | 131 | 0.529962 |
4ce39ad8be08a0d7a389868b75324c47d637fec9 | 39,651 | py | Python | datalad/metadata/aggregate.py | jelmer/datalad | fedc04867d87e0191bd500991d0df97e97113457 | [
"MIT"
] | null | null | null | datalad/metadata/aggregate.py | jelmer/datalad | fedc04867d87e0191bd500991d0df97e97113457 | [
"MIT"
] | null | null | null | datalad/metadata/aggregate.py | jelmer/datalad | fedc04867d87e0191bd500991d0df97e97113457 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Interface for aggregating metadata
"""
__docformat__ = 'restructuredtext'
import logging
import os
from os import makedirs
from os import listdir
import os.path as op
from os.path import join as opj
from os.path import dirname
from os.path import relpath
from os.path import isabs
from os.path import exists
from os.path import lexists
from os.path import curdir
from hashlib import md5
import shutil
# API commands we need
from datalad.distribution.get import Get
from datalad.distribution.remove import Remove
from datalad.distribution.subdatasets import Subdatasets
from datalad.interface.unlock import Unlock
import datalad
from datalad.interface.annotate_paths import AnnotatePaths
from datalad.interface.base import Interface
from datalad.interface.utils import eval_results
from datalad.interface.utils import discover_dataset_trace_to_targets
from datalad.interface.save import Save
from datalad.interface.base import build_doc
from datalad.interface.common_opts import recursion_limit, recursion_flag
from datalad.interface.common_opts import nosave_opt
from datalad.interface.results import get_status_dict
from datalad.distribution.dataset import Dataset
from datalad.metadata.metadata import agginfo_relpath
from datalad.metadata.metadata import exclude_from_metadata
from datalad.metadata.metadata import get_metadata_type
from datalad.metadata.metadata import _load_json_object
from datalad.metadata.metadata import _get_metadata
from datalad.metadata.metadata import _get_metadatarelevant_paths
from datalad.metadata.metadata import _get_containingds_from_agginfo
from datalad.distribution.dataset import datasetmethod, EnsureDataset, require_dataset
from datalad.support.param import Parameter
from datalad.support.constraints import EnsureStr
from datalad.support.constraints import EnsureNone
from datalad.support.constraints import EnsureBool
from datalad.support.constraints import EnsureChoice
from datalad.support.gitrepo import GitRepo
from datalad.support.annexrepo import AnnexRepo
from datalad.support import json_py
from datalad.utils import path_is_subpath
from datalad.utils import assure_list
lgr = logging.getLogger('datalad.metadata.aggregate')
# TODO filepath_info is obsolete
location_keys = ('dataset_info', 'content_info', 'filepath_info')
def _get_dsinfo_from_aggmetadata(ds_path, path, recursive, db):
"""Grab info on aggregated metadata for a path from a given dataset.
The actual info is stored in a `db` dict under the absolute path
of the dataset that contains the query path, plus any subdataset
in case of recursion (with their own DB entries).
Parameters
----------
    ds_path : str
      absolute path of the source dataset
path : str
absolute path for which to obtain metadata
recursive : bool
Returns
-------
    tuple or list
      A tuple is an error message template plus its arguments; a list
      contains the absolute paths of all datasets for which info was put
      into the DB.
"""
info_fpath = opj(ds_path, agginfo_relpath)
info_basepath = dirname(info_fpath)
# TODO cache these
agginfos = _load_json_object(info_fpath)
def _ensure_abs_obj_location(rec):
# object location in the DB must be absolute so we can copy easily
# to all relevant datasets
for key in location_keys:
if key in rec and not isabs(rec[key]):
rec[key] = opj(info_basepath, rec[key])
return rec
rpath = relpath(path, start=ds_path)
seed_ds = _get_containingds_from_agginfo(agginfos, rpath)
if seed_ds is None:
# nothing found
# this will be the message in the result for the query path
# and could be a tuple
return ("No matching aggregated metadata for path '%s' in Dataset at %s", rpath, ds_path)
# easy peasy
seed_abs = opj(ds_path, seed_ds)
db[seed_abs] = _ensure_abs_obj_location(agginfos[seed_ds])
hits = [seed_abs]
if not recursive:
return hits
# a little more complicated: we need to loop over all subdataset
# records and pick the ones that are underneath the seed
for agginfo_path in agginfos:
if path_is_subpath(agginfo_path, seed_ds):
absp = opj(ds_path, agginfo_path)
db[absp] = _ensure_abs_obj_location(agginfos[agginfo_path])
hits.append(absp)
# TODO we must keep the info on these recursively discovered datasets
# somewhere, because we cannot rediscover them on the filesystem
# when updating the datasets later on
return hits
def _dump_extracted_metadata(agginto_ds, aggfrom_ds, db, to_save, force_extraction):
"""Dump metadata from a dataset into object in the metadata store of another
Info on the metadata objects is placed into a DB dict under the
absolute path of the dataset whose metadata was aggregated.
Parameters
----------
agginto_ds : Dataset
aggfrom_ds : Dataset
    db : dict
    to_save : list
    force_extraction : bool
    """
subds_relpaths = aggfrom_ds.subdatasets(result_xfm='relpaths', return_type='list')
# figure out a "state" of the dataset wrt its metadata that we are describing
# 1. the latest commit that changed any file for which we could have native metadata
refcommit = _get_latest_refcommit(aggfrom_ds, subds_relpaths)
objid = refcommit if refcommit else ''
    # 2. our own dataset-global metadata and the dataset config
for tfile in (
opj(aggfrom_ds.path, '.datalad', 'metadata', 'dataset.json'),
opj(aggfrom_ds.path, '.datalad', 'config')):
if exists(tfile):
objid += md5(open(tfile, 'r').read().encode()).hexdigest()
# 3. potential annex-based metadata
# XXX TODO shouldn't this be the annex extractor?
if isinstance(aggfrom_ds, AnnexRepo) and \
aggfrom_ds.config.obtain(
'datalad.metadata.aggregate-content-datalad-core',
default=True,
valtype=EnsureBool()):
# if there is no annex metadata, this will come out empty,
# hence hash would be same as for a plain GitRepo
# and no, we cannot use the shasum of the annex branch,
# because this will change even when no metadata has changed
timestamps, _ = aggfrom_ds.repo._run_annex_command(
'metadata',
'.',
'-g', 'lastchanged')
objid += timestamps.strip()
if not objid:
lgr.debug('%s has no metadata-relevant content', aggfrom_ds)
else:
lgr.debug(
'Dump metadata of %s into %s',
aggfrom_ds, agginto_ds)
# check if we already have in store what we are about to create
old_agginfo = db.get(aggfrom_ds.path, {})
agginfo = {}
# dataset global
if aggfrom_ds.id:
agginfo['id'] = aggfrom_ds.id
agginfo['refcommit'] = refcommit
# put in DB
db[aggfrom_ds.path] = agginfo
if not objid:
# this is no error, there is simply no metadata whatsoever
return False
# shorten to MD5sum
objid = md5(objid.encode()).hexdigest()
# assemble info on the metadata extraction and storage
# label type targetds storage method
metasources = {'ds': {'type': 'dataset', 'targetds': agginto_ds, 'dumper': json_py.dump}}
# do not store content metadata if either the source or the target dataset
    # does not want it
# TODO this AND was an OR before (wrong), misses a test
if aggfrom_ds.config.obtain(
'datalad.metadata.store-aggregate-content',
default=True,
valtype=EnsureBool()) and \
agginto_ds.config.obtain(
'datalad.metadata.store-aggregate-content',
default=True,
valtype=EnsureBool()):
metasources['cn'] = {
'type': 'content',
'targetds': agginto_ds,
'dumper': json_py.dump2xzstream}
# check if we have the extracted metadata for this state already
# either in the source or in the destination dataset
metafound = {
s: d
# look in targetds last to not have to move things
# unnecessarily
for d in (aggfrom_ds, agginto_ds)
for s in metasources
# important to test for lexists() as we do not need to
# or want to `get()` metadata files for this test
# info on identity is sufficient
if op.lexists(
op.join(
d.path,
dirname(agginfo_relpath),
_get_obj_location(objid, s)))
} if not force_extraction else False
if not metafound:
lgr.debug('Performing metadata extraction from %s', aggfrom_ds)
# no metadata found -> extract
# this places metadata dump files into the configured
# target dataset and lists them in `to_save`, as well
# as updates the `db` record for `aggfrom_ds`
return _extract_metadata(
agginto_ds,
aggfrom_ds,
db,
to_save,
objid,
metasources,
refcommit,
subds_relpaths)
# we did not actually run an extraction, so we need to
# assemble an aggregation record from the existing pieces
# that we found
# simple case: the target dataset has all the records already:
if all(d is agginto_ds for s, d in metafound.items()):
lgr.debug('Sticking with up-to-date metadata for %s', aggfrom_ds)
# no change, use old record from the target dataset
db[aggfrom_ds.path] = old_agginfo
# no error
return False
else:
lgr.debug('Reusing previously extracted metadata for %s', aggfrom_ds)
# we need to move the metadata dump(s) into the target dataset
objrelpaths = {
label: op.join(
dirname(agginfo_relpath),
_get_obj_location(objid, label))
for label in metafound
}
# make sure all the to-be-moved metadata records are present
# locally
aggfrom_ds.get(
# prep annotated path records to speed up the call
path=[dict(path=op.join(aggfrom_ds.path, p),
parentds=aggfrom_ds.path,
type='file')
for p in objrelpaths.values()],
result_renderer='disabled')
# actually copy dump files
for objrelpath in objrelpaths.values():
objpath = op.join(agginto_ds.path, objrelpath)
objdir = dirname(objpath)
if not exists(objdir):
makedirs(objdir)
# XXX TODO once we have a command that can copy/move files
# from one dataset to another including file availability
# info, this should be used here
shutil.copyfile(
op.join(aggfrom_ds.path, objrelpath),
objpath)
# mark for saving
to_save.append(dict(
path=objpath,
parentds=agginto_ds.path,
type='file'))
# lastly get 'self' aggregation record from source dataset and
# use in target dataset
db[aggfrom_ds.path] = _load_agginfo_db(aggfrom_ds.path)[aggfrom_ds.path]
return False
def _extract_metadata(agginto_ds, aggfrom_ds, db, to_save, objid, metasources, refcommit, subds_relpaths):
# we will replace any conflicting info on this dataset with fresh stuff
agginfo = db.get(aggfrom_ds.path, {})
# paths to extract from
relevant_paths = sorted(_get_metadatarelevant_paths(aggfrom_ds, subds_relpaths))
# get extractors to engage from source dataset
nativetypes = ['datalad_core', 'annex'] + assure_list(get_metadata_type(aggfrom_ds))
    # store essential extraction config in dataset record
agginfo['extractors'] = nativetypes
agginfo['datalad_version'] = datalad.__version__
# perform the actual extraction
dsmeta, contentmeta, errored = _get_metadata(
aggfrom_ds,
nativetypes,
# None indicates to honor a datasets per-extractor configuration and to be
# on by default
global_meta=None,
content_meta=None,
paths=relevant_paths)
meta = {
'ds': dsmeta,
'cn': (dict(contentmeta[k], path=k) for k in sorted(contentmeta))
}
    # inject the info which commit we are describing into the core metadata
# this is done here in order to avoid feeding it all the way down
coremeta = dsmeta.get('datalad_core', {})
version = aggfrom_ds.repo.describe(commitish=refcommit)
if version:
coremeta['version'] = version
coremeta['refcommit'] = refcommit
dsmeta['datalad_core'] = coremeta
# for both types of metadata
for label, props in metasources.items():
dest = props['targetds']
if not meta[label]:
continue
# only write to disk if there is something
objrelpath = _get_obj_location(objid, label)
if props['dumper'] is json_py.dump2xzstream:
objrelpath += '.xz'
# place metadata object into the source dataset
objpath = opj(dest.path, dirname(agginfo_relpath), objrelpath)
# write obj files
if exists(objpath):
dest.unlock(objpath)
elif lexists(objpath):
# if it gets here, we have a symlink that is pointing nowhere
# kill it, to be replaced with the newly aggregated content
dest.repo.remove(objpath)
# TODO actually dump a compressed file when annexing is possible
# to speed up on-demand access
props['dumper'](meta[label], objpath)
# stage for dataset.save()
to_save.append(dict(path=objpath, type='file'))
# important to use abspath here, needs to be rewritten relative to
# all receiving datasets
agginfo['{}_info'.format(props['type'])] = objpath
# overwrite existing info with stuff from just finished extraction
db[aggfrom_ds.path] = agginfo
return errored
def _adj2subtrees(base, adj, subs):
# given a set of parent-child mapping, compute a mapping of each parent
# to all its (grand)children of any depth level
subtrees = dict(adj)
subs = set(subs)
# from bottom up
for ds in sorted(adj, reverse=True):
subtree = []
for sub in subtrees[ds]:
subtree.append(sub)
subtree.extend(subtrees.get(sub, []))
subtrees[ds] = subtree
# give each leaf dataset an entry too
for sub in subs:
if sub not in subtrees and GitRepo.is_valid_repo(sub):
subtrees[sub] = []
return subtrees
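# Illustrative example (not part of the original module): given the adjacency
# {'/ds': ['/ds/a'], '/ds/a': ['/ds/a/b']} and subs=['/ds/a/b'],
# _adj2subtrees() returns {'/ds': ['/ds/a', '/ds/a/b'], '/ds/a': ['/ds/a/b'],
# '/ds/a/b': []} (the leaf entry only appears if it is a valid repo on disk).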
def _get_latest_refcommit(ds, subds_relpaths):
"""Find the latest commit that changed any real content
This will ignore anything at or underneath:
- .git
- .datalad
- .gitmodules
- .gitattributes
- any submodule
Returns
-------
str or None
      None is returned if no commit can be found, or no relevant content
      files were found at all. Otherwise the full hash of the last commit
      that touched any relevant content is returned.
"""
def _filterpaths(basepath, paths, exclude):
final_paths = []
for rp in [opj(basepath, p) if basepath else p for p in paths]:
if rp in exclude:
continue
elif any(path_is_subpath(ep, rp) for ep in exclude):
final_paths.extend(
_filterpaths(rp, listdir(opj(ds.path, rp)), exclude))
else:
final_paths.append(rp)
return final_paths
relevant_paths = _filterpaths(
'',
listdir(ds.path),
# NOTE: this will also ignore datalad's native dataset-global metadata
# rationale: the metadata still describes the dataset content, so
# even if it changes, the description changes, but not the content
# it is describing -> ref commit should be unaffected
list(exclude_from_metadata) + subds_relpaths)
if not relevant_paths:
return None
return ds.repo.get_last_commit_hash(relevant_paths)
def _get_obj_location(hash_str, ref_type):
return opj(
'objects',
hash_str[:2],
'{}-{}'.format(
ref_type,
hash_str[2:]))
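# Illustrative example (not part of the original module): for an objid of
# 'abcdef...' and ref_type 'ds' this yields 'objects/ab/ds-cdef...' -- the
# first two hash characters form a fan-out directory and the remainder is
# part of the object file name.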
def _update_ds_agginfo(refds_path, ds_path, subds_paths, incremental, agginfo_db, to_save):
"""Perform metadata aggregation for ds and a given list of subdataset paths
Parameters
----------
refds_path : str
Absolute path to the reference dataset that aggregate_metadata() was
called on.
ds_path : str
      Absolute path to the dataset to have its aggregate info updated
subds_paths : list(str)
Sequence of absolute paths of subdatasets of the to-be-updated dataset,
whose agginfo shall be updated within the to-be-updated dataset.
Any subdataset that is not listed here is assumed to be gone (i.e. no longer
a subdataset at all, not just not locally installed)
incremental : bool
If set, the update will not remove any information on datasets not listed in
subds_paths
agginfo_db : dict
Dictionary with all information on aggregate metadata on all datasets.
Keys are absolute paths of datasets.
to_save : list
List of paths to save eventually. This function will add new paths as
necessary.
"""
ds = Dataset(ds_path)
# location info of aggregate metadata
# aggregate.json
agginfo_fpath = opj(ds_path, agginfo_relpath)
# base path in which aggregate.json and objects is located
agg_base_path = dirname(agginfo_fpath)
# load existing aggregate info dict
# makes sure all file/dataset paths become absolute
# TODO take from cache, once used in _get_dsinfo_from_aggmetadata()
ds_agginfos = _load_agginfo_db(ds_path)
# object locations referenced initially
objlocs_was = set(ai[k]
for ai in ds_agginfos.values()
for k in location_keys
if k in ai)
# track which objects need to be copied (each item is a from/to tuple
objs2copy = []
# for each subdataset (any depth level)
procds_paths = [ds.path] + subds_paths
for dpath in procds_paths:
ds_dbinfo = agginfo_db.get(dpath, {}).copy()
        # relative path of the current dataset within the dataset we are updating
drelpath = relpath(dpath, start=ds.path)
for loclabel in location_keys:
# TODO filepath_info is obsolete
if loclabel == 'filepath_info' and drelpath == curdir:
# do not write a file list into the dataset it is from
if 'filepath_info' in ds_dbinfo:
del ds_dbinfo['filepath_info']
continue
# abspath to object
objloc = ds_dbinfo.get(loclabel, None)
if objloc is None:
continue
# XXX needs to change when layout of object store is changed
# current is ./datalad/metadata/objects/{hash}/{hash}
target_objpath = op.join(agg_base_path, *objloc.split(os.sep)[-3:])
# make sure we copy the file from its current location to where it is
# needed in this dataset
objs2copy.append((
# this needs to turn into an absolute path
# `dpath` will be relative to the reference dataset
#op.normpath(op.join(ds.path, dpath, dirname(agginfo_relpath), objloc)),
objloc,
target_objpath))
# now build needed local relpath
ds_dbinfo[loclabel] = target_objpath
# (re)assign in case record is new
ds_agginfos[dpath] = ds_dbinfo
# remove all entries for which we did not (no longer) have a corresponding
# subdataset to take care of
if not incremental:
ds_agginfos = {k: v
for k, v in ds_agginfos.items()
if k in procds_paths}
# set of metadata objects now referenced
objlocs_is = set(
ai[k]
for sdsrpath, ai in ds_agginfos.items()
for k in location_keys
if k in ai)
objs2add = objlocs_is
    # yoh: we apparently do need to filter the ones to remove - I did
# "git reset --hard HEAD^" and
# aggregate-metadata failed upon next run trying to remove
# an unknown to git file. I am yet to figure out why that
# mattered (hopefully not that reflog is used somehow)
objs2remove = []
for obj in objlocs_was.difference(objlocs_is):
if lexists(obj):
objs2remove.append(obj)
else:
# not really a warning, we don't need it anymore, it is already gone
lgr.debug(
"To-be-deleted metadata object not found, skip deletion (%s)",
obj
)
# secretly remove obsolete object files, not really a result from a
# user's perspective
if not incremental and objs2remove:
ds.remove(
objs2remove,
# Don't use the misleading default commit message of `remove`:
message='[DATALAD] Remove obsolete metadata object files',
# we do not want to drop these files by default, because we would
            # lose them for other branches, and earlier tags
# TODO evaluate whether this should be exposed as a switch
# to run an explicit force-drop prior to calling remove()
check=False,
result_renderer=None, return_type=list)
if not objs2add and not refds_path == ds_path:
# this is not the base dataset, make sure to save removal in the
# parentds -- not needed when objects get added, as removal itself
# is already committed
to_save.append(dict(path=ds_path, type='dataset', staged=True))
objs2copy = [(f, t) for f, t in objs2copy if f != t]
# must copy object files to local target destination
# make sure those objects are present
# use the reference dataset to resolve paths, as they might point to
# any location in the dataset tree
Dataset(refds_path).get(
[f for f, t in objs2copy],
result_renderer='disabled')
for copy_from, copy_to in objs2copy:
copy_from = op.join(agg_base_path, copy_from)
copy_to = op.join(agg_base_path, copy_to)
target_dir = dirname(copy_to)
if not exists(target_dir):
makedirs(target_dir)
# TODO we could be more clever (later) and maybe `addurl` (or similar)
# the file from another dataset
if lexists(copy_to):
# no need to unlock, just wipe out and replace
os.remove(copy_to)
shutil.copy(copy_from, copy_to)
to_save.append(
dict(path=agginfo_fpath, type='file', staged=True))
if objs2add:
# they are added standard way, depending on the repo type
ds.add(
[opj(agg_base_path, p) for p in objs2add],
save=False, result_renderer=None, return_type=list)
# queue for save, and mark as staged
to_save.extend(
[dict(path=opj(agg_base_path, p), type='file', staged=True)
for p in objs2add])
# write aggregate info file
if not ds_agginfos:
return
_store_agginfo_db(ds_path, ds_agginfos)
ds.add(agginfo_fpath, save=False, to_git=True,
result_renderer=None, return_type=list)
# queue for save, and mark as staged
to_save.append(
dict(path=agginfo_fpath, type='file', staged=True))
# FIXME look for empty object dirs and remove them
def _load_agginfo_db(ds_path):
return {
# paths in DB on disk are always relative
# make absolute to ease processing during aggregation
op.normpath(op.join(ds_path, p)):
{k: op.normpath(op.join(ds_path, op.dirname(agginfo_relpath), v)) if k in location_keys else v
for k, v in props.items()}
for p, props in _load_json_object(opj(ds_path, agginfo_relpath)).items()
}
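# Illustrative example (not part of the original module), assuming
# agginfo_relpath is '.datalad/metadata/aggregate_v1.json': an on-disk record
# {'sub': {'dataset_info': 'objects/ab/ds-cd'}} loaded for ds_path '/tmp/ds'
# comes back as {'/tmp/ds/sub': {'dataset_info':
# '/tmp/ds/.datalad/metadata/objects/ab/ds-cd'}}.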
def _store_agginfo_db(ds_path, db):
# base path in which aggregate.json and objects is located
agg_base_path = dirname(op.join(ds_path, agginfo_relpath))
# make DB paths on disk always relative
json_py.dump(
{
op.relpath(p, start=ds_path):
{k: op.relpath(v, start=agg_base_path) if k in location_keys else v
for k, v in props.items()}
for p, props in db.items()
},
op.join(ds_path, agginfo_relpath)
)
@build_doc
class AggregateMetaData(Interface):
"""Aggregate metadata of one or more datasets for later query.
Metadata aggregation refers to a procedure that extracts metadata present
    in a dataset into a portable representation that is stored in a single
standardized format. Moreover, metadata aggregation can also extract
metadata in this format from one dataset and store it in another
(super)dataset. Based on such collections of aggregated metadata it is
possible to discover particular datasets and specific parts of their
content, without having to obtain the target datasets first (see the
DataLad 'search' command).
To enable aggregation of metadata that are contained in files of a dataset,
    one has to enable one or more metadata extractors for a dataset. DataLad
supports a number of common metadata standards, such as the Exchangeable
Image File Format (EXIF), Adobe's Extensible Metadata Platform (XMP), and
various audio file metadata systems like ID3. DataLad extension packages
can provide metadata data extractors for additional metadata sources. For
example, the neuroimaging extension provides extractors for scientific
(meta)data standards like BIDS, DICOM, and NIfTI1. Some metadata
extractors depend on particular 3rd-party software. The list of metadata
extractors available to a particular DataLad installation is reported by
the 'wtf' command ('datalad wtf').
Enabling a metadata extractor for a dataset is done by adding its name to the
'datalad.metadata.nativetype' configuration variable -- typically in the
dataset's configuration file (.datalad/config), e.g.::
[datalad "metadata"]
nativetype = exif
nativetype = xmp
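    The same can be achieved with a plain Git call run from the dataset root
    (illustrative)::
      git config -f .datalad/config --add datalad.metadata.nativetype exif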
If an enabled metadata extractor is not available in a particular DataLad
installation, metadata extraction will not succeed in order to avoid
inconsistent aggregation results.
Enabling multiple extractors is supported. In this case, metadata are
extracted by each extractor individually, and stored alongside each other.
Metadata aggregation will also extract DataLad's own metadata (extractors
'datalad_core', and 'annex').
Metadata aggregation can be performed recursively, in order to aggregate all
metadata across all subdatasets, for example, to be able to search across
any content in any dataset of a collection. Aggregation can also be performed
for subdatasets that are not available locally. In this case, pre-aggregated
metadata from the closest available superdataset will be considered instead.
    Depending on the versatility of the present metadata and the number of datasets
or files, aggregated metadata can grow prohibitively large. A number of
configuration switches are provided to mitigate such issues.
datalad.metadata.aggregate-content-<extractor-name>
If set to false, content metadata aggregation will not be performed for
the named metadata extractor (a potential underscore '_' in the extractor name must
be replaced by a dash '-'). This can substantially reduce the runtime for
metadata extraction, and also reduce the size of the generated metadata
aggregate. Note, however, that some extractors may not produce any metadata
when this is disabled, because their metadata might come from individual
file headers only. 'datalad.metadata.store-aggregate-content' might be
a more appropriate setting in such cases.
datalad.metadata.aggregate-ignore-fields
Any metadata key matching any regular expression in this configuration setting
is removed prior to generating the dataset-level metadata summary (keys
and their unique values across all dataset content), and from the dataset
metadata itself. This switch can also be used to filter out sensitive
    information prior to aggregation.
datalad.metadata.generate-unique-<extractor-name>
If set to false, DataLad will not auto-generate a summary of unique content
metadata values for a particular extractor as part of the dataset-global metadata
(a potential underscore '_' in the extractor name must be replaced by a dash '-').
This can be useful if such a summary is bloated due to minor uninformative (e.g.
numerical) differences, or when a particular extractor already provides a
carefully designed content metadata summary.
datalad.metadata.maxfieldsize
Any metadata value that exceeds the size threshold given by this configuration
setting (in bytes/characters) is removed.
datalad.metadata.store-aggregate-content
If set, extracted content metadata are still used to generate a dataset-level
summary of present metadata (all keys and their unique values across all
files in a dataset are determined and stored as part of the dataset-level
metadata aggregate, see datalad.metadata.generate-unique-<extractor-name>),
but metadata on individual files are not stored.
This switch can be used to avoid prohibitively large metadata files. Discovery
of datasets containing content matching particular metadata properties will
still be possible, but such datasets would have to be obtained first in order
to discover which particular files in them match these properties.
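    For example, to keep aggregates small, one could set (illustrative values,
    using only the switches documented above)::
      [datalad "metadata"]
        store-aggregate-content = false
        maxfieldsize = 100000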
"""
_params_ = dict(
dataset=Parameter(
args=("-d", "--dataset"),
doc="""topmost dataset metadata will be aggregated into. All dataset
between this dataset and any given path will receive updated
aggregated metadata from all given paths.""",
constraints=EnsureDataset() | EnsureNone()),
path=Parameter(
args=("path",),
metavar="PATH",
doc="""path to datasets that shall be aggregated.
When a given path is pointing into a dataset, the metadata of the
containing dataset will be aggregated.""",
nargs="*",
constraints=EnsureStr() | EnsureNone()),
recursive=recursion_flag,
recursion_limit=recursion_limit,
update_mode=Parameter(
args=('--update-mode',),
constraints=EnsureChoice('all', 'target'),
doc="""which datasets to update with newly aggregated metadata:
all datasets from any leaf dataset to the top-level target dataset
including all intermediate datasets (all), or just the top-level
target dataset (target)."""),
incremental=Parameter(
args=('--incremental',),
action='store_true',
doc="""If set, all information on metadata records of subdatasets
that have not been (re-)aggregated in this run will be kept unchanged.
            This is useful when (re-)aggregating only a subset of a dataset hierarchy,
for example, because not all subdatasets are locally available."""),
force_extraction=Parameter(
args=('--force-extraction',),
action='store_true',
doc="""If set, all enabled extractors will be engaged regardless of
whether change detection indicates that metadata has already been
extracted for a given dataset state."""),
save=nosave_opt,
)
@staticmethod
@datasetmethod(name='aggregate_metadata')
@eval_results
def __call__(
path=None,
dataset=None,
recursive=False,
recursion_limit=None,
update_mode='target',
incremental=False,
force_extraction=False,
save=True):
refds_path = Interface.get_refds_path(dataset)
# it really doesn't work without a dataset
ds = require_dataset(
dataset, check_installed=True, purpose='metadata aggregation')
# always include the reference dataset
path = assure_list(path)
path.append(ds.path)
agginfo_db = _load_agginfo_db(ds.path)
to_save = []
to_aggregate = set()
for ap in AnnotatePaths.__call__(
dataset=refds_path,
path=path,
recursive=recursive,
recursion_limit=recursion_limit,
action='aggregate_metadata',
# uninstalled subdatasets could be queried via aggregated metadata
# -> no 'error'
unavailable_path_status='',
nondataset_path_status='error',
return_type='generator',
on_failure='ignore'):
if ap.get('status', None):
# this is done
yield ap
continue
ap_type = ap.get('type', None)
ap_state = ap.get('state', None)
assert('parentds' in ap or ap_type == 'dataset')
if ap_type == 'dataset' and ap_state != 'absent':
# a present dataset, we can take directly from it
aggsrc = ap['path']
lgr.info('Aggregate metadata for dataset %s', aggsrc)
else:
# everything else needs to come from the parent
aggsrc = ap['parentds']
if ap_state == 'absent':
lgr.info(
'Attempt to use pre-aggregate metadata for absent %s from dataset at %s',
ap['path'],
aggsrc)
else:
lgr.info(
'Aggregate metadata for %s from dataset at %s',
ap['path'],
aggsrc)
to_aggregate.add(aggsrc)
if ap_state == 'absent':
# key thought: recursive is done by path annotation, hence
# once we hit an absent dataset, we are 100% certain that
# there is nothing to recurse into on the file system
# hence we only have to look into the aggregated metadata
# of the last available dataset in the dataset tree edge
#
# if there is nothing at this path, we need to look into the
# parentds and check if we know anything about this path
# if we do, we need to grab all the info and objects
# if not, we need to error
res = _get_dsinfo_from_aggmetadata(
aggsrc, ap['path'], recursive, agginfo_db)
if not isinstance(res, list):
yield get_status_dict(
status='impossible',
message=res,
action='aggregate_metadata',
path=ap['path'],
logger=lgr)
continue
# cue for aggregation
to_aggregate.update(res)
else:
# actually aggregate metadata for this dataset, immediately place
# generated objects into the aggregated or reference dataset,
# and put info into DB to get the distributed to all datasets
# that need to be updated
errored = _dump_extracted_metadata(
ds,
Dataset(aggsrc),
agginfo_db,
to_save,
force_extraction)
if errored:
yield get_status_dict(
status='error',
message='Metadata extraction failed (see previous error message, set datalad.runtime.raiseonerror=yes to fail immediately)',
action='aggregate_metadata',
path=aggsrc,
logger=lgr)
# at this point we have dumped all aggregated metadata into object files
        # somewhere, we know what needs saving, but haven't saved anything, and
        # we know about the states of all aggregated datasets in the DB
        # what remains to do is to update all datasets, so they have their own copy
# of aggregated metadata and update their respective aggregate.json with
# info on what states we just aggregated from
# first, let's figure out what dataset need updating at all
        # get adjacency info of the dataset tree spanning the base to all leaf datasets
# associated with the path arguments
if update_mode == 'all':
ds_adj = {}
discover_dataset_trace_to_targets(
ds.path, to_aggregate, [], ds_adj,
# we know that to_aggregate only lists datasets, existing and
# absent ones -- we want to aggregate all of them, either from
# just extracted metadata, or from previously aggregated metadata
# of the closest superdataset
includeds=to_aggregate)
            # TODO we need to work in the info about datasets that we only got from
# aggregated metadata, that had no trace on the file system in here!!
subtrees = _adj2subtrees(ds.path, ds_adj, to_aggregate)
elif update_mode == 'target':
subtrees = {ds.path: list(agginfo_db.keys())}
else:
raise ValueError(
"unknown `update_mode` '%s' for metadata aggregation", update_mode)
# go over datasets in bottom-up fashion
for parentds_path in sorted(subtrees, reverse=True):
lgr.info('Update aggregate metadata in dataset at: %s', parentds_path)
_update_ds_agginfo(
ds.path,
parentds_path,
subtrees[parentds_path],
incremental,
agginfo_db,
to_save)
# update complete
res = get_status_dict(
status='ok',
action='aggregate_metadata',
path=parentds_path,
type='dataset',
logger=lgr)
res.update(agginfo_db.get(parentds_path, {}))
yield res
#
# save potential modifications to dataset global metadata
#
if not to_save:
return
lgr.info('Attempting to save %i files/datasets', len(to_save))
for res in Save.__call__(
path=to_save,
dataset=refds_path,
message='[DATALAD] Dataset aggregate metadata update',
return_type='generator',
result_xfm=None,
result_filter=None,
on_failure='ignore'):
yield res
| 41.737895 | 148 | 0.638496 |
bb73395bd605264ccf66f386c60be56bb28c7a54 | 201 | py | Python | tests/test_project/api/urls.py | ShreeshaRelysys/openwisp-utils | 7c0b5f249b0e8e1f3af7bf1942b6543c9375dd75 | [
"BSD-3-Clause"
] | 80 | 2017-06-28T08:17:41.000Z | 2022-03-29T19:38:31.000Z | tests/test_project/api/urls.py | ShreeshaRelysys/openwisp-utils | 7c0b5f249b0e8e1f3af7bf1942b6543c9375dd75 | [
"BSD-3-Clause"
] | 275 | 2017-06-22T09:57:40.000Z | 2022-03-29T19:20:12.000Z | tests/test_project/api/urls.py | ShreeshaRelysys/openwisp-utils | 7c0b5f249b0e8e1f3af7bf1942b6543c9375dd75 | [
"BSD-3-Clause"
] | 91 | 2017-06-22T10:12:16.000Z | 2022-03-19T12:53:55.000Z | from django.urls import re_path
from . import views
urlpatterns = [
re_path(
r'^receive_project/(?P<pk>[^/\?]+)/$',
views.receive_project,
name='receive_project',
)
]
| 16.75 | 46 | 0.587065 |
9c79db4eb1cafba18fee252bb250583513fc7c12 | 2,294 | py | Python | endpage.py | MarcelloEdocia/battleship_remake | ab26edbfc42dae5d96d11540f7f14fc7f1bbbf68 | [
"Apache-2.0"
] | null | null | null | endpage.py | MarcelloEdocia/battleship_remake | ab26edbfc42dae5d96d11540f7f14fc7f1bbbf68 | [
"Apache-2.0"
] | null | null | null | endpage.py | MarcelloEdocia/battleship_remake | ab26edbfc42dae5d96d11540f7f14fc7f1bbbf68 | [
"Apache-2.0"
] | null | null | null | import tkinter
class EndPage(tkinter.Frame):
def __init__(self, parent, App):
self.application = App
self.config = App.config
super().__init__(parent)
self.configure(bg="grey")
self.grid(row=0, column=0, sticky="nsew")
parent.grid_rowconfigure(0, weight=1)
parent.grid_columnconfigure(0, weight=1)
        self.pixelVirtual = tkinter.PhotoImage(width=5, height=2)
        self.button_width = self.config.side//2.5
        self.button_height = self.config.side//20
self.mainFrame = tkinter.Frame(self, height=self.config.side, width=self.config.side, bg="grey")
self.mainFrame.pack(expand=True)
self.create_label()
self.create_playbutton()
self.see_highscore()
self.create_exitbutton()
def create_label(self):
self.firsttext = tkinter.Label(self.mainFrame, text="You Win!", font=("Arial", 22), bg="grey")
self.firsttext.pack()
self.secondtext = tkinter.Label(self.mainFrame, text="", bg="grey")
self.secondtext.pack()
self.thirdtext = tkinter.Label(self.mainFrame, text="", bg="grey")
self.thirdtext.pack()
def create_playbutton(self):
self.playbutton = tkinter.Button(self.mainFrame, text="Play again", font=("Arial", 18), image=self.pixelVirtual, width=self.button_width, height=self.button_height, compound="c", command=lambda:self.application.play_again())
self.playbutton.pack(pady=10)
def create_exitbutton(self):
self.exitbutton = tkinter.Button(self.mainFrame, text="Exit", font=("Arial", 18), command=lambda:self.application.create_popup(), image=self.pixelVirtual, width=self.button_width,height=self.button_height, compound="c")
self.exitbutton.pack(pady=10)
def see_highscore(self):
self.returnbutton = tkinter.Button(self.mainFrame, text="Return", font=("Arial", 18), command=lambda:self.application.gotomenu(), image=self.pixelVirtual, width=self.button_width,height=self.button_height, compound="c")
        self.returnbutton.pack(pady=50)
| 44.980392 | 233 | 0.664342 |
bf5ce8942686127a7a2d145cf0175f61e09e54c7 | 4,882 | py | Python | tests/e2e/tests/test_kiali_workload_endpoint.py | jsloyer/kiali | bd7e97b46c936fe9ce0d9fffa3b89d06f676c24e | [
"Apache-2.0"
] | null | null | null | tests/e2e/tests/test_kiali_workload_endpoint.py | jsloyer/kiali | bd7e97b46c936fe9ce0d9fffa3b89d06f676c24e | [
"Apache-2.0"
] | null | null | null | tests/e2e/tests/test_kiali_workload_endpoint.py | jsloyer/kiali | bd7e97b46c936fe9ce0d9fffa3b89d06f676c24e | [
"Apache-2.0"
] | null | null | null | import pytest
import tests.conftest as conftest
import time
from utils.command_exec import command_exec
from utils.timeout import timeout
WORKLOAD_TO_VALIDATE = 'details-v1'
WORKLOAD_TYPE = 'Deployment'
BOOKINFO_WORKLOADS_COUNT = 6
EXTRA_WORKLOAD_COUNT = 4
EXTRA_WORKLOADS = set(['details-v2', 'reviews-v4', 'reviews-v5','reviews-v6'])
def test_workload_list_endpoint(kiali_client):
bookinfo_namespace = conftest.get_bookinfo_namespace()
workload_list = kiali_client.request(method_name='workloadList', path={'namespace': bookinfo_namespace}).json()
assert workload_list != None
for workload in workload_list.get('workloads'):
assert workload != None
assert workload.get('name') != None and workload.get('name') != ''
if ('traffic-generator' not in workload.get('name')):
assert workload.get('istioSidecar') == True
assert workload.get('versionLabel') == True
assert workload.get('appLabel') == True
def test_diversity_in_workload_list_endpoint(kiali_client):
bookinfo_namespace = conftest.get_bookinfo_namespace()
try:
# Add extra workloads that will be tested
assert command_exec.oc_apply(conftest.WORKLOADS_FILE, bookinfo_namespace) == True
with timeout(seconds=90, error_message='Timed out waiting for extra workloads creation'):
while True:
workload_list = kiali_client.request(method_name='workloadList', path={'namespace': bookinfo_namespace}).json()
if workload_list != None and workload_list.get('workloads') != None:
workload_names = set(list(map(lambda workload: workload.get('name'), workload_list.get('workloads'))))
if EXTRA_WORKLOADS.issubset(workload_names):
break
time.sleep(1)
        # Dictionary that maps workloads to their own types
dicWorkloadType = {
'details-v2': 'Pod',
'reviews-v4': 'ReplicaSet',
'reviews-v5': 'ReplicationController',
'reviews-v6': 'StatefulSet'
}
for workload in workload_list.get('workloads'):
if workload.get('name') in EXTRA_WORKLOADS:
workloadType = dicWorkloadType[workload.get('name')]
assert workload.get('type') == workloadType
finally:
assert command_exec.oc_delete(conftest.WORKLOADS_FILE, bookinfo_namespace) == True
with timeout(seconds=90, error_message='Timed out waiting for extra workloads deletion'):
print('Extra workloads added for this test:', EXTRA_WORKLOADS)
while True:
workload_list = kiali_client.request(method_name='workloadList', path={'namespace': bookinfo_namespace}).json()
if workload_list != None and workload_list.get('workloads') != None:
workload_names = set(list(map(lambda workload: workload.get('name'), workload_list.get('workloads'))))
print('Still existing workloads:', workload_names)
if EXTRA_WORKLOADS.intersection(workload_names) == set():
break
time.sleep(1)
def test_workload_details(kiali_client):
bookinfo_namespace = conftest.get_bookinfo_namespace()
workload = kiali_client.request(method_name='workloadDetails', path={'namespace': bookinfo_namespace, 'workload': WORKLOAD_TO_VALIDATE}).json()
assert workload != None
assert WORKLOAD_TO_VALIDATE in workload.get('name')
assert WORKLOAD_TYPE in workload.get('type')
assert 'labels' in workload
assert 'pods' in workload
assert 'services' in workload
def test_workload_metrics(kiali_client):
bookinfo_namespace = conftest.get_bookinfo_namespace()
workload = kiali_client.request(method_name='workloadMetrics', path={'namespace': bookinfo_namespace, 'workload': WORKLOAD_TO_VALIDATE}).json()
for direction in ['dest', 'source']:
assert workload != None
metrics = workload.get(direction).get('metrics')
assert 'request_count_in' in metrics
assert 'request_count_out' in metrics
assert 'request_error_count_in' in metrics
assert 'request_error_count_out' in metrics
assert 'tcp_received_in' in metrics
assert 'tcp_received_out' in metrics
assert 'tcp_sent_in' in metrics
assert 'tcp_sent_out' in metrics
histograms = workload.get(direction).get('histograms')
assert 'request_duration_in' in histograms
assert 'request_duration_out' in histograms
assert 'request_size_in' in histograms
assert 'request_size_out' in histograms
assert 'response_size_in' in histograms
assert 'response_size_out' in histograms
def test_workload_health(kiali_client):
bookinfo_namespace = conftest.get_bookinfo_namespace()
workload = kiali_client.request(method_name='workloadHealth', path={'namespace': bookinfo_namespace, 'workload': WORKLOAD_TO_VALIDATE}).json()
assert workload != None
assert WORKLOAD_TO_VALIDATE in workload.get('workloadStatus').get('name')
assert 'requests' in workload
| 42.452174 | 147 | 0.723884 |
d40b0e6d3c923233ed749f2749e1c5a5a27bc1ce | 4,348 | py | Python | docs/conf.py | wesleykendall/django-kmatch | 0ca5d8465461210aa98fd3fb9afd2ec3838a4f9b | [
"MIT"
] | null | null | null | docs/conf.py | wesleykendall/django-kmatch | 0ca5d8465461210aa98fd3fb9afd2ec3838a4f9b | [
"MIT"
] | 2 | 2015-03-27T18:10:34.000Z | 2015-03-30T17:39:44.000Z | docs/conf.py | wesleykendall/django-kmatch | 0ca5d8465461210aa98fd3fb9afd2ec3838a4f9b | [
"MIT"
] | 5 | 2015-03-27T17:49:20.000Z | 2016-11-28T22:29:54.000Z | # -*- coding: utf-8 -*-
#
# django-kmatch documentation build configuration file
import os
import re
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = '../django_kmatch/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
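# Illustrative example (not part of the original file): with version.py
# containing __version__ = '1.2.3', get_version() returns '1.2.3'.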
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
#'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'toc'
# General information about the project.
project = u'django_kmatch'
copyright = u'2014, Ambition Inc.'
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/v2.7.2/', None),
'django': ('http://django.readthedocs.org/en/latest/', None),
#'celery': ('http://celery.readthedocs.org/en/latest/', None),
}
# -- Options for HTML output ----------------------------------------------
html_theme = 'default'
#html_theme_path = []
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-kmatchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-kmatch.tex', u'django-kmatch Documentation',
u'Wes Kendall', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-kmatch', u'django-kmatch Documentation',
[u'Wes Kendall'], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-kmatch', u'django-kmatch Documentation',
u'Wes Kendall', 'django-kmatch', 'A short description',
'Miscellaneous'),
]
# -- Django configuration -------------------------------------------------
import sys
sys.path.insert(0, os.path.abspath('..'))
from settings import configure_settings
configure_settings()
| 30.619718 | 96 | 0.663293 |
b28296d62c8c5fdb41c330ad745a325f3e511dc9 | 936 | py | Python | get_data.py | himoto/3d-surface-plot | 733c5195b1b35a218fdce51d6aa49a67f4159e95 | [
"MIT"
] | 2 | 2020-07-01T18:49:16.000Z | 2021-12-22T12:14:51.000Z | get_data.py | himoto/3d-surface-plot | 733c5195b1b35a218fdce51d6aa49a67f4159e95 | [
"MIT"
] | null | null | null | get_data.py | himoto/3d-surface-plot | 733c5195b1b35a218fdce51d6aa49a67f4159e95 | [
"MIT"
] | null | null | null | import os
import numpy as np
from biomass.dynamics.solver import *
from tqdm import tqdm
from model import *
def compute_matrix():
os.makedirs("data", exist_ok=True)
sim_n = 101
sim_t = range(5401)
sim_ligand = ["EGF", "HRG"]
z_cFosmRNA = np.empty((len(sim_ligand), sim_n, len(sim_t)))
norm_max = np.empty_like(sim_ligand, dtype=float)
x = param_values()
y0 = initial_values()
y_ss = get_steady_state(diffeq, y0, tuple(x))
for i in tqdm(range(sim_n)):
x = param_values()
x[C.p11] *= (1 - 0.01 * i)
for j, ligand in enumerate(sim_ligand):
x[C.Ligand] = x[C.NAMES.index(ligand)]
sol = solve_ode(diffeq, y_ss, sim_t, tuple(x))
if i == 0:
norm_max[j] = np.max(sol.y[V.cfosmRNAc, :])
z_cFosmRNA[j, i, :] = sol.y[V.cfosmRNAc, :] / norm_max[j]
np.save(os.path.join("data", "z_cFosmRNA.npy"), z_cFosmRNA)
| 28.363636 | 69 | 0.595085 |
83c0aa5a04c40b72fb40975c94f5be4c44f968a1 | 12,636 | py | Python | tfx/extensions/google_cloud_ai_platform/runner.py | seyedrezamirkhani/tfx | b2dc2338b27d6ccf1cae690e65c4e358e17ec626 | [
"Apache-2.0"
] | 1 | 2021-07-21T15:54:20.000Z | 2021-07-21T15:54:20.000Z | tfx/extensions/google_cloud_ai_platform/runner.py | seyedrezamirkhani/tfx | b2dc2338b27d6ccf1cae690e65c4e358e17ec626 | [
"Apache-2.0"
] | 1 | 2020-08-28T09:59:13.000Z | 2020-08-28T09:59:13.000Z | tfx/extensions/google_cloud_ai_platform/runner.py | seyedrezamirkhani/tfx | b2dc2338b27d6ccf1cae690e65c4e358e17ec626 | [
"Apache-2.0"
] | 1 | 2020-11-06T11:44:33.000Z | 2020-11-06T11:44:33.000Z | # Lint as: python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class to start TFX training jobs on AI Platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import sys
import time
from typing import Any, Dict, List, Optional, Text
from absl import logging
from googleapiclient import discovery
from googleapiclient import errors
import tensorflow as tf
from tfx import types
from tfx import version
from tfx.types import artifact_utils
from tfx.utils import telemetry_utils
_POLLING_INTERVAL_IN_SECONDS = 30
_CONNECTION_ERROR_RETRY_LIMIT = 5
# TODO(b/139934802) Ensure mirroring of released TFX containers in Docker Hub
# and gcr.io/tfx-oss-public/ registries.
_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(version.__version__)
_TF_COMPATIBILITY_OVERRIDE = {
# Generally, runtimeVersion should be same as <major>.<minor> of currently
# installed tensorflow version, with certain compatibility hacks since
# some TensorFlow runtime versions are not explicitly supported by
# CAIP pusher. See:
# https://cloud.google.com/ai-platform/prediction/docs/runtime-version-list
'2.0': '1.15',
# TODO(b/157039850) Remove this once CAIP model support TF 2.2 runtime.
'2.2': '2.1',
'2.3': '2.1',
'2.4': '2.1'
}
def _get_tf_runtime_version(tf_version: Text) -> Text:
"""Returns the tensorflow runtime version used in Cloud AI Platform.
This is only used for prediction service.
Args:
tf_version: version string returned from `tf.__version__`.
Returns: same major.minor version of installed tensorflow, except when
    overridden by _TF_COMPATIBILITY_OVERRIDE.
"""
tf_version = '.'.join(tf_version.split('.')[0:2])
return _TF_COMPATIBILITY_OVERRIDE.get(tf_version) or tf_version
def _get_caip_python_version(caip_tf_runtime_version: Text) -> Text:
"""Returns supported python version on Cloud AI Platform.
See
https://cloud.google.com/ml-engine/docs/tensorflow/versioning#set-python-version-training
Args:
caip_tf_runtime_version: version string returned from
_get_tf_runtime_version().
Returns:
'2.7' for PY2. '3.5' or '3.7' for PY3 depending on caip_tf_runtime_version.
"""
if sys.version_info.major == 2:
return '2.7'
(major, minor) = caip_tf_runtime_version.split('.')[0:2]
if (int(major), int(minor)) >= (1, 15):
return '3.7'
return '3.5'
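# Illustrative examples (not part of the original module), derived from the
# mappings above:
#   _get_tf_runtime_version('2.3.1') -> '2.1'    (overridden for CAIP)
#   _get_tf_runtime_version('1.14.0') -> '1.14'  (passes through unchanged)
#   _get_caip_python_version('1.15')  -> '3.7'   (under Python 3)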
def start_aip_training(input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text,
Any], executor_class_path: Text,
training_inputs: Dict[Text,
Any], job_id: Optional[Text]):
"""Start a trainer job on AI Platform (AIP).
This is done by forwarding the inputs/outputs/exec_properties to the
tfx.scripts.run_executor module on a AI Platform training job interpreter.
Args:
input_dict: Passthrough input dict for tfx.components.Trainer.executor.
output_dict: Passthrough input dict for tfx.components.Trainer.executor.
exec_properties: Passthrough input dict for tfx.components.Trainer.executor.
executor_class_path: class path for TFX core default trainer.
training_inputs: Training input argument for AI Platform training job.
'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred. For
the full set of parameters, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
job_id: Job ID for AI Platform Training job. If not supplied,
system-determined unique ID is given. Refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job
Returns:
None
Raises:
RuntimeError: if the Google Cloud AI Platform training job failed/cancelled.
"""
training_inputs = training_inputs.copy()
json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)
logging.info('json_inputs=\'%s\'.', json_inputs)
json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)
logging.info('json_outputs=\'%s\'.', json_outputs)
json_exec_properties = json.dumps(exec_properties, sort_keys=True)
logging.info('json_exec_properties=\'%s\'.', json_exec_properties)
# Configure AI Platform training job
api_client = discovery.build('ml', 'v1')
# We use custom containers to launch training on AI Platform, which invokes
# the specified image using the container's entrypoint. The default
# entrypoint for TFX containers is to call scripts/run_executor.py. The
# arguments below are passed to this run_executor entry to run the executor
# specified in `executor_class_path`.
job_args = [
'--executor_class_path', executor_class_path, '--inputs', json_inputs,
'--outputs', json_outputs, '--exec-properties', json_exec_properties
]
if not training_inputs.get('masterConfig'):
training_inputs['masterConfig'] = {
'imageUri': _TFX_IMAGE,
}
training_inputs['args'] = job_args
# Pop project_id so AIP doesn't complain about an unexpected parameter.
# It's been a stowaway in aip_args and has finally reached its destination.
project = training_inputs.pop('project')
project_id = 'projects/{}'.format(project)
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
job_labels = telemetry_utils.get_labels_dict()
# 'tfx_YYYYmmddHHMMSS' is the default job ID if not explicitly specified.
job_id = job_id or 'tfx_{}'.format(
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
job_spec = {
'jobId': job_id,
'trainingInput': training_inputs,
'labels': job_labels,
}
# Submit job to AIP Training
logging.info('Submitting job=\'%s\', project=\'%s\' to AI Platform.', job_id,
project)
request = api_client.projects().jobs().create(
body=job_spec, parent=project_id)
request.execute()
# Wait for AIP Training job to finish
job_name = '{}/jobs/{}'.format(project_id, job_id)
request = api_client.projects().jobs().get(name=job_name)
response = request.execute()
retry_count = 0
# Monitors the long-running operation by polling the job state periodically,
# and retries the polling when a transient connectivity issue is encountered.
#
# Long-running operation monitoring:
# The possible states of "get job" response can be found at
# https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#State
# where SUCCEEDED/FAILED/CANCELLED are considered to be final states.
# The following logic will keep polling the state of the job until the job
# enters a final state.
#
# During the polling, if a connection error was encountered, the GET request
# will be retried by recreating the Python API client to refresh the lifecycle
# of the connection being used. See
# https://github.com/googleapis/google-api-python-client/issues/218
# for a detailed description of the problem. If the error persists for
# _CONNECTION_ERROR_RETRY_LIMIT consecutive attempts, the function will exit
# with code 1.
while response['state'] not in ('SUCCEEDED', 'FAILED', 'CANCELLED'):
time.sleep(_POLLING_INTERVAL_IN_SECONDS)
try:
response = request.execute()
retry_count = 0
# Handle transient connection error.
except ConnectionError as err:
if retry_count < _CONNECTION_ERROR_RETRY_LIMIT:
retry_count += 1
logging.warning(
'ConnectionError (%s) encountered when polling job: %s. Trying to '
'recreate the API client.', err, job_id)
# Recreate the Python API client.
api_client = discovery.build('ml', 'v1')
request = api_client.projects().jobs().get(name=job_name)
else:
# TODO(b/158433873): Consider raising the error instead of exit with
# code 1 after CMLE supports configurable retry policy.
# Currently CMLE will automatically retry the job unless return code
# 1-128 is returned.
logging.error('Request failed after %s retries.',
_CONNECTION_ERROR_RETRY_LIMIT)
sys.exit(1)
if response['state'] in ('FAILED', 'CANCELLED'):
err_msg = 'Job \'{}\' did not succeed. Detailed response {}.'.format(
job_name, response)
logging.error(err_msg)
raise RuntimeError(err_msg)
# AIP training complete
logging.info('Job \'%s\' successful.', job_name)
def deploy_model_for_aip_prediction(
serving_path: Text,
model_version: Text,
ai_platform_serving_args: Dict[Text, Any],
executor_class_path: Text,
):
"""Deploys a model for serving with AI Platform.
Args:
serving_path: The path to the model. Must be a GCS URI.
model_version: Version of the model being deployed. Must be different from
what is currently being served.
ai_platform_serving_args: Dictionary containing arguments for pushing to AI
Platform. For the full set of parameters supported, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version
executor_class_path: class path for TFX core default trainer.
Raises:
RuntimeError: if an error is encountered when trying to push.
"""
logging.info(
'Deploying to model with version %s to AI Platform for serving: %s',
model_version, ai_platform_serving_args)
model_name = ai_platform_serving_args['model_name']
project_id = ai_platform_serving_args['project_id']
regions = ai_platform_serving_args.get('regions', [])
default_runtime_version = _get_tf_runtime_version(tf.__version__)
runtime_version = ai_platform_serving_args.get('runtime_version',
default_runtime_version)
python_version = _get_caip_python_version(runtime_version)
api = discovery.build('ml', 'v1')
body = {'name': model_name, 'regions': regions}
parent = 'projects/{}'.format(project_id)
try:
api.projects().models().create(body=body, parent=parent).execute()
except errors.HttpError as e:
# If the error is to create an already existing model, it's ok to ignore.
# TODO(b/135211463): Remove the disable once the pytype bug is fixed.
if e.resp.status == 409: # pytype: disable=attribute-error
logging.warn('Model %s already exists', model_name)
else:
raise RuntimeError('AI Platform Push failed: {}'.format(e))
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
job_labels = telemetry_utils.get_labels_dict()
body = {
'name': model_version,
'deployment_uri': serving_path,
'runtime_version': runtime_version,
'python_version': python_version,
'labels': job_labels,
}
# Push to AIP, and record the operation name so we can poll for its state.
model_name = 'projects/{}/models/{}'.format(project_id, model_name)
response = api.projects().models().versions().create(
body=body, parent=model_name).execute()
op_name = response['name']
deploy_status_resc = api.projects().operations().get(name=op_name)
while not deploy_status_resc.execute().get('done'):
time.sleep(_POLLING_INTERVAL_IN_SECONDS)
logging.info('Model still being deployed...')
deploy_status = deploy_status_resc.execute()
if deploy_status.get('error'):
# The operation completed with an error.
raise RuntimeError(
'Failed to deploy model to AI Platform for serving: {}'.format(
deploy_status['error']))
# Set the new version as default.
# By API specification, if Long-Running-Operation is done and there is
# no error, 'response' is guaranteed to exist.
api.projects().models().versions().setDefault(name='{}/versions/{}'.format(
model_name, deploy_status['response']['name'])).execute()
logging.info(
'Successfully deployed model %s with version %s, serving from %s',
model_name, model_version, serving_path)
| 40.114286 | 94 | 0.714941 |
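# --- Editor's note (not part of the dataset record above or below): a hedged sketch of
# the long-running-operation handling used by deploy_model_for_aip_prediction above.
# `op_get` stands in for api.projects().operations().get(name=op_name); illustrative only. ---
import time

def wait_for_operation(op_get, interval_s=1.0):
    """Polls an AI Platform operation until done; raises if it finished with an error."""
    while True:
        op = op_get.execute()
        if op.get('done'):
            if op.get('error'):
                raise RuntimeError('Operation failed: {}'.format(op['error']))
            # Per the API contract, a done operation without an error has a response.
            return op['response']
        time.sleep(interval_s)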
17fd2f4f724fa323f264ea8111f1726eb7895c5e | 850 | py | Python | apoli/apoli/urls.py | masaki1002/backend | 24d186f3a224a0ee0e8f410b9c0d4832881c21e3 | [
"MIT"
] | null | null | null | apoli/apoli/urls.py | masaki1002/backend | 24d186f3a224a0ee0e8f410b9c0d4832881c21e3 | [
"MIT"
] | null | null | null | apoli/apoli/urls.py | masaki1002/backend | 24d186f3a224a0ee0e8f410b9c0d4832881c21e3 | [
"MIT"
] | null | null | null | """apoli URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('user.urls')),
path('api/recipe/', include('recipe.urls')),
]
| 35.416667 | 77 | 0.696471 |
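# --- Editor's note: a hedged sketch of an included URLconf, e.g. what the hypothetical
# user/urls.py wired in via include('user.urls') above could look like. The view classes
# are illustrative assumptions, not taken from the repository. ---
from django.urls import path
from . import views  # hypothetical views module of the included app

urlpatterns = [
    path('', views.UserListView.as_view(), name='user-list'),
    path('<int:pk>/', views.UserDetailView.as_view(), name='user-detail'),
]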
7a30ecc970c7975dfeda75d8772592219d20fa7a | 1,535 | py | Python | tests/events/test_mq.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 29 | 2021-01-07T13:35:16.000Z | 2022-03-25T07:20:54.000Z | tests/events/test_mq.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 13 | 2021-02-28T00:31:00.000Z | 2022-03-29T15:24:01.000Z | tests/events/test_mq.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 5 | 2021-02-27T13:50:42.000Z | 2022-01-13T15:05:44.000Z | from aws_lambda_typing.events import MQEvent
def test_mq_event() -> None:
event: MQEvent = {
"eventSource": "aws:amq",
"eventSourceArn": "arn:aws:mq:us-west-2:112556298976:broker:test:b-9bcfa592-423a-4942-879d-eb284b418fc8", # noqa: E501
"messages": {
[
{
"messageID": "ID:b-9bcfa592-423a-4942-879d-eb284b418fc8-1.mq.us-west-2.amazonaws.com-37557-1234520418293-4:1:1:1:1", # noqa: E501
"messageType": "jms/text-message",
"data": "QUJDOkFBQUE=",
"connectionId": "myJMSCoID",
"redelivered": False,
"destination": {"physicalname": "testQueue"},
"timestamp": 1598827811958,
"brokerInTime": 1598827811958,
"brokerOutTime": 1598827811959,
},
{
"messageID": "ID:b-9bcfa592-423a-4942-879d-eb284b418fc8-1.mq.us-west-2.amazonaws.com-37557-1234520418293-4:1:1:1:1", # noqa: E501
"messageType": "jms/bytes-message",
"data": "3DTOOW7crj51prgVLQaGQ82S48k=",
"connectionId": "myJMSCoID1",
"persistent": False,
"destination": {"physicalname": "testQueue"},
"timestamp": 1598827811958,
"brokerInTime": 1598827811958,
"brokerOutTime": 1598827811959,
},
        ],
}
| 43.857143 | 150 | 0.49772 |
81f132148495342e6188c1aca3e6d01510a0be15 | 1,462 | py | Python | custom/nic_compliance/tests/test_utils.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | custom/nic_compliance/tests/test_utils.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | custom/nic_compliance/tests/test_utils.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.test import TestCase
from custom.nic_compliance.utils import extract_password
OBFUSCATED_PASSWORD_MAPPING = {
"sha256$1e2d5bc2hhMjU2JDFlMmQ1Yk1USXpORFUyZjc5MTI3PQ==f79127=": "123456",
"sha256$8f5008c2hhMjU2JDhmNTAwOFlXSmpNVEl6TFE9PTRhNjBhOT0=4a60a9=": "abc123-",
"sha256$4bf7cdc2hhMjU2JDRiZjdjZE1USXpRQ01rSlRFeTEzMGM4ZD0=130c8d=": "123@#$%12",
"sha256$29df66c2hhMjU2JDI5ZGY2NklDRkFJeVFsWGlZcUtDbGZLeTFjYTQwN2VkPQ==a407ed=": " !@#$%^&*()_+-\\",
"sha256$ad5e3ac2hhMjU2JGFkNWUzYTRLU0o0S1NxNEtTVjRLU3c0S1NqTVRJejQyNDgyOT0=424829=": "उपकरण123",
"sha256$nhiyhsc2hhMjU2JG5oaXloc2FsWmlWVUEvVmxsWk5RPT16eWwzeHU9zyl3xu=": "jVbU@?VYY5"
}
class TestExtractPassword(TestCase):
def test_password_decoding(self):
for obfuscated_password, password in OBFUSCATED_PASSWORD_MAPPING.items():
self.assertEqual(extract_password(obfuscated_password), password)
def test_invalid_regex_format(self):
obfuscated_password = "sha255$1e2d5bc2hhMjU2JDFlMmQ1Yk1USXpORFUyZjc5MTI3PQ==f79127="
self.assertEqual(extract_password(obfuscated_password), None)
obfuscated_password = "sha255$1e2d5bc2hhMjU2JDFlMmQ1Yk1USXpORFUyZjc5MTI3PQ=="
self.assertEqual(extract_password(obfuscated_password), None)
def test_invalid_padding(self):
obfuscated_password = "sha256$1e456bc2hhMjU2JDFlMmQ1Yk1USXpORFUyZjc5MTI3PQ==f79127="
self.assertEqual(extract_password(obfuscated_password), '')
| 48.733333 | 103 | 0.787962 |
2584aaa1d2f37ee517c9ac1bce084409d8b80da8 | 1,094 | py | Python | week7-8/week8/hw32/file_stats.py | fac33/py_intro_exercise | e2c3f3044537b5cc8980bf7aa7651fd16c5fd34b | [
"MIT"
] | null | null | null | week7-8/week8/hw32/file_stats.py | fac33/py_intro_exercise | e2c3f3044537b5cc8980bf7aa7651fd16c5fd34b | [
"MIT"
] | null | null | null | week7-8/week8/hw32/file_stats.py | fac33/py_intro_exercise | e2c3f3044537b5cc8980bf7aa7651fd16c5fd34b | [
"MIT"
] | null | null | null | ################################################################################
# Author:Fanyang Cheng
# Date:27/03/2021
# Description: This file read one sepcific text file and track the number of words
#, number of lines and the average words per line.
################################################################################
def main():
with open('rumpelstiltskin.txt','r') as fo:
cont = fo.read()
cont_l = cont.split("\n") # split by enter.
l_n = len(cont_l)
while not (cont_l[-1]): #at the end of file, if there are several blank lines, then delete them.
l_n -= 1
cont_l.pop()
cont_wm = " ".join(cont_l)
cont_wt = cont_wm.split(" ") # split by blank
cont_w = [i for i in cont_wt if i] #delete the '' item in the list created by split method.
w_n = len(cont_w)
avg = w_n/l_n
print("Total number of words:",w_n)
print("Total number of lines:",l_n)
print("Average number of words per line:",format(avg,'.1f'))
if __name__ == '__main__':
main()
| 40.518519 | 106 | 0.508227 |
7bd8f8d78b2c4f5dccb96fea010dd3900e9e48d8 | 812 | py | Python | log_offload/__init__.py | our-city-app/log-offload | 1842dbd6d10af4435582f1d1a9994c9dc15c6d0c | [
"Apache-2.0"
] | null | null | null | log_offload/__init__.py | our-city-app/log-offload | 1842dbd6d10af4435582f1d1a9994c9dc15c6d0c | [
"Apache-2.0"
] | 1 | 2018-09-13T11:00:59.000Z | 2022-03-23T08:31:35.000Z | log_offload/__init__.py | our-city-app/log-offload | 1842dbd6d10af4435582f1d1a9994c9dc15c6d0c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Green Valley Belgium NV
# NOTICE: THIS FILE HAS BEEN MODIFIED BY GREEN VALLEY BELGIUM NV IN ACCORDANCE WITH THE APACHE LICENSE VERSION 2.0
# Copyright 2018 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.6@@
__version__ = '0.2.0'
| 38.666667 | 114 | 0.751232 |
ce57b1f3c2f669f588fcecc073631f78263acb2c | 4,707 | py | Python | amazon_paapi/sdk/models/browse_node_info.py | frenners/python-amazon-paapi | 9cb744bef17f5127231367430191df12126e9c24 | [
"MIT"
] | 121 | 2020-03-07T06:56:56.000Z | 2022-03-30T21:03:43.000Z | amazon_paapi/sdk/models/browse_node_info.py | frenners/python-amazon-paapi | 9cb744bef17f5127231367430191df12126e9c24 | [
"MIT"
] | 61 | 2020-03-03T11:03:58.000Z | 2022-01-20T19:27:49.000Z | amazon_paapi/sdk/models/browse_node_info.py | frenners/python-amazon-paapi | 9cb744bef17f5127231367430191df12126e9c24 | [
"MIT"
] | 29 | 2020-03-04T18:08:22.000Z | 2022-03-25T18:49:46.000Z | # coding: utf-8
"""
Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
"""
ProductAdvertisingAPI
https://webservices.amazon.com/paapi5/documentation/index.html # noqa: E501
"""
import pprint
import re # noqa: F401
import six
from .browse_node import BrowseNode # noqa: F401,E501
from .website_sales_rank import WebsiteSalesRank # noqa: F401,E501
class BrowseNodeInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'browse_nodes': 'list[BrowseNode]',
'website_sales_rank': 'WebsiteSalesRank'
}
attribute_map = {
'browse_nodes': 'BrowseNodes',
'website_sales_rank': 'WebsiteSalesRank'
}
def __init__(self, browse_nodes=None, website_sales_rank=None): # noqa: E501
"""BrowseNodeInfo - a model defined in Swagger""" # noqa: E501
self._browse_nodes = None
self._website_sales_rank = None
self.discriminator = None
if browse_nodes is not None:
self.browse_nodes = browse_nodes
if website_sales_rank is not None:
self.website_sales_rank = website_sales_rank
@property
def browse_nodes(self):
"""Gets the browse_nodes of this BrowseNodeInfo. # noqa: E501
:return: The browse_nodes of this BrowseNodeInfo. # noqa: E501
:rtype: list[BrowseNode]
"""
return self._browse_nodes
@browse_nodes.setter
def browse_nodes(self, browse_nodes):
"""Sets the browse_nodes of this BrowseNodeInfo.
:param browse_nodes: The browse_nodes of this BrowseNodeInfo. # noqa: E501
:type: list[BrowseNode]
"""
self._browse_nodes = browse_nodes
@property
def website_sales_rank(self):
"""Gets the website_sales_rank of this BrowseNodeInfo. # noqa: E501
:return: The website_sales_rank of this BrowseNodeInfo. # noqa: E501
:rtype: WebsiteSalesRank
"""
return self._website_sales_rank
@website_sales_rank.setter
def website_sales_rank(self, website_sales_rank):
"""Sets the website_sales_rank of this BrowseNodeInfo.
:param website_sales_rank: The website_sales_rank of this BrowseNodeInfo. # noqa: E501
:type: WebsiteSalesRank
"""
self._website_sales_rank = website_sales_rank
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BrowseNodeInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BrowseNodeInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.980892 | 95 | 0.619503 |
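# --- Editor's note: a small usage sketch for the generated model above; the values are
# illustrative. ---
if __name__ == '__main__':
    info = BrowseNodeInfo(browse_nodes=None, website_sales_rank=None)
    print(info.to_dict())            # {'browse_nodes': None, 'website_sales_rank': None}
    print(info == BrowseNodeInfo())  # True: equality compares the instances' __dict__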
e15a63067e4ac44d9fcc3899f66fff79b14de19e | 6,685 | py | Python | chemdataextractor/parse/nmr.py | edbeard/chemdataextractor-uvvis2018 | a5750d5313a250468e29d244cd4aeafdfc3250da | [
"MIT"
] | 6 | 2019-12-05T17:10:19.000Z | 2021-08-10T15:15:10.000Z | chemdataextractor/parse/nmr.py | edbeard/chemdataextractor-uvvis2018 | a5750d5313a250468e29d244cd4aeafdfc3250da | [
"MIT"
] | null | null | null | chemdataextractor/parse/nmr.py | edbeard/chemdataextractor-uvvis2018 | a5750d5313a250468e29d244cd4aeafdfc3250da | [
"MIT"
] | 2 | 2020-06-29T06:58:53.000Z | 2021-03-21T08:39:36.000Z | # -*- coding: utf-8 -*-
"""
chemdataextractor.parse.nmr
~~~~~~~~~~~~~~~~~~~~~~~~~~~
NMR text parser.
:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import logging
import re
from ..model import Compound, NmrSpectrum, NmrPeak
from ..utils import first
from .actions import join, merge, strip_stop, fix_whitespace
from .base import BaseParser
from .common import cc, equals
from .cem import chemical_name, nmr_solvent
from .elements import W, I, T, R, Optional, ZeroOrMore, SkipTo, OneOrMore, Not, Group
log = logging.getLogger(__name__)
number = R('^\d+(\.\d+)?$')
nucleus = (
W('1H') | W('13C') | W('15N') | W('31P') | W('19F') | W('11B') | W('29Si') | W('17O') | W('73Ge') | W('195Pt') |
W('33S') | W('13C{1H}') | W('13C{1H') + W('}') | W('H1') | W('C13') | W('N15') | W('P31') | W('F19') | W('B11') |
W('Si29') | W('Ge73') | W('Pt195') | W('S33')
)('nucleus').add_action(merge)
nmr_name = R('^N\.?M\.?R\.?\(?$', re.I).hide()
nmr_name_with_nucleus = R('^(1H|13C)N\.?M\.?R\.?\(?$', re.I, group=1)('nucleus')
frequency = (number('value') + R('^M?Hz$')('units'))('frequency')
delim = R('^[;:,\./]$').hide()
solvent = ((nmr_solvent | chemical_name) + Optional((R('^(\+|&|and)$') | cc) + (nmr_solvent | chemical_name)) + Optional(SkipTo(R('^([;:,\.\)]|at)$'))) + Optional(Optional(delim) + I('solvent').hide()))('solvent').add_action(join).add_action(fix_whitespace)
temp_value = (Optional(R('^[~∼\<\>]$')) + Optional(R('^[\-–−]$')) + R('^[\+\-–−]?\d+(\.\d+)?$'))('value').add_action(merge)
temp_word = (I('room') + R('^temp(erature)?$') | R('^r\.?t\.?$', re.I))('value').add_action(join)
temp_units = (W('°') + R('[CFK]') | W('K'))('units').add_action(merge)
temperature = Optional(I('at').hide()) + Group((temp_value + temp_units) | temp_word)('temperature')
def fix_nmr_peak_whitespace_error(tokens, start, result):
""""""
new_result = []
for e in result:
shift = e.find('shift')
if ',' in shift.text:
for peak_text in shift.text.split(','):
new_e = copy.deepcopy(e)
new_e.find('shift').text = peak_text
new_result.append(new_e)
else:
new_result.append(e)
return new_result
def strip_delta(tokens, start, result):
""""""
for e in result:
for child in e.iter():
if child.text.startswith('δ'):
child.text = child.text[1:]
return result
shift_range = (Optional(R('^[\-–−‒]$')) + (R('^δ?[\+\-–−‒]?\d+(\.+\d+)?[\-–−‒]\d+(\.+\d+)?\.?$') | (R('^[\+\-–−‒]?\d+(\.+\d+)?$') + R('^[\-–−‒]$') + R('^[\+\-–−‒]?\d+(\.+\d+)?\.?$'))))('shift').add_action(merge)
shift_value = (Optional(R('^[\-–−‒]$')) + R('^δ?[\+\-–−‒]?\d+(\.+\d+)?\.?$'))('shift').add_action(merge)
shift_error = (Optional(R('^[\-–−‒]$')) + R('^δ?[\+\-–−‒]?\d+(\.+\d+)?,\d+(\.+\d+)?\.?$'))('shift').add_action(merge)
shift = (shift_range | shift_value | shift_error).add_action(strip_stop).add_action(strip_delta)
split = R('^(br?)?(s|S|d|D|t|T|q|Q|quint|sept|m|M|dd|ddd|dt|td|tt|br|bs|sb|h|ABq|broad|singlet|doublet|triplet|qua(rtet)?|quintet|septet|multiplet|multiple|peaks)$')
multiplicity = (OneOrMore(split) + Optional(W('of') + split))('multiplicity').add_action(join)
coupling_value = (number + ZeroOrMore(R('^[,;&]$') + number + Not(W('H'))))('value').add_action(join)
coupling = ((R('^\d?J([HCNPFD\d,]*|cis|trans)$') + Optional(R('^[\-–−‒]$') + R('^[HCNPF\d]$')) + Optional('=')).hide() + coupling_value + Optional(W('Hz')('units')) + ZeroOrMore(R('^[,;&]$').hide() + coupling_value + W('Hz')('units')))('coupling')
number = (R('^\d+(\.\d+)?[HCNPF]\.?$') | (R('^\d+(\.\d+)?$') + R('^[HCNPF]\.?$')))('number').add_action(merge)
assignment_options = (OneOrMore(R('([CNHOPS\-–−‒=]+\d*[A-Za-z]?′*)+') | chemical_name | R('^(C?quat\.?|Ac|Ar|Ph|linker|bridge)$')) + Optional(W('×') + R('^\d+$')))('assignment').add_action(join)
assignment = Optional(R('^\d{1,2}$')('number') + Optional(W('×')).hide()) + (assignment_options + ZeroOrMore(T('CC').hide() + assignment_options))
note = (W('overlapped') | (W('×') + R('^\d+$')))('note').add_action(join)
peak_meta_options = multiplicity | coupling | number | assignment | note
peak_meta = W('(').hide() + peak_meta_options + ZeroOrMore(ZeroOrMore(delim) + peak_meta_options) + Optional(delim) + W(')').hide()
delta = (R('^[δd][HCNPF]?$') + Optional(equals)).hide()
ppm = Optional(R('^[(\[]$')) + Optional(I('in')) + I('ppm') + Optional(R('^[)\]]$'))
spectrum_meta = Optional(W('(').hide()) + (frequency | solvent | delta | temperature) + ZeroOrMore(Optional(delim) + (frequency | solvent | I('ppm') | delta | temperature)) + Optional(temperature) + Optional(W(')').hide())
prelude_options = spectrum_meta | delta | delim | ppm.hide() | equals.hide()
prelude = ((nucleus + Optional(R('^[\-–−‒]$')).hide() + nmr_name | nmr_name_with_nucleus) + ZeroOrMore(prelude_options)) | (R('^δ[HC]?$')('nucleus') + spectrum_meta + ZeroOrMore(prelude_options))
peak = Optional(delta) + (shift + Not(R('^M?Hz$')) + Optional(ppm).hide() + Optional(peak_meta))('peak').add_action(fix_nmr_peak_whitespace_error)
peaks = (peak + ZeroOrMore(ZeroOrMore(delim | W('and')).hide() + peak))('peaks')
nmr = (prelude + peaks)('nmr')
class NmrParser(BaseParser):
""""""
root = nmr
def __init__(self):
pass
def interpret(self, result, start, end):
c = Compound()
n = NmrSpectrum(
nucleus=first(result.xpath('./nucleus/text()')),
solvent=first(result.xpath('./solvent/text()')),
frequency=first(result.xpath('./frequency/value/text()')),
frequency_units=first(result.xpath('./frequency/units/text()')),
temperature=first(result.xpath('./temperature/value/text()')),
temperature_units=first(result.xpath('./temperature/units/text()'))
)
for peak_result in result.xpath('./peaks/peak'):
nmr_peak = NmrPeak(
shift=first(peak_result.xpath('./shift/text()')),
multiplicity=first(peak_result.xpath('./multiplicity/text()')),
coupling=first(peak_result.xpath('./coupling/value/text()')),
coupling_units=first(peak_result.xpath('./coupling/units/text()')),
number=first(peak_result.xpath('./number/text()')),
assignment=first(peak_result.xpath('./assignment/text()'))
)
n.peaks.append(nmr_peak)
c.nmr_spectra.append(n)
yield c
| 44.865772 | 257 | 0.577711 |
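# --- Editor's note: a self-contained, plain-regex illustration of the peak structure the
# grammar above targets (shift, multiplicity, coupling, integration). It is a hedged
# sketch, NOT ChemDataExtractor's actual parser, and it covers only simple cases. ---
import re

PEAK_RE = re.compile(
    r'(?P<shift>\d+\.\d+)\s*'
    r'\((?P<multiplicity>[a-z]+)'
    r'(?:,\s*J\s*=\s*(?P<coupling>[\d.]+)\s*Hz)?'
    r'(?:,\s*(?P<number>\d+H))?\)'
)

sample = 'δ 7.26 (d, J = 8.1 Hz, 2H), 3.95 (s, 3H)'
for m in PEAK_RE.finditer(sample):
    print(m.groupdict())
# {'shift': '7.26', 'multiplicity': 'd', 'coupling': '8.1', 'number': '2H'}
# {'shift': '3.95', 'multiplicity': 's', 'coupling': None, 'number': '3H'}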
ac2da2f89add792a14eadb69d2d8b44ae798cd56 | 590 | py | Python | nengo/utils/neurons.py | pedrombmachado/nengo | abc85e1a75ce2f980e19eef195d98081f95efd28 | [
"BSD-2-Clause"
] | null | null | null | nengo/utils/neurons.py | pedrombmachado/nengo | abc85e1a75ce2f980e19eef195d98081f95efd28 | [
"BSD-2-Clause"
] | null | null | null | nengo/utils/neurons.py | pedrombmachado/nengo | abc85e1a75ce2f980e19eef195d98081f95efd28 | [
"BSD-2-Clause"
] | null | null | null | from nengo.exceptions import MovedError
def spikes2events(*args, **kwargs):
raise MovedError(location="nengo_extras.neurons")
def _rates_isi_events(*args, **kwargs):
raise MovedError(location="nengo_extras.neurons")
def rates_isi(*args, **kwargs):
raise MovedError(location="nengo_extras.neurons")
def lowpass_filter(*args, **kwargs):
raise MovedError(location="nengo_extras.neurons")
def rates_kernel(*args, **kwargs):
raise MovedError(location="nengo_extras.neurons")
def settled_firingrate(*args, **kwargs):
raise MovedError(location="nengo.neurons")
| 22.692308 | 53 | 0.747458 |
92c416223b4fd21f3533a253df13ff23e2b0ef77 | 14,251 | py | Python | wyze_sdk/service/base.py | mrdranias/wyze-sdk | ab329092eca1f4b00264e18a42da8d0e7e4bf71c | [
"Unlicense"
] | null | null | null | wyze_sdk/service/base.py | mrdranias/wyze-sdk | ab329092eca1f4b00264e18a42da8d0e7e4bf71c | [
"Unlicense"
] | null | null | null | wyze_sdk/service/base.py | mrdranias/wyze-sdk | ab329092eca1f4b00264e18a42da8d0e7e4bf71c | [
"Unlicense"
] | null | null | null | from __future__ import annotations
import logging
import platform
import sys
import time
import uuid
from abc import ABCMeta
from contextlib import suppress
from json import dumps
from typing import Dict, Optional, Union
from urllib.parse import urljoin
import requests
from wyze_sdk import version
from wyze_sdk.errors import WyzeRequestError
from wyze_sdk.signature import RequestVerifier
from .wyze_response import WyzeResponse
class BaseServiceClient(metaclass=ABCMeta):
WYZE_APP_ID = "9319141212m2ik"
WYZE_APP_NAME = "wyze"
WYZE_APP_VERSION = "2.19.14"
WYZE_PHONE_TYPE = 2
def __init__(
self,
token: Optional[str] = None,
base_url: Optional[str] = None,
timeout: int = 30,
headers: Optional[dict] = None,
app_id: Optional[str] = WYZE_APP_ID,
app_name: Optional[str] = WYZE_APP_NAME,
app_version: Optional[str] = WYZE_APP_VERSION,
user_agent_prefix: Optional[str] = None,
user_agent_suffix: Optional[str] = None,
phone_id: Optional[str] = None,
phone_type: Optional[int] = WYZE_PHONE_TYPE,
request_verifier: Optional[RequestVerifier] = None,
logger: Optional[logging.Logger] = None,
):
self.token = None if token is None else token.strip()
self.base_url = base_url
self.timeout = timeout
self.app_id = app_id
self.app_name = app_name
self.app_version = app_version
self.headers = headers or {}
# self.headers["User-Agent"] = self._get_user_agent(
# user_agent_prefix, user_agent_suffix
# )
self.phone_id = phone_id if phone_id else str(uuid.uuid4())
self.phone_type = phone_type
self.default_params = {}
self.request_verifier = request_verifier
self._logger = logger if logger is not None else logging.getLogger(__name__)
def _get_user_agent(self, prefix: Optional[str] = None, suffix: Optional[str] = None):
"""Construct the user-agent header with the package info,
Python version and OS version.
Returns:
The user agent string.
e.g. 'Python/3.6.7 wyzeclient/2.0.0 Darwin/17.7.0'
"""
# __name__ returns all classes, we only want the client
client = "{0}/{1}".format("wyzeclient", version.__version__)
python_version = "Python/{v.major}.{v.minor}.{v.micro}".format(v=sys.version_info)
system_info = "{0}/{1}".format(platform.system(), platform.release())
user_agent_string = " ".join([python_version, client, system_info])
prefix = f"{prefix} " if prefix else ""
suffix = f" {suffix}" if suffix else ""
return prefix + user_agent_string + suffix
def _do_request(
self,
session: requests.Session,
request: requests.Request) -> WyzeResponse:
with suppress(requests.exceptions.HTTPError, requests.exceptions.RequestException, ValueError):
self._logger.info(f"requesting {request.method} to {request.url}")
self._logger.debug(f"headers: {request.headers}")
self._logger.debug(f"body: {request.body}")
settings = session.merge_environment_settings(request.url, {}, None, None, None)
self._logger.debug(f"settings: {settings}")
response = session.send(request, **settings)
return WyzeResponse(
client=self,
http_verb=request.method,
api_url=request.url,
req_args=request.body,
data=response.json(),
headers=response.headers,
status_code=response.status_code,
).validate()
def do_post(self, url: str, headers: dict, payload: dict, params: Optional[dict] = None) -> WyzeResponse:
with requests.Session() as client:
if headers is not None:
# add the request-specific headers
self._logger.debug('merging request-specific headers into session headers')
client.headers.update(headers)
# we have to use a prepared request because the requests module
# doesn't allow us to specify the separators in our json dumping
# and the server expects no extra whitespace
req = client.prepare_request(requests.Request('POST', url, json=payload, params=params))
self._logger.debug('unmodified prepared request')
self._logger.debug(req)
if isinstance(payload, dict):
payload = dumps(payload, separators=(',', ':'))
if isinstance(payload, str):
req.body = payload.encode('utf-8')
req.prepare_content_length(req.body)
return self._do_request(client, req)
def do_get(self, url: str, headers: dict, payload: dict) -> WyzeResponse:
# params = req_args["params"] if "params" in req_args else None
# data = req_args["data"] if "data" in req_args else None
# headers = req_args["headers"] if "headers" in req_args else None
# token = params.get("token") if params and "token" in params else None
# auth = (
# req_args["auth"] if "auth" in req_args else None
# ) # Basic Auth for oauth.v2.access / oauth.access
with requests.Session() as client:
if headers is not None:
# add the request-specific headers
self._logger.debug('merging request-specific headers into session headers')
client.headers.update(headers)
req = client.prepare_request(requests.Request('GET', url, params=payload))
return self._do_request(client, req)
def _nonce(self):
return str(round(time.time() * 1000))
def api_call(
self,
api_endpoint: str,
*,
http_verb: str = "POST",
data: Union[dict] = None,
params: dict = None,
json: dict = None,
headers: dict = None,
auth: dict = None,
) -> WyzeResponse:
"""Create a request and execute the API call to Wyze.
Args:
api_endpoint (str): The target Wyze API endpoint.
e.g. '/app/v2/home_page/get_object_list'
http_verb (str): HTTP Verb. e.g. 'POST'
data: The body to attach to the request. If a dictionary is
provided, form-encoding will take place.
e.g. {'key1': 'value1', 'key2': 'value2'}
params (dict): The URL parameters to append to the URL.
e.g. {'key1': 'value1', 'key2': 'value2'}
json (dict): JSON for the body to attach to the request
(if data is not specified).
e.g. {'key1': 'value1', 'key2': 'value2'}
headers (dict): Additional request headers
auth (dict): A dictionary that consists of access_token and refresh_token
Returns:
(WyzeResponse)
The server's response to an HTTP request. Data
from the response can be accessed like a dict.
Raises:
WyzeApiError: The following Wyze API call failed:
'/app/v2/home_page/get_object_list'.
WyzeRequestError: JSON data can only be submitted as
POST requests.
"""
has_json = json is not None
if has_json and http_verb != "POST":
msg = "JSON data can only be submitted as POST requests. GET requests should use the 'params' argument."
raise WyzeRequestError(msg)
api_url = self._get_url(self.base_url, api_endpoint)
headers = headers or {}
headers.update(self.headers)
if http_verb == "POST":
return self.do_post(url=api_url, headers=headers, payload=json, params=params)
elif http_verb == "GET":
return self.do_get(url=api_url, headers=headers, payload=params)
msg = "Unknown request type."
raise WyzeRequestError(msg)
def _get_url(self, base_url: str, api_endpoint: str) -> str:
"""Joins the base URL and an API endpoint path to form an absolute URL.
Args:
base_url (str): The base URL. e.g. 'https://api.wyzecam.com'
api_endpoint (str): The API path. e.g. '/app/v2/home_page/get_object_list'
Returns:
The absolute endpoint URL.
e.g. 'https://api.wyzecam.com/app/v2/home_page/get_object_list'
"""
return urljoin(base_url, api_endpoint)
def _get_headers(
self,
*,
headers: dict,
signature: Optional[str] = None,
signature2: Optional[str] = None,
has_json: bool,
request_specific_headers: Optional[dict],
) -> Dict[str, str]:
"""Constructs the headers needed for a request.
Args:
has_json (bool): Whether or not the request has json.
request_specific_headers (dict): Additional headers specified by the user for a specific request.
Returns:
The headers dictionary.
e.g. {
'Content-Type': 'application/json;charset=utf-8',
'Signature': 'erewf3254rgt453f34f..==',
'User-Agent': 'Python/3.6.8 wyzeclient/2.1.0 Darwin/17.7.0'
}
"""
final_headers = {
# "Content-Type": "application/json;charset=utf-8",
'Accept-Encoding': 'gzip',
}
if headers is None or "User-Agent" not in headers:
final_headers["User-Agent"] = "okhttp/4.7.2" # self._get_user_agent()
if signature:
final_headers.update({"Signature": "{}".format(signature)})
if signature2:
final_headers.update({"Signature2": "{}".format(signature2)})
if headers is None:
headers = {}
# Merge headers specified at client initialization.
final_headers.update(headers)
# Merge headers specified for a specific request. e.g. oauth.access
if request_specific_headers:
final_headers.update(request_specific_headers)
if has_json:
final_headers.update({"Content-Type": "application/json;charset=utf-8"})
return final_headers
def get_sorted_params(self, params: dict = {}) -> str:
return '&'.join(map(lambda x: x[0] + '=' + str(x[1]), params))
class WpkNetServiceClient(BaseServiceClient, metaclass=ABCMeta):
"""
    wpk net service client is the wrapper for newer Wyze services such as WpkWyzeSignatureService and WpkWyzeExService.
"""
WYZE_APP_NAME = "com.hualai"
WYZE_SALTS = {
"9319141212m2ik": "wyze_app_secret_key_132",
"venp_4c30f812828de875": "CVCSNoa0ALsNEpgKls6ybVTVOmGzFoiq",
}
def __init__(
self,
token: Optional[str] = None,
base_url: Optional[str] = "https://api.wyzecam.com/",
app_name: str = WYZE_APP_NAME,
app_id: str = BaseServiceClient.WYZE_APP_ID,
request_verifier: RequestVerifier = None
):
super().__init__(
token=token,
base_url=base_url,
app_name=app_name,
app_id=app_id,
request_verifier=request_verifier if request_verifier is not None else RequestVerifier(signing_secret=WpkNetServiceClient.WYZE_SALTS[app_id], access_token=token)
)
def _get_headers(
self,
*,
request_specific_headers: Optional[dict] = None,
nonce: int = None,
) -> Dict[str, str]:
if request_specific_headers is None:
request_specific_headers = {}
request_specific_headers.update({
'access_token': self.token,
'requestid': self.request_verifier.request_id(nonce),
})
return super()._get_headers(headers=None, has_json=False, request_specific_headers=request_specific_headers)
def api_call(
self,
api_method: str,
*,
http_verb: str = "POST",
params: dict = None,
json: dict = None,
headers: Optional[dict] = None,
nonce: int = None,
) -> WyzeResponse:
if headers is None:
headers = {}
if http_verb == "POST":
# this must be done here so that it will be included in the signing
if json is None:
json = {}
json['nonce'] = str(nonce)
request_data = dumps(json, separators=(',', ':'))
headers.update({
'signature2': self.request_verifier.generate_dynamic_signature(timestamp=nonce, body=request_data)
})
elif http_verb == "GET":
if params is None:
params = {}
# this must be done here so that it will be included in the signing
params['nonce'] = nonce
headers.update({
'signature2': self.request_verifier.generate_dynamic_signature(timestamp=nonce, body=self.get_sorted_params(sorted(params.items())))
})
return super().api_call(
api_method,
http_verb=http_verb,
data=None,
params=params,
json=json,
headers=self._get_headers(request_specific_headers=headers, nonce=nonce),
auth=None,
)
class ExServiceClient(WpkNetServiceClient, metaclass=ABCMeta):
"""
ex service client is the wrapper for WpkWyzeExService.
"""
def _get_headers(
self,
*,
request_specific_headers: Optional[dict] = None,
nonce: Optional[int] = None,
) -> Dict[str, str]:
if request_specific_headers is None:
request_specific_headers = {}
request_specific_headers.update({
'appid': self.app_id,
'appinfo': f"wyze_android_{self.app_version}",
'phoneid': self.phone_id,
})
return super()._get_headers(request_specific_headers=request_specific_headers)
class SignatureServiceClient(WpkNetServiceClient, metaclass=ABCMeta):
"""
signature service client is the wrapper for WpkWyzeSignatureService
"""
| 37.901596 | 173 | 0.604308 |
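# --- Editor's note: a standalone re-statement of get_sorted_params() above, showing the
# canonical query-string form used when signing GET requests; for illustration only. ---
def get_sorted_params(params):
    return '&'.join(map(lambda x: x[0] + '=' + str(x[1]), params))

print(get_sorted_params(sorted({'nonce': 1633036800000, 'sv': 'abc123'}.items())))
# -> 'nonce=1633036800000&sv=abc123'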
a7e7408c9152580b48d14a73a3cd7970174eb732 | 949 | py | Python | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/image_tag_create_batch_py3.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/image_tag_create_batch_py3.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | null | null | null | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/image_tag_create_batch_py3.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageTagCreateBatch(Model):
"""ImageTagCreateBatch.
:param tags:
:type tags:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '[ImageTagCreateEntry]'},
}
def __init__(self, *, tags=None, **kwargs) -> None:
super(ImageTagCreateBatch, self).__init__(**kwargs)
self.tags = tags
| 31.633333 | 91 | 0.586934 |
fbf02a8244b372c9e2ed168da237cb2c18234951 | 3,772 | py | Python | lemur_cms/settings.py | lemur-cms/lemur-cms | 6dfa087f9c16f1452c0af935855abfedd73fa239 | [
"BSD-2-Clause"
] | null | null | null | lemur_cms/settings.py | lemur-cms/lemur-cms | 6dfa087f9c16f1452c0af935855abfedd73fa239 | [
"BSD-2-Clause"
] | 2 | 2022-02-12T23:03:18.000Z | 2022-02-26T19:08:27.000Z | lemur_cms/settings.py | lemur-cms/lemur-cms | 6dfa087f9c16f1452c0af935855abfedd73fa239 | [
"BSD-2-Clause"
] | 1 | 2019-10-19T22:21:33.000Z | 2019-10-19T22:21:33.000Z | from __future__ import absolute_import
import os
from django.apps import apps
from lemur_cms.base import lemur_cms, default
from lemur_cms.utils.settings import merge
import os
import six
import logging
import warnings
from django.apps import apps
from lemur_cms.conf.spec import DJANGO_CONF
from lemur_cms.base import lemur_cms, default
from lemur_cms.utils.settings import (get_conf_from_module, merge,
get_loaded_modules)
from importlib import import_module # noqa
from django.utils.module_loading import module_has_submodule # noqa
from lemur_cms.conf.default import *
"""
Lemur CMS settings
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0vks5u62=+q71pwlgg4np(e8-(r21nck7(a_(c%yjr5gnr==r='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
APPS = merge(APPS, default.core)
if not apps.ready:
# load directly specified apps
lemur_cms.get_app_modules(APPS)
# propagate settings to lemur_cms
lemur_cms.MODULES_AUTOLOAD = LEMURCMS_MODULE_AUTO_INCLUDE
# load all modules
lemur_cms.load_modules()
# just propagate all loaded modules to settings
LEMURCMS_MODULES = lemur_cms.get_modules()
# iterate over sorted modules
for mod, mod_cfg in LEMURCMS_MODULES:
try:
# go through django keys and merge it to main settings
for key in DJANGO_CONF.keys():
updated_value = mod_cfg.get_value(key, globals()[key])
globals()[key] = updated_value
locals()[key] = updated_value
# map value to lemur_cms but under our internal name
setattr(lemur_cms, DJANGO_CONF[key], updated_value)
if mod_cfg.urls_conf:
MODULE_URLS[mod_cfg.urls_conf] = {'is_public': mod_cfg.public}
except Exception as e:
warnings.warn(
'Exception "{}" raised during loading '
'module {}'.format(str(e), mod))
else:
warnings.warn("LemurCMS modules are already loaded. Skiped now.")
setattr(lemur_cms, 'widgets', WIDGETS)
# and again merge core with others
APPS = merge(APPS, default.core)
WSGI_APPLICATION = 'lemur_cms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'lemur_db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
LANGUAGES = [
('en', 'EN')
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# cors headers
CORS_ORIGIN_WHITELIST = [
'http://localhost:3000',
'http://localhost:5000',
'http://localhost:8000',
'http://localhost:8080',
]
| 26.013793 | 91 | 0.688759 |
403bdbad1bd39062aa5916f154e0f36dd271e115 | 735 | py | Python | docs/examples/container/kubernetes/instantiate_driver.py | rgharris/libcloud | 90971e17bfd7b6bb97b2489986472c531cc8e140 | [
"Apache-2.0"
] | null | null | null | docs/examples/container/kubernetes/instantiate_driver.py | rgharris/libcloud | 90971e17bfd7b6bb97b2489986472c531cc8e140 | [
"Apache-2.0"
] | 1 | 2021-12-06T12:29:13.000Z | 2021-12-06T12:29:13.000Z | docs/examples/container/kubernetes/instantiate_driver.py | rgharris/libcloud | 90971e17bfd7b6bb97b2489986472c531cc8e140 | [
"Apache-2.0"
] | 1 | 2019-08-05T10:12:02.000Z | 2019-08-05T10:12:02.000Z | from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
cls = get_driver(Provider.KUBERNETES)
# 1. Client side cert auth
conn = cls(
host="192.168.99.103",
port=8443,
secure=True,
key_file="/home/user/.minikube/client.key",
cert_file="/home/user/.minikube/client.crt",
ca_cert="/home/user/.minikube/ca.crt",
)
# 2. Bearer bootstrap token auth
conn = cls(key="my_token", host="126.32.21.4", ex_token_bearer_auth=True)
# 3. Basic auth
conn = cls(
key="my_username", secret="THIS_IS)+_MY_SECRET_KEY+I6TVkv68o4H", host="126.32.21.4"
)
for container in conn.list_containers():
print(container.name)
for cluster in conn.list_clusters():
print(cluster.name)
| 25.344828 | 87 | 0.717007 |
258c730049a5832c9489afb89c6c88f5b191e95c | 12,757 | py | Python | tests/requirements.py | DanCardin/alembic | 2719c0ef1917af0b74b9d642a75c9b6dd5cf537a | [
"MIT"
] | 1,324 | 2018-11-27T05:44:41.000Z | 2022-03-30T19:49:20.000Z | tests/requirements.py | DanCardin/alembic | 2719c0ef1917af0b74b9d642a75c9b6dd5cf537a | [
"MIT"
] | 452 | 2018-11-27T22:43:38.000Z | 2022-03-28T04:33:43.000Z | tests/requirements.py | DanCardin/alembic | 2719c0ef1917af0b74b9d642a75c9b6dd5cf537a | [
"MIT"
] | 159 | 2018-11-29T18:46:15.000Z | 2022-03-28T16:34:19.000Z | from sqlalchemy import exc as sqla_exc
from sqlalchemy import text
from alembic.testing import exclusions
from alembic.testing.requirements import SuiteRequirements
from alembic.util import compat
from alembic.util import sqla_compat
class DefaultRequirements(SuiteRequirements):
@property
def unicode_string(self):
return exclusions.skip_if(["oracle"])
@property
def alter_column(self):
return exclusions.skip_if(["sqlite"], "no ALTER COLUMN support")
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.skip_if(["sqlite", "firebird"], "no schema support")
@property
def no_referential_integrity(self):
"""test will fail if referential integrity is enforced"""
return exclusions.fails_on_everything_except("sqlite")
@property
def non_native_boolean(self):
"""test will fail if native boolean is provided"""
return exclusions.fails_if(
exclusions.LambdaPredicate(
lambda config: config.db.dialect.supports_native_boolean
)
)
@property
def non_native_boolean_check_constraint(self):
"""backend creates a check constraint for booleans if enabled"""
return exclusions.only_on(
exclusions.LambdaPredicate(
lambda config: not config.db.dialect.supports_native_boolean
and config.db.dialect.non_native_boolean_check_constraint
)
)
@property
def check_constraints_w_enforcement(self):
return exclusions.fails_on(["mysql", "mariadb"])
@property
def unnamed_constraints(self):
"""constraints without names are supported."""
return exclusions.only_on(["sqlite"])
@property
def fk_names(self):
"""foreign key constraints always have names in the DB"""
return exclusions.fails_on("sqlite")
@property
def reflects_fk_options(self):
return exclusions.open()
@property
def fk_initially(self):
"""backend supports INITIALLY option in foreign keys"""
return exclusions.only_on(["postgresql"])
@property
def fk_deferrable(self):
"""backend supports DEFERRABLE option in foreign keys"""
return exclusions.only_on(["postgresql", "oracle"])
@property
def fk_deferrable_is_reflected(self):
return self.fk_deferrable + exclusions.fails_on("oracle")
@property
def fk_ondelete_restrict(self):
return exclusions.only_on(["postgresql", "sqlite", "mysql"])
@property
def fk_onupdate_restrict(self):
return self.fk_onupdate + exclusions.fails_on(["mssql"])
@property
def fk_ondelete_noaction(self):
return exclusions.only_on(
["postgresql", "mysql", "mariadb", "sqlite", "mssql"]
)
@property
def fk_ondelete_is_reflected(self):
def go(config):
if exclusions.against(config, "mssql"):
return not sqla_compat.sqla_14_26
else:
return False
return exclusions.fails_if(go)
@property
def fk_onupdate_is_reflected(self):
def go(config):
if exclusions.against(config, "mssql"):
return not sqla_compat.sqla_14_26
else:
return False
return self.fk_onupdate + exclusions.fails_if(go)
@property
def fk_onupdate(self):
return exclusions.only_on(
["postgresql", "mysql", "mariadb", "sqlite", "mssql"]
)
@property
def reflects_unique_constraints_unambiguously(self):
return exclusions.fails_on(["mysql", "mariadb", "oracle"])
@property
def reflects_indexes_w_sorting(self):
# TODO: figure out what's happening on the SQLAlchemy side
# when we reflect an index that has asc() / desc() on the column
return exclusions.fails_on(["oracle"])
@property
def long_names(self):
if sqla_compat.sqla_14:
return exclusions.skip_if("oracle<18")
else:
return exclusions.skip_if("oracle")
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return exclusions.fails_on_everything_except(
"postgresql", "oracle", "mssql", "sybase", "sqlite"
)
@property
def datetime_timezone(self):
"""target dialect supports timezone with datetime types."""
return exclusions.only_on(["postgresql"])
@property
def postgresql(self):
return exclusions.only_on(["postgresql"])
@property
def mysql(self):
return exclusions.only_on(["mysql", "mariadb"])
@property
def oracle(self):
return exclusions.only_on(["oracle"])
@property
def mssql(self):
return exclusions.only_on(["mssql"])
@property
def postgresql_uuid_ossp(self):
def check_uuid_ossp(config):
if not exclusions.against(config, "postgresql"):
return False
try:
config.db.execute("SELECT uuid_generate_v4()")
return True
            except Exception:
return False
return exclusions.only_if(check_uuid_ossp)
def _has_pg_extension(self, name):
def check(config):
if not exclusions.against(config, "postgresql"):
return False
with config.db.connect() as conn:
count = conn.scalar(
text(
"SELECT count(*) FROM pg_extension "
"WHERE extname='%s'" % name
)
)
return bool(count)
return exclusions.only_if(check, "needs %s extension" % name)
@property
def hstore(self):
return self._has_pg_extension("hstore")
@property
def btree_gist(self):
return self._has_pg_extension("btree_gist")
@property
def autoincrement_on_composite_pk(self):
return exclusions.skip_if(["sqlite"], "not supported by database")
@property
def integer_subtype_comparisons(self):
"""if a compare of Integer and BigInteger is supported yet."""
return exclusions.skip_if(["oracle"], "not supported by alembic impl")
@property
def autocommit_isolation(self):
"""target database should support 'AUTOCOMMIT' isolation level"""
return exclusions.only_on(["postgresql", "mysql", "mariadb"])
@property
def computed_columns(self):
# TODO: in theory if these could come from SQLAlchemy dialects
# that would be helpful
return self.computed_columns_api + exclusions.skip_if(
["postgresql < 12", "sqlite < 3.31", "mysql < 5.7"]
)
@property
def computed_reflects_as_server_default(self):
# note that this rule will go away when SQLAlchemy correctly
# supports reflection of the "computed" construct; the element
# will consistently be present as both column.computed and
# column.server_default for all supported backends.
return (
self.computed_columns
+ exclusions.only_if(
["postgresql", "oracle"],
"backend reflects computed construct as a server default",
)
+ exclusions.skip_if(self.computed_reflects_normally)
)
@property
def computed_doesnt_reflect_as_server_default(self):
# note that this rule will go away when SQLAlchemy correctly
# supports reflection of the "computed" construct; the element
# will consistently be present as both column.computed and
# column.server_default for all supported backends.
return (
self.computed_columns
+ exclusions.skip_if(
["postgresql", "oracle"],
"backend reflects computed construct as a server default",
)
+ exclusions.skip_if(self.computed_reflects_normally)
)
@property
def check_constraint_reflection(self):
return exclusions.fails_on_everything_except(
"postgresql",
"sqlite",
"oracle",
self._mysql_and_check_constraints_exist,
)
def mysql_check_col_name_change(self, config):
# MySQL has check constraints that enforce an reflect, however
# they prevent a column's name from being changed due to a bug in
# MariaDB 10.2 as well as MySQL 8.0.16
if exclusions.against(config, ["mysql", "mariadb"]):
if sqla_compat._is_mariadb(config.db.dialect):
mnvi = sqla_compat._mariadb_normalized_version_info
norm_version_info = mnvi(config.db.dialect)
return norm_version_info >= (10, 2) and norm_version_info < (
10,
2,
22,
)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return True
def _mysql_and_check_constraints_exist(self, config):
# 1. we have mysql / mariadb and
# 2. it enforces check constraints
if exclusions.against(config, ["mysql", "mariadb"]):
if sqla_compat._is_mariadb(config.db.dialect):
mnvi = sqla_compat._mariadb_normalized_version_info
norm_version_info = mnvi(config.db.dialect)
return norm_version_info >= (10, 2)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return False
@property
def json_type(self):
return exclusions.only_on(
[
lambda config: exclusions.against(config, "mysql")
and (
(
not config.db.dialect._is_mariadb
and exclusions.against(config, "mysql >= 5.7")
)
or (
config.db.dialect._mariadb_normalized_version_info
>= (10, 2, 7)
)
),
"mariadb>=10.2.7",
"postgresql >= 9.3",
self._sqlite_json,
self._mssql_json,
]
)
def _mssql_json(self, config):
if not sqla_compat.sqla_14:
return False
else:
return exclusions.against(config, "mssql")
def _sqlite_json(self, config):
if not sqla_compat.sqla_14:
return False
elif not exclusions.against(config, "sqlite >= 3.9"):
return False
else:
with config.db.connect() as conn:
try:
return (
conn.execute(
text(
"""select json_extract('{"foo": "bar"}', """
"""'$."foo"')"""
)
).scalar()
== "bar"
)
except sqla_exc.DBAPIError:
return False
@property
def identity_columns(self):
# TODO: in theory if these could come from SQLAlchemy dialects
# that would be helpful
return self.identity_columns_api + exclusions.only_on(
["postgresql >= 10", "oracle >= 12", "mssql"]
)
@property
def identity_columns_alter(self):
# TODO: in theory if these could come from SQLAlchemy dialects
# that would be helpful
return self.identity_columns_api + exclusions.only_on(
["postgresql >= 10", "oracle >= 12"]
)
@property
def supports_identity_on_null(self):
return self.identity_columns + exclusions.only_on(["oracle"])
@property
def legacy_engine(self):
return exclusions.only_if(
lambda config: not getattr(config.db, "_is_future", False)
)
@property
def stubs_test(self):
def requirements():
try:
import black # noqa
import zimports # noqa
return False
except Exception:
return True
imports = exclusions.skip_if(
requirements, "black and zimports are required for this test"
)
version = exclusions.only_if(
lambda _: compat.py39, "python 3.9 is required"
)
return imports + version
| 32.214646 | 78 | 0.586502 |
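# --- Editor's note: a hedged sketch showing how one more requirement would be added in
# the same style as DefaultRequirements above; the property name and backend versions are
# illustrative, not part of Alembic. ---
class ExtraRequirements(DefaultRequirements):
    @property
    def partitioned_tables(self):
        """target database supports declarative table partitioning."""
        return exclusions.only_on(["postgresql >= 10"])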
9485a808d7b665f3fe3f2188435263697d4a4d59 | 2,376 | py | Python | src/Algopylib/algo/sort.py | siddhartha18101/Algopylib | 5890fbb20ae9998a4ade9f7e3df602b24e49ea17 | [
"MIT"
] | null | null | null | src/Algopylib/algo/sort.py | siddhartha18101/Algopylib | 5890fbb20ae9998a4ade9f7e3df602b24e49ea17 | [
"MIT"
] | null | null | null | src/Algopylib/algo/sort.py | siddhartha18101/Algopylib | 5890fbb20ae9998a4ade9f7e3df602b24e49ea17 | [
"MIT"
] | null | null | null | from typing import List, Union
def bubble_sort(arr : List , simulation : bool = False) -> List:
"""Sorts A List using bubble sort algorithm
https://en.wikipedia.org/wiki/Bubble_sort
Worst-case performance: O(N^2)
Parameters:
arr(List) : Unsorted List
simulation(bool) : to enable simulation (default argument is False)
Returns:
arr(List) : Returns sorted List
"""
def swap(i : int, j : int) -> None:
"""Swaps two element of List
Parameters:
i(int) : index of first element
j(int) : index of second element
Returns:
None : Function returns nothing
"""
arr[i], arr[j] = arr[j], arr[i]
n : int = len(arr)
swapped : bool = True
iteration : int = 0
if simulation:
print("iteration",iteration,":",*arr)
x : int = -1
while swapped:
swapped = False
x = x + 1
for i in range(1, n-x):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
return arr
def insertion_sort(arr : List , simulation : bool = False) -> List:
""" Insertion Sort
Complexity: O(n^2)
1: Iterate from arr[1] to arr[n] over the array.
2: Compare the current element (key) to its predecessor.
3: If the key element is smaller than its predecessor, compare it to the elements before. Move the greater elements one position up to make space for the swapped element.
"""
iteration : int = 0
if simulation:
print("iteration", iteration, ":", *arr)
for i in range(len(arr)):
cursor : Union[int, float, complex, str] = arr[i]
pos : int = i
""" Move elements of arr[0..i-1], that are greater than key, to one position ahead of their current position"""
while pos > 0 and arr[pos - 1] > cursor:
""" Swap the number down the list"""
arr[pos] = arr[pos - 1]
pos = pos - 1
""" Break and do the final swap"""
arr[pos] = cursor
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
return arr | 30.461538 | 174 | 0.539983 |
9e4cb5022fa7a44ee0abf8b70020c173a1196146 | 4,758 | py | Python | nova/api/openstack/compute/snapshot.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/snapshot.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/snapshot.py | xuweiliang/Codelibrary | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | [
"Apache-2.0"
] | null | null | null |
from oslo_utils import strutils
import webob
from webob import exc
from nova.api.openstack import common
from nova import compute
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from oslo_log import log as logging
from nova import exception
from nova.i18n import _
from nova import utils
ALIAS = "os-snapshot"
LOG = logging.getLogger(__name__)
class SnapshotController(wsgi.Controller):
"""the Documents API Controller declearation"""
def __init__(self):
super(SnapshotController, self).__init__()
self.compute_api = compute.API()
def _get_params(self, req, key):
if key:
return req.params.get(key, None)
return
@extensions.expected_errors((400, 404, 501))
def revert_dev_snapshot(self, req, id):
context = req.environ['nova.context']
name = self._get_params(req, "name")
try:
instance = common.get_instance(self.compute_api,
context, id)
self.compute_api.dev_snapshot_revert(context, instance, name)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("set recovery function error.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {"snapshot":{"execute":True}}
@extensions.expected_errors((400, 404, 501))
def set_dev_snapshot(self, req, id):
context = req.environ['nova.context']
name = self._get_params(req, "name")
try:
instance = common.get_instance(self.compute_api,
context, id)
if name:
instance.dev_snapshot = name
instance.save()
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("set recovery function error.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {"snapshot":{"execute":True}}
@extensions.expected_errors(404)
def index(self, req):
context = req.environ['nova.context']
instance_id = self._get_params(req, "instance_id")
instance = common.get_instance(self.compute_api,
context, instance_id)
try:
snapshot = self.compute_api.dev_snapshot_list(context, instance)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("does not support this function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {'snapshot':snapshot}
@extensions.expected_errors(400)
def create(self, req, body):
context = req.environ['nova.context']
if 'instance_id' not in body['snapshot'] \
                or 'name' not in body['snapshot']:
return
instance_id = body['snapshot']['instance_id']
snapshot_name = body['snapshot']['name']
try:
instance = common.get_instance(self.compute_api,
context, instance_id)
self.compute_api.dev_snapshot_create(context, instance, snapshot_name)
except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("create dev snapshot function error.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return {"snapshot":{"execute":True}}
@extensions.expected_errors(400)
def delete(self, req, id):
context = req.environ['nova.context']
name = self._get_params(req, 'name')
try:
instance = common.get_instance(self.compute_api,
context, id)
self.compute_api.dev_snapshot_delete(context, instance, name)
except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("delete dev snapshot function error.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
class DevSnapshot(extensions.V21APIExtensionBase):
"""Documents ExtensionDescriptor implementation"""
name = "DevSnapshot"
alias = ALIAS
version = 1
def get_resources(self):
resource = extensions.ResourceExtension(ALIAS, SnapshotController(),
member_actions={'set_dev_snapshot':'GET',
'revert_dev_snapshot':'GET'})
return [resource]
def get_controller_extensions(self):
return []
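    # Routing sketch (illustrative, not taken from the source): the resource and
    # member_actions above would expose URLs of the form
    #   GET .../os-snapshot?instance_id=<uuid>                      -> index()
    #   GET .../os-snapshot/<server_id>/set_dev_snapshot?name=...   -> set_dev_snapshot()
    #   GET .../os-snapshot/<server_id>/revert_dev_snapshot?name=...-> revert_dev_snapshot()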
| 37.171875 | 82 | 0.6219 |
b4a5887c593bd7a9995f589acd3bd2b7aaa0fab9 | 10,959 | py | Python | python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-09-24T11:23:36.000Z | 2021-09-24T11:23:36.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
paddle.framework.set_default_dtype("float64")
paddle.enable_static()
import numpy as np
import unittest
from convert import convert_params_for_cell_static
from rnn_numpy import SimpleRNNCell, LSTMCell, GRUCell
class TestSimpleRNNCell(unittest.TestCase):
def __init__(self, bias=True, place="cpu"):
super(TestSimpleRNNCell, self).__init__(methodName="runTest")
self.bias = bias
self.place = paddle.CPUPlace() if place == "cpu" \
else paddle.CUDAPlace(0)
def setUp(self):
rnn1 = SimpleRNNCell(16, 32, bias=self.bias)
mp = paddle.static.Program()
sp = paddle.static.Program()
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
rnn2 = paddle.nn.SimpleRNNCell(16,
32,
bias_ih_attr=self.bias,
bias_hh_attr=self.bias)
place = self.place
exe = paddle.static.Executor(place)
scope = paddle.fluid.Scope()
with paddle.static.scope_guard(scope):
exe.run(sp)
convert_params_for_cell_static(rnn1, rnn2, place)
self.mp = mp
self.sp = sp
self.rnn1 = rnn1
self.rnn2 = rnn2
self.executor = exe
self.scope = scope
def test_with_initial_state(self):
mp = self.mp.clone()
sp = self.sp
rnn1 = self.rnn1
rnn2 = self.rnn2
exe = self.executor
scope = self.scope
x = np.random.randn(4, 16)
prev_h = np.random.randn(4, 32)
y1, h1 = rnn1(x, prev_h)
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.fluid.data(
"init_h", [-1, 32],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data, init_h)
feed_dict = {x_data.name: x, init_h.name: prev_h}
with paddle.static.scope_guard(scope):
y2, h2 = exe.run(mp, feed=feed_dict, fetch_list=[y, h])
np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
def test_with_zero_state(self):
mp = self.mp.clone()
sp = self.sp
rnn1 = self.rnn1
rnn2 = self.rnn2
exe = self.executor
scope = self.scope
x = np.random.randn(4, 16)
y1, h1 = rnn1(x)
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data)
feed_dict = {x_data.name: x}
with paddle.static.scope_guard(scope):
y2, h2 = exe.run(mp,
feed=feed_dict,
fetch_list=[y, h],
use_prune=True)
np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
def runTest(self):
self.test_with_initial_state()
self.test_with_zero_state()
class TestGRUCell(unittest.TestCase):
def __init__(self, bias=True, place="cpu"):
super(TestGRUCell, self).__init__(methodName="runTest")
self.bias = bias
self.place = paddle.CPUPlace() if place == "cpu" \
else paddle.CUDAPlace(0)
def setUp(self):
rnn1 = GRUCell(16, 32, bias=self.bias)
mp = paddle.static.Program()
sp = paddle.static.Program()
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
rnn2 = paddle.nn.GRUCell(16,
32,
bias_ih_attr=self.bias,
bias_hh_attr=self.bias)
place = self.place
exe = paddle.static.Executor(place)
scope = paddle.fluid.Scope()
with paddle.static.scope_guard(scope):
exe.run(sp)
convert_params_for_cell_static(rnn1, rnn2, place)
self.mp = mp
self.sp = sp
self.rnn1 = rnn1
self.rnn2 = rnn2
self.place = place
self.executor = exe
self.scope = scope
def test_with_initial_state(self):
mp = self.mp.clone()
sp = self.sp
rnn1 = self.rnn1
rnn2 = self.rnn2
exe = self.executor
scope = self.scope
x = np.random.randn(4, 16)
prev_h = np.random.randn(4, 32)
y1, h1 = rnn1(x, prev_h)
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.fluid.data(
"init_h", [-1, 32],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data, init_h)
feed_dict = {x_data.name: x, init_h.name: prev_h}
with paddle.static.scope_guard(scope):
y2, h2 = exe.run(mp, feed=feed_dict, fetch_list=[y, h])
np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
def test_with_zero_state(self):
mp = self.mp.clone()
sp = self.sp
rnn1 = self.rnn1
rnn2 = self.rnn2
exe = self.executor
scope = self.scope
x = np.random.randn(4, 16)
y1, h1 = rnn1(x)
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
y, h = rnn2(x_data)
feed_dict = {x_data.name: x}
with paddle.static.scope_guard(scope):
y2, h2 = exe.run(mp,
feed=feed_dict,
fetch_list=[y, h],
use_prune=True)
np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
def runTest(self):
self.test_with_initial_state()
self.test_with_zero_state()
class TestLSTMCell(unittest.TestCase):
def __init__(self, bias=True, place="cpu"):
super(TestLSTMCell, self).__init__(methodName="runTest")
self.bias = bias
self.place = paddle.CPUPlace() if place == "cpu" \
else paddle.CUDAPlace(0)
def setUp(self):
rnn1 = LSTMCell(16, 32, bias=self.bias)
mp = paddle.static.Program()
sp = paddle.static.Program()
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
rnn2 = paddle.nn.LSTMCell(16,
32,
bias_ih_attr=self.bias,
bias_hh_attr=self.bias)
place = self.place
exe = paddle.static.Executor(place)
scope = paddle.fluid.Scope()
with paddle.static.scope_guard(scope):
exe.run(sp)
convert_params_for_cell_static(rnn1, rnn2, place)
self.mp = mp
self.sp = sp
self.rnn1 = rnn1
self.rnn2 = rnn2
self.place = place
self.executor = exe
self.scope = scope
def test_with_initial_state(self):
mp = self.mp.clone()
sp = self.sp
rnn1 = self.rnn1
rnn2 = self.rnn2
exe = self.executor
scope = self.scope
x = np.random.randn(4, 16)
prev_h = np.random.randn(4, 32)
prev_c = np.random.randn(4, 32)
y1, (h1, c1) = rnn1(x, (prev_h, prev_c))
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
init_h = paddle.fluid.data(
"init_h", [-1, 32],
dtype=paddle.framework.get_default_dtype())
init_c = paddle.fluid.data(
"init_c", [-1, 32],
dtype=paddle.framework.get_default_dtype())
y, (h, c) = rnn2(x_data, (init_h, init_c))
feed_dict = {x_data.name: x, init_h.name: prev_h, init_c.name: prev_c}
with paddle.static.scope_guard(scope):
y2, h2, c2 = exe.run(mp, feed=feed_dict, fetch_list=[y, h, c])
np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(c1, c2, atol=1e-8, rtol=1e-5)
def test_with_zero_state(self):
mp = self.mp.clone()
sp = self.sp
rnn1 = self.rnn1
rnn2 = self.rnn2
exe = self.executor
scope = self.scope
x = np.random.randn(4, 16)
y1, (h1, c1) = rnn1(x)
with paddle.fluid.unique_name.guard():
with paddle.static.program_guard(mp, sp):
x_data = paddle.fluid.data(
"input", [-1, 16],
dtype=paddle.framework.get_default_dtype())
y, (h, c) = rnn2(x_data)
feed_dict = {x_data.name: x}
with paddle.static.scope_guard(scope):
y2, h2, c2 = exe.run(mp,
feed=feed_dict,
fetch_list=[y, h, c],
use_prune=True)
np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(c1, c2, atol=1e-8, rtol=1e-5)
def runTest(self):
self.test_with_initial_state()
self.test_with_zero_state()
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
devices = ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() \
else ["cpu"]
for bias in [True, False]:
for device in devices:
for test_class in [TestSimpleRNNCell, TestGRUCell, TestLSTMCell]:
suite.addTest(test_class(bias, device))
return suite
| 32.423077 | 78 | 0.543024 |
766d42b96a08d22c77f3f38d1898747f5efa2c2a | 2,531 | py | Python | utils/poincare.py | thanhhoangvan/Authentication-system-using-fingerprint | 2f7819d959f8d8b737dc0c3e8c4d7eda8e0edd94 | [
"Apache-2.0"
] | 1 | 2021-07-06T00:20:56.000Z | 2021-07-06T00:20:56.000Z | utils/poincare.py | thanhhoangvan/Authentication-system-using-fingerprint | 2f7819d959f8d8b737dc0c3e8c4d7eda8e0edd94 | [
"Apache-2.0"
] | null | null | null | utils/poincare.py | thanhhoangvan/Authentication-system-using-fingerprint | 2f7819d959f8d8b737dc0c3e8c4d7eda8e0edd94 | [
"Apache-2.0"
] | null | null | null | from utils import orientation
import math
import cv2 as cv
import numpy as np
def poincare_index_at(i, j, angles, tolerance):
"""
    compute the summed differences between adjacent orientations, keeping each difference within 90 degrees
https://books.google.pl/books?id=1Wpx25D8qOwC&lpg=PA120&ots=9wRY0Rosb7&dq=poincare%20index%20fingerprint&hl=pl&pg=PA120#v=onepage&q=poincare%20index%20fingerprint&f=false
    :param i: block row index into the orientation grid
    :param j: block column index into the orientation grid
    :param angles: 2-D array of block orientations (in radians)
    :param tolerance: allowed deviation, in degrees, around the ideal index
    :return: one of "loop", "delta", "whorl" or "none"
"""
    cells = [(-1, -1), (-1, 0), (-1, 1),    # p1 p2 p3
            (0, 1), (1, 1), (1, 0),         # p8    p4
            (1, -1), (0, -1), (-1, -1)]     # p7 p6 p5  (p1 repeated to close the ring)
angles_around_index = [math.degrees(angles[i - k][j - l]) for k, l in cells]
index = 0
for k in range(0, 8):
# calculate the difference
difference = angles_around_index[k] - angles_around_index[k + 1]
if difference > 90:
difference -= 180
elif difference < -90:
difference += 180
index += difference
if 180 - tolerance <= index <= 180 + tolerance:
return "loop"
if -180 - tolerance <= index <= -180 + tolerance:
return "delta"
if 360 - tolerance <= index <= 360 + tolerance:
return "whorl"
return "none"
def calculate_singularities(im, angles, tolerance, W, mask):
result = cv.cvtColor(im, cv.COLOR_GRAY2RGB)
    # DELTA: RED, LOOP: ORANGE, WHORL: PINK
colors = {"loop" : (0, 0, 255), "delta" : (0, 128, 255), "whorl": (255, 153, 255)}
FingerType = []
for i in range(3, len(angles) - 2): # Y
for j in range(3, len(angles[i]) - 2): # x
# mask any singularity outside of the mask
mask_slice = mask[(i-2)*W:(i+3)*W, (j-2)*W:(j+3)*W]
mask_flag = np.sum(mask_slice)
if mask_flag == (W*5)**2:
singularity = poincare_index_at(i, j, angles, tolerance)
# if singularity != "none":
if singularity == "delta":
FingerType.append([singularity, j*W, i*W])
cv.rectangle(result, ((j+0)*W, (i+0)*W), ((j+1)*W, (i+1)*W), colors[singularity], 3)
return result, FingerType
if __name__ == '__main__':
img = cv.imread('../test_img.png', 0)
cv.imshow('original', img)
angles = orientation.calculate_angles(img, 16, smoth=True)
result = calculate_singularities(img, angles, 1, 16)
| 35.152778 | 174 | 0.556302 |
964bfe94e67253b12bff9fcfb796256d0a568d26 | 3,612 | py | Python | tests/test_pysms.py | argandas/pysms | f0eb21435e65ddee151705cfd8d9a0449c03eea2 | [
"MIT"
] | null | null | null | tests/test_pysms.py | argandas/pysms | f0eb21435e65ddee151705cfd8d9a0449c03eea2 | [
"MIT"
] | null | null | null | tests/test_pysms.py | argandas/pysms | f0eb21435e65ddee151705cfd8d9a0449c03eea2 | [
"MIT"
] | null | null | null | import pytest
import simxxx.simxxx as simxxx
from unittest.mock import Mock
import serial
@pytest.fixture
def sim():
sim = simxxx.SERIAL_GSM_MODEM()
sim.open("COM4")
yield sim
sim.close()
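# Note: the `sim` fixture above expects real hardware on serial port COM4 (an
# assumption of this test module); the tests below avoid that dependency by
# stubbing serial.Serial with unittest.mock.Mock.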
def test_simxxx_send():
stub_serial = Mock(serial.Serial)
stub_serial.write = lambda x: len(x)
stub_serial.is_open = True
sim = simxxx.SERIAL_GSM_MODEM(stub_serial)
res = sim.send("Joe")
assert 3 == res
def test_simxxx_write():
stub_serial = Mock(serial.Serial)
stub_serial.write = lambda x: len(x)
stub_serial.is_open = True
sim = simxxx.SERIAL_GSM_MODEM(stub_serial)
res = sim.write([bytes(x, 'utf-8') for x in "1234567890"])
assert 10 == res
def test_simxxx_write_port_not_open():
stub_serial = Mock(serial.Serial)
stub_serial.write = lambda x: len(x)
stub_serial.is_open = False
sim = simxxx.SERIAL_GSM_MODEM(stub_serial)
with pytest.raises(Exception) as excinfo:
sim.write([bytes(x, 'utf-8') for x in "1234567890"])
assert "ERROR" in str(excinfo.value)
def test_simxxx_ping():
stub_serial = Mock(serial.Serial)
data_to_read = "OK\r\n"
stub_serial.write = lambda x: len(x)
stub_serial.is_open = True
stub_serial.read.side_effect = [bytes(x, 'utf-8') for x in data_to_read]
sim = simxxx.SIMXXX(stub_serial)
assert sim.ping() is True
def test_simxxx_read_error():
stub_serial = Mock(serial.Serial)
data_to_read = "ERROR:\r\n"
stub_serial.write = lambda x: len(x)
stub_serial.is_open = True
stub_serial.read.side_effect = [bytes(x, 'utf-8') for x in data_to_read]
sim = simxxx.SIMXXX(stub_serial)
with pytest.raises(Exception) as excinfo:
sim.wait_for_ok()
assert "ERROR" in str(excinfo.value)
def test_simxxx_read_error_cme():
stub_serial = Mock(serial.Serial)
data_to_read = "+CME ERROR:\r\n"
stub_serial.write = lambda x: len(x)
stub_serial.is_open = True
stub_serial.read.side_effect = [bytes(x, 'utf-8') for x in data_to_read]
sim = simxxx.SIMXXX(stub_serial)
with pytest.raises(Exception) as excinfo:
sim.wait_for_ok()
assert "ERROR" in str(excinfo.value)
def test_simxxx_read_error_cms():
stub_serial = Mock(serial.Serial)
data_to_read = "+CME ERROR:\r\n"
stub_serial.write = lambda x: len(x)
stub_serial.is_open = True
stub_serial.read.side_effect = [bytes(x, 'utf-8') for x in data_to_read]
sim = simxxx.SIMXXX(stub_serial)
with pytest.raises(Exception) as excinfo:
sim.wait_for_ok()
assert "ERROR" in str(excinfo.value)
"""
def test_cme_error_exception():
with pytest.raises(Exception) as excinfo:
stub_serial = Mock(serial.Serial)
stub_serial.write.return_value = 4
stub_serial.readline.return_value = b"+CME ERROR: Unexpected error\r\n"
sim = pysms.SIMXXX(stub_serial)
sim.ping()
assert "+CME ERROR" in str(excinfo.value)
def test_cms_error_exception():
with pytest.raises(Exception) as excinfo:
stub_serial = Mock(serial.Serial)
stub_serial.write.return_value = 4
stub_serial.readline.return_value = b"+CMS ERROR: Unexpected error\r\n"
sim = pysms.SIMXXX(stub_serial)
sim.ping()
assert "+CMS ERROR" in str(excinfo.value)
def test_generic_error_exception():
with pytest.raises(Exception) as excinfo:
stub_serial = Mock(serial.Serial)
stub_serial.write.return_value = 4
stub_serial.readline.return_value = b"ERROR\r\n"
sim = pysms.SIMXXX(stub_serial)
sim.ping()
assert "ERROR" in str(excinfo.value)
""" | 30.610169 | 79 | 0.683832 |
a8a1cf5a689f766bf805554c9637f0b7913d053b | 20,452 | py | Python | finrl_meta/factors/factors.py | AI4Finance-Foundation/FinRL-Metaverse | 7ada7fe917d9fdc55300409cb0a3cc719129e03a | [
"MIT"
] | 5 | 2021-11-04T07:13:08.000Z | 2021-11-07T01:41:49.000Z | finrl_meta/factors/factors.py | AI4Finance-Foundation/FinRL-Metaverse | 7ada7fe917d9fdc55300409cb0a3cc719129e03a | [
"MIT"
] | 1 | 2021-11-06T18:23:21.000Z | 2021-11-07T07:57:33.000Z | finrl_meta/factors/factors.py | AI4Finance-Foundation/FinRL-Metaverse | 7ada7fe917d9fdc55300409cb0a3cc719129e03a | [
"MIT"
] | null | null | null | import numpy as np
import scipy
import statsmodels.api as sm
import pandas as pd
def filter_Nan(df):
'''
    Count the number of NaN entries in each float-typed column.
'''
naCount_dict = {}
for col in df.columns.values:
if df[col].dtypes == float:
naCount_dict[col] = len(np.where(np.isnan(df[col]).values)[0])
for i in naCount_dict:
if (naCount_dict[i] > df.shape[0] / 10):
print(i, naCount_dict[i])
return naCount_dict
def del_Nan(data, columns):
'''
    Keep only the specified factor columns and drop rows/columns containing NaN.
'''
df = data[columns]
df.dropna(axis=1, how='all', inplace=True)
df.dropna(axis=0, how='any', inplace=True)
return df
def pearson_corr(df_, target):
'''
    Pearson correlation between each factor and the target.
'''
Pearson_dict = {}
df_.replace([np.inf, -np.inf], np.nan, inplace=True)
df = df_.dropna(axis=1, how='all')
    df = df.dropna(axis=0, how='any')
for i in df.columns.values:
if (type(df[i].values[-1]) == float or type(
df[i].values[-1]) == np.float64) and i != 'alpha084' and i != 'alpha191-017':
Pearson_dict[i] = scipy.stats.pearsonr(df[target].values, df[i].values)[0]
df_Pearson = pd.DataFrame(data=Pearson_dict, index=[0]).T
return abs(df_Pearson).sort_values(by=[0], ascending=False)
def spearmanr_corr(df_, target):
'''
    Spearman rank correlation between each factor and the target.
'''
Spearmanr_dict = {}
df_.replace([np.inf, -np.inf], np.nan, inplace=True)
df = df_.dropna(axis=1, how='all')
    df = df.dropna(axis=0, how='any')
for i in df.columns.values:
if (type(df[i].values[-1]) == float or type(
df[i].values[-1]) == np.float64) and i != 'alpha084' and i != 'alpha191-017':
Spearmanr_dict[i] = scipy.stats.spearmanr(df[target].values, df[i].values)[0]
df_Spearmanr = pd.DataFrame(data=Spearmanr_dict, index=[0]).T
return abs(df_Spearmanr).sort_values(by=[0], ascending=False)
def series_sum(S, N): # rolling N-day sum of a series; N=0 returns the running cumulative sum
return pd.Series(S).rolling(N).sum().values if N>0 else pd.Series(S).cumsum().values
def ref(S, N=1): # shift the whole series down by N (shifting introduces NaN at the head)
return pd.Series(S).shift(N).values
def ma(S,N): # N-day simple moving average of a series
return pd.Series(S).rolling(N).mean().values
def ema(S,N): # exponential moving average; for accuracy use S > 4*N (EMA needs at least 120 periods); alpha=2/(span+1)
return pd.Series(S).ewm(span=N, adjust=False).mean().values
def avedev(S, N): # mean absolute deviation (mean absolute difference from the rolling mean)
return pd.Series(S).rolling(N).apply(lambda x: (np.abs(x - x.mean())).mean()).values
def std(S,N): # N-day rolling standard deviation of a series
return pd.Series(S).rolling(N).std(ddof=0).values
def llv(S,N): # llv(C, 5): lowest close of the last 5 days
return pd.Series(S).rolling(N).min().values
def hhv(S,N): # hhv(C, 5): highest close of the last 5 days
return pd.Series(S).rolling(N).max().values
def sma(S, N, M=1): # Chinese-style SMA; needs at least 120 periods to be accurate (Xueqiu uses 180); alpha=1/(1+com)
    return pd.Series(S).ewm(alpha=M/N,adjust=False).mean().values # com=(N-M)/M
def atr(CLOSE,HIGH,LOW, N=20): # N-day average of the true range
TR = np.maximum(np.maximum((HIGH - LOW), np.abs(ref(CLOSE, 1) - HIGH)), np.abs(ref(CLOSE, 1) - LOW))
return ma(TR, N)
def dma(S, A): # dynamic moving average of S with smoothing factor A, 0<A<1 (core helper, not an indicator itself)
if isinstance(A,(int,float)): return pd.Series(S).ewm(alpha=A,adjust=False).mean().values
A=np.array(A)
A[np.isnan(A)]=1.0
Y= np.zeros(len(S))
Y[0]=S[0]
    for i in range(1,len(S)): Y[i]=A[i]*S[i]+(1-A[i])*Y[i-1] # A may also be a sequence (by jqz1226)
return Y
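# Quick check of the rolling helpers above (illustrative values):
# ma([1., 2., 3., 4.], 2)   -> array([nan, 1.5, 2.5, 3.5])
# ref([1., 2., 3.], 1)      -> array([nan, 1., 2.])
# hhv([1., 3., 2.], 2)      -> array([nan, 3., 3.])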
class MomentumFactors:
'''
    Momentum factors
'''
    # 5-day BIAS (deviation rate), 'ic_mean': '-0.045657'
    def bias_5_days(close, N=5):
        # (close - N-day SMA of close) / (N-day SMA of close) * 100, here N=5
        mac = ma(close, N)
        return (close - mac) / mac * 100
    # 10-day BIAS (deviation rate), 'ic_mean': '-0.043967'
    def bias_10_days(close, N=10):
        # (close - N-day SMA of close) / (N-day SMA of close) * 100, here N=10
        mac = ma(close, N)
        return (close - mac) / mac * 100
    # 60-day BIAS (deviation rate), 'ic_mean': '-0.039533'
    def bias_60_days(close, N=60):
        # (close - N-day SMA of close) / (N-day SMA of close) * 100, here N=60
        mac = ma(close, N)
        return (close - mac) / mac * 100
    # today's close over the past month's mean close, minus 1, 'ic_mean': '-0.039303'
    def price_1_month(close, N=21):
        # today's close / mean(close of the past month, 21 days) - 1
        return close / close.rolling(N).mean() - 1
    # today's close over the past three months' mean close, minus 1, 'ic_mean': '-0.034927'
    def price_3_monthes(close, N=61):
        # today's close / mean(close of the past three months, 61 days) - 1
        return close / close.rolling(N).mean() - 1
    # 6-day price rate of change, 'ic_mean': '-0.030587'
    def roc_6_days(close, N=6):
        # 1) AX = today's close - close 6 days ago
        # 2) BX = close 6 days ago
        # 3) ROC = AX / BX * 100
        BX = close.shift(N)
        AX = close - BX
        return AX / BX * 100
    # 12-day price rate of change, 'ic_mean': '-0.034748'
    def roc_12_days(close, N=12):
        # 1) AX = today's close - close 12 days ago  2) BX = close 12 days ago  3) ROC = AX / BX * 100
        BX = close.shift(N)
        AX = close - BX
        return AX / BX * 100
    # 20-day price rate of change, 'ic_mean': '-0.031276'
    def roc_20_days(close, N=20):
        # 1) AX = today's close - close 20 days ago  2) BX = close 20 days ago  3) ROC = AX / BX * 100
        BX = close.shift(N)
        AX = close - BX
        return AX / BX * 100
    # single-day volume-price trend (VPT), 'ic_mean': '-0.051037'
    def single_day_vpt(df):
        # (today's close - yesterday's close) / yesterday's close * today's volume (prices forward-adjusted as of the day)
        sft = df['close_price'].shift(1)
        return (df['close_price'] - sft) / sft * df['volume']
    # 6-day mean of the single-day VPT, 'ic_mean': '-0.032458'
    def single_day_vpt_6(df):
        # ma(single_day_vpt, 6)
        sft = df['close_price'].shift(1)
        return pd.Series(ma((df['close_price'] - sft) / sft * df['volume'], 6))
    # 12-day mean of the single-day VPT, 'ic_mean': '-0.031016'
    def single_day_vpt_12(df):
        # ma(single_day_vpt, 12)
        sft = df['close_price'].shift(1)
        return pd.Series(ma((df['close_price'] - sft) / sft * df['volume'], 12))
    # 10-day CCI (Commodity Channel Index), 'ic_mean': '-0.038179'
def cci_10_days(df, N=10):
# CCI:=(TYP-ma(TYP,N))/(0.015*avedev(TYP,N)) TYP:=(HIGH+LOW+CLOSE)/3 N:=10
TYP = (df['high_price'] + df['low_price'] + df['close_price']) / 3
return (TYP - ma(TYP,N)) / (0.015 * avedev(TYP,N))
    # 15-day CCI (Commodity Channel Index), 'ic_mean': '-0.035973'
def cci_15_days(df, N=15):
# CCI:=(TYP-ma(TYP,N))/(0.015*avedev(TYP,N)) TYP:=(HIGH+LOW+CLOSE)/3 N:=15
TYP = (df['high_price'] + df['low_price'] + df['close_price']) / 3
return (TYP - ma(TYP, N)) / (0.015 * avedev(TYP, N))
    # 20-day CCI (Commodity Channel Index), 'ic_mean': '-0.033437'
def cci_20_days(df, N=20):
# CCI:=(TYP-ma(TYP,N))/(0.015*avedev(TYP,N)) TYP:=(HIGH+LOW+CLOSE)/3 N:=20
TYP = (df['high_price'] + df['low_price'] + df['close_price']) / 3
return (TYP - ma(TYP, N)) / (0.015 * avedev(TYP, N))
    # today's volume relative to the past month's mean daily volume, times the mean 20-day return, 'ic_mean': '-0.032789'
    # def Volume1M(volume, profit):
    #     # today's volume / mean volume of the past 20 days * mean return of the past 20 days
    def volume_1_month(df, N=21):
        # today's volume / mean volume of the past 20 days * mean return of the past 20 days
return df['volume'] / df['volume'].rolling(N).mean() * df['target'].rolling(N).mean()
    # bull power, 'ic_mean': '-0.039968'
def bull_power(df, timeperiod=13):
return (df['high_price'] - ema(df['close_price'], timeperiod)) / df['close_price']
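    # Usage sketch (assumption: `bars` is a DataFrame with the open_price,
    # high_price, low_price, close_price, volume and target columns used above):
    # bias5 = MomentumFactors.bias_5_days(bars['close_price'])
    # cci10 = MomentumFactors.cci_10_days(bars)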
class EmotionFactors:
'''
    Sentiment (volume/turnover) factors
'''
    # turnover rate: volume over a period / total shares outstanding * 100%
    # 5-day mean turnover rate, 'ic_mean': '-0.044'
    def vol_5_days(S, total_volume, N=5):
        # mean of the turnover rate over 5 days
        S = S / total_volume
        return pd.Series(S).rolling(N).mean()
    # 10-day mean turnover rate, 'ic_mean': '-0.040'
    def vol_10_days(S, total_volume, N=10):
        # mean of the turnover rate over 10 days
        S = S / total_volume
        return pd.Series(S).rolling(N).mean()
    # 20-day mean turnover rate, 'ic_mean': '-0.035'
    def vol_20_days(S, total_volume, N=20):
        # mean of the turnover rate over 20 days, in %
        S = S / total_volume
        return pd.Series(S).rolling(N).mean()
    # 5-day mean turnover rate relative to the 120-day mean, 'ic_mean': '-0.039'
    def davol_5_days(S, total_volume):
        # 5-day mean turnover / 120-day mean turnover
        return EmotionFactors.vol_5_days(S, total_volume) / EmotionFactors.vol_5_days(S, total_volume, N=120)
    # 10-day mean turnover rate relative to the 120-day mean, 'ic_mean': '-0.033'
    def davol_10_days(S, total_volume):
        # 10-day mean turnover / 120-day mean turnover
        return EmotionFactors.vol_10_days(S, total_volume) / EmotionFactors.vol_5_days(S, total_volume, N=120)
    # 10-day standard deviation of volume, 'ic_mean': '-0.037'
    def vstd_10_days(volume, N=10):
        # rolling std of volume over 10 days
        return pd.Series(std(volume, N))
    # 20-day standard deviation of volume, 'ic_mean': '-0.033'
    def vstd_20_days(volume, N=20):
        # rolling std of volume over 20 days
        return pd.Series(std(volume, N))
    # 6-day standard deviation of trading value, 'ic_mean': '-0.044'
    def tvstd_6_days(df, N=6):
        # std of the 6-day trading value (close * volume)
        trades = df['close_price'] * df['volume']
        return pd.Series(std(trades, N))
    # 20-day standard deviation of trading value, 'ic_mean': '-0.038'
    def tvstd_20_days(df, N=20):
        # std of the 20-day trading value (close * volume)
        trades = df['close_price'] * df['volume']
        return pd.Series(std(trades, N))
    # 5-day exponential moving average of volume, 'ic_mean': '-0.035'
    def vema_5_days(volume, N=5):
        # EMA of volume over 5 days
        return pd.Series(ema(volume, N))
    # 10-day exponential moving average of volume, 'ic_mean': '-0.032'
    def vema_10_days(volume, N=10):
        # EMA of volume over 10 days
        return pd.Series(ema(volume, N))
    # 12-day moving average of volume, 'ic_mean': '-0.031'
    def vema_12_days(volume, N=12):
        # EMA of volume over 12 days
        return pd.Series(ema(volume, N))
    # volume oscillator, 'ic_mean': '-0.039'
    def vosc(volume):
        # (VEMA12 - VEMA26) as a ratio of VEMA12, scaled by 100
        ema12 = ema(volume, 12)
        return pd.Series((ema12 - ema(volume, 26)) / ema12 * 100)
    # 6-day volume rate of change, 'ic_mean': '-0.032'
    def vroc_6_days(volume, N=6):
        # (volume - volume N days ago) / volume N days ago * 100, here N=6
        sft = volume.shift(N)
        return pd.Series((volume - sft) / sft * 100)
    # 12-day volume rate of change, 'ic_mean': '-0.040'
    def vroc_12_days(volume, N=12):
        # (volume - volume N days ago) / volume N days ago * 100, here N=12
        sft = volume.shift(N)
        return pd.Series((volume - sft) / sft * 100)
    # 6-day moving average of trading value, 'ic_mean': '-0.038'
    def tvma_6_days(df, N=6):
        # moving average of the 6-day trading value (close * volume)
        trades = df['close_price'] * df['volume']
        return pd.Series(ma(trades, N))
    # Williams' variable accumulation/distribution (WVAD), 'ic_mean': '-0.031'
    def wvad(df, N=6):
        # (close - open) / (high - low) * volume, summed over the past 6 trading days
        WVA = (df['close_price'] - df['open_price']) / (df['high_price'] - df['low_price']) * df['volume']
        return WVA.rolling(N).sum()
    # volatility of the turnover rate, 'ic_mean': '-0.042'
    def turnover_volatility(volume, total_volume, N=20):
        # std of the stock's turnover rate over 20 trading days
        turnover = volume / total_volume
        return pd.Series(std(turnover, N))
    # AR popularity (sentiment) indicator, 'ic_mean': '-0.031'
    def ar(df, N=26):
        # AR = sum over N days of (high - open) / sum over N days of (open - low) * 100, here N=26
        ho = (df['high_price'] - df['open_price']).rolling(N).sum()
        ol = (df['open_price'] - df['low_price']).rolling(N).sum()
        return ho / ol * 100
class extraFacters:
'''
    Special factors
'''
    def rsrs(df, N):
        # records the regression beta, i.e. the slope
        ans = []
        # betas corrected by weighting with the coefficient of determination
        ans_rightdev= []
        # Construction of the RSRS indicator:
        # 1. Take the high- and low-price series of the previous N days.
        # 2. Fit the OLS linear regression below to obtain the day's slope (beta).
        # 3. Over the slope series of the previous M days, compute the day's z-score.
        # 4. Multiply z by the regression's R-squared to get the day's RSRS value.
        X = sm.add_constant(df['low_price'])
        model = sm.OLS(df['high_price'], X)
        beta = model.fit().params[1]
        r2=model.fit().rsquared
        ans.append(beta)
        # standardized RSRS indicator
        # take the most recent N slopes
        section = ans[-N:]
        # mean of the slope window
        mu = np.mean(section)
        # z-score of the latest slope
        sigma = np.std(section)
        zscore = (section[-1]-mu)/sigma
        # right-skew corrected RSRS score
        return pd.Series(zscore*beta*r2)
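    # Usage sketch (assumption: df holds at least N daily bars with high_price
    # and low_price; in practice the fit is run per day over a rolling window
    # so that `ans` accumulates a history of slopes):
    # score = extraFacters.rsrs(df.tail(300), N=18)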
def vix():
pass
class generalFactors:
'''
    Common technical indicators
'''
    def macd(CLOSE, SHORT=12, LONG=26, M=9): # built from EMA relations; with S=120 days this matches Xueqiu to 2 decimal places
DIF = ema(CLOSE,SHORT)-ema(CLOSE,LONG)
DEA = ema(DIF,M)
MACD=(DIF-DEA)*2
return np.round(MACD,3)
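    # Example (illustrative): MACD histogram on a synthetic close series
    # close = (pd.Series(np.random.randn(300)).cumsum() + 100).values
    # hist = generalFactors.macd(close)  # = (DIF - DEA) * 2, rounded to 3 dp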
    def kdj(df, KDJ_type, N=9, M1=3, M2=3): # KDJ indicator
RSV = (df['close_price'] - llv(df['low_price'], N)) / (hhv(df['high_price'], N) - llv(df['low_price'], N)) * 100
K = ema(RSV, (M1*2-1))
if KDJ_type == 'KDJ_K':
return K
elif KDJ_type == 'KDJ_D':
return ema(K, (M2*2-1))
elif KDJ_type == 'KDJ_J':
D = ema(K, (M2*2-1))
return K*3-D*2
    def rsi(CLOSE, N=24): # RSI indicator; matches Tongdaxin (TDX) to 2 decimal places
DIF = CLOSE-ref(CLOSE, 1)
return np.round(sma(np.maximum(DIF, 0), N) / sma(np.abs(DIF), N) * 100,3)
    def wr(df, N=10): # W&R, Williams %R indicator
WR = (hhv(df['high_price'], N) - df['close_price']) / (hhv(df['high_price'], N) - llv(df['low_price'], N)) * 100
return np.round(WR,3)
    def roll(CLOSE, BOLL_type, N=20, P=2): # BOLL indicator (Bollinger Bands)
MID = ma(CLOSE, N)
if BOLL_type == 'BOLL_mid':
return MID
elif BOLL_type == 'BOLL_upper':
return MID + std(CLOSE, N) * P
elif BOLL_type == 'BOLL_lower':
return MID - std(CLOSE, N) * P
# return RD(UPPER), RD(MID), RD(LOWER)
def psy(CLOSE, PSY_type, N=12, M=6):
PSY=series_sum(CLOSE>ref(CLOSE,1),N)/N*100
if PSY_type == 'PSY':
return PSY
elif PSY_type == 'PSYMA':
return ma(PSY,M)
# return RD(PSY), RD(PSYMA)
    def atr(df, N=20): # N-day average of the true range
TR = np.maximum(np.maximum((df['high_price'] - df['low_price']), np.abs(ref(df['close_price'], 1) - df['high_price'])), np.abs(ref(df['close_price'], 1) - df['low_price']))
return ma(TR, N)
    def bbi(CLOSE,M1=3,M2=6,M3=12,M4=20): # BBI bull/bear index
return (ma(CLOSE,M1)+ma(CLOSE,M2)+ma(CLOSE,M3)+ma(CLOSE,M4))/4
    def dmi(df, DMI_type, M1=14,M2=6): # DMI directional movement indicator; results match THS and TDX exactly
TR = series_sum(np.maximum(np.maximum(df['high_price'] - df['low_price'], np.abs(df['high_price'] - ref(df['close_price'], 1))), np.abs(df['low_price'] - ref(df['close_price'], 1))), M1)
HD = df['high_price'] - ref(df['high_price'], 1)
LD = ref(df['low_price'], 1) - df['low_price']
DMP = series_sum(np.where((HD > 0) & (HD > LD), HD, 0), M1)
DMM = series_sum(np.where((LD > 0) & (LD > HD), LD, 0), M1)
PDI = DMP * 100 / TR
MDI = DMM * 100 / TR
if DMI_type == 'DMI_PDI':
return PDI
elif DMI_type == 'DMI_MDI':
return MDI
elif DMI_type == 'DMI_ADX':
return ma(np.abs(MDI - PDI) / (PDI + MDI) * 100, M2)
elif DMI_type == 'DMI_ADXR':
ADX = ma(np.abs(MDI - PDI) / (PDI + MDI) * 100, M2)
return (ADX + ref(ADX, M2)) / 2
# return PDI, MDI, ADX, ADXR
    def taq(df, TAQ_type, N=6): # Donchian (turtle) channel indicator -- simple, and works across bull and bear markets
UP=hhv(df['high_price'], N)
DOWN=llv(df['low_price'], N)
#MID=(UP+DOWN)/2
if TAQ_type == 'TAQ_UP':
return UP
elif TAQ_type == 'TAQ_DOWN':
return DOWN
elif TAQ_type == 'TAQ_MID':
return (UP+DOWN)/2
# return UP,MID,DOWN
    def ktn(df, KTN_type, N=20, M=10): # Keltner trading channel; N=20 days, ATR over 10 days
MID=ema((df['high_price'] + df['low_price'] + df['close_price'])/3, N)
if KTN_type == 'KTN_mid':
return MID
elif KTN_type == 'KTN_upper':
ATRN=atr(df['close_price'], df['high_price'], df['low_price'], M)
return MID+2*ATRN
elif KTN_type == 'KTN_lower':
ATRN=atr(df['close_price'], df['high_price'], df['low_price'], M)
return MID-2*ATRN
# return UPPER,MID,LOWER
    def trix(CLOSE, TRIX_type, M1=12, M2=20): # TRIX, triple exponentially smoothed moving average
TR = ema(ema(ema(CLOSE, M1), M1), M1)
TRIX = (TR - ref(TR, 1)) / ref(TR, 1) * 100
if TRIX_type == 'TRIX':
return TRIX
elif TRIX_type == 'TRMA':
return ma(TRIX, M2)
# return TRIX, TRMA
    def vr(df,M1=26): # VR volume ratio
LC = ref(df['close_price'], 1)
return series_sum(np.where(df['close_price'] > LC, df['volume'], 0), M1) / series_sum(np.where(df['close_price'] <= LC, df['volume'], 0), M1) * 100
    def emv(df, EMV_type, N=14, M=9): # EMV ease-of-movement indicator
VOLUME=ma(df['volume'], N)/df['volume']
MID=100*(df['high_price'] + df['low_price'] - ref(df['high_price'] + df['low_price'], 1))/(df['high_price'] + df['low_price'])
EMV=ma(MID*VOLUME*(df['high_price'] - df['low_price'])/ma(df['high_price'] - df['low_price'], N), N)
if EMV_type == 'EMV':
return EMV
elif EMV_type == 'MAEMV':
return ma(EMV,M)
# return EMV,MAEMV
    def dpo(CLOSE, DPO_type, M1=20, M2=10, M3=6): # DPO detrended price oscillator
DPO = CLOSE - ref(ma(CLOSE, M1), M2)
if DPO_type == 'DPO':
return DPO
elif DPO_type == 'MADPO':
return ma(DPO, M3)
# return DPO, MADPO
    def brar(df, M1=26): # BRAR-ARBR sentiment indicator
# AR = series_sum(HIGH - OPEN, M1) / series_sum(OPEN - LOW, M1) * 100
return series_sum(np.maximum(0, df['high_price'] - ref(df['close_price'], 1)), M1) / series_sum(np.maximum(0, ref(df['close_price'], 1) - df['low_price']), M1) * 100
# return AR, BR
    def dfma(CLOSE, N1=10, N2=50, M=10): # DMA parallel-line difference indicator
        DIF=ma(CLOSE,N1)-ma(CLOSE,N2)
        DIFMA=ma(DIF,M) # called DMA in TDX; THS calls it "new DMA"
return DIFMA
    def mtm(CLOSE, MTM_type, N=12,M=6): # MTM momentum indicator
MTM=CLOSE-ref(CLOSE, N)
if MTM_type == 'MTM':
return MTM
elif MTM_type == 'MTMMA':
return ma(MTM,M)
# return MTM,MTMMA
    def mass(df, MASS_type, N1=9, N2=25, M=6): # Mass index
MASS=series_sum(ma(df['high_price'] - df['low_price'], N1)/ma(ma(df['high_price'] - df['low_price'], N1), N1), N2)
if MASS_type == 'MASS':
return MASS
elif MASS_type == 'MA_MASS':
return ma(MASS,M)
# MA_MASS=ma(MASS,M)
# return MASS,MA_MASS
    def obv(df): # OBV on-balance volume
return series_sum(np.where(df['close_price']>ref(df['close_price'], 1), df['volume'], np.where(df['close_price']<ref(df['close_price'], 1), - df['volume'], 0)), 0)/10000
    def mfi(df, N=14): # MFI -- essentially an RSI computed on volume
TYP = (df['high_price'] + df['low_price'] + df['close_price'])/3
V1 = series_sum(np.where(TYP>ref(TYP,1), TYP*df['volume'], 0), N)/series_sum(np.where(TYP<ref(TYP, 1), TYP*df['volume'], 0), N)
return 100-(100/(1+V1))
    def asi(df, ASI_type, M1=26, M2=10): # ASI accumulation swing index
LC = ref(df['close_price'], 1)
AA = np.abs(df['high_price'] - LC)
BB = np.abs(df['low_price'] - LC)
CC = np.abs(df['high_price'] - ref(df['low_price'],1))
DD = np.abs(LC-ref(df['open_price'], 1))
R = np.where( (AA>BB) & (AA>CC), AA+BB/2+DD/4, np.where( (BB>CC) & (BB>AA),BB+AA/2+DD/4,CC+DD/4))
X = (df['close_price'] - LC + (df['close_price'] - df['open_price'])/2 + LC - ref(df['open_price'], 1))
SI = 16*X/R*np.maximum(AA, BB)
ASI = series_sum(SI, M1)
if ASI_type == 'ASI':
return ASI
elif ASI_type == 'ASIT':
return ma(ASI, M2)
# return ASI,ASIT
    def xsii(df, XSII_type, N=102, M=7): # XSII, Xue Si channel II
        AA = ma((2*df['close_price'] + df['high_price'] + df['low_price'])/4, 5) # requires a dma() that accepts a sequence A (2021-12-04)
# TD1 = AA*N/100 TD2 = AA*(200-N) / 100
if XSII_type == 'XSII_TD1':
return AA*N/100
elif XSII_type == 'XSII_TD2':
return AA*(200-N) / 100
elif XSII_type == 'XSII_TD3':
CC = np.abs((2*df['close_price'] + df['high_price'] + df['low_price'])/4 - ma(df['close_price'], 20))/ma(df['close_price'], 20)
BB = df['close_price'].reset_index()['close_price']
DD = dma(BB, CC)
return (1+M/100)*DD
elif XSII_type == 'XSII_TD4':
CC = np.abs((2*df['close_price'] + df['high_price'] + df['low_price'])/4 - ma(df['close_price'], 20))/ma(df['close_price'], 20)
BB = df['close_price'].reset_index()['close_price']
DD = dma(BB, CC)
return (1-M/100)*DD
| 37.185455 | 194 | 0.542832 |
f042658a648342bb2b7e35e72e9f826105b2d1ba | 587 | py | Python | myawwards/migrations/0004_auto_20210721_1838.py | VGichuki/awwards-app | 67ea12812ef93f0fc4735a65c87af64c86fd0a31 | [
"Unlicense",
"MIT"
] | null | null | null | myawwards/migrations/0004_auto_20210721_1838.py | VGichuki/awwards-app | 67ea12812ef93f0fc4735a65c87af64c86fd0a31 | [
"Unlicense",
"MIT"
] | null | null | null | myawwards/migrations/0004_auto_20210721_1838.py | VGichuki/awwards-app | 67ea12812ef93f0fc4735a65c87af64c86fd0a31 | [
"Unlicense",
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-21 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myawwards', '0003_auto_20210721_1627'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profile_pic',
field=models.ImageField(blank=True, default='', upload_to='images/'),
),
migrations.AlterField(
model_name='project',
name='image',
field=models.ImageField(upload_to='projects/'),
),
]
| 24.458333 | 81 | 0.584327 |
f8af979a421c6b37426f54e77c952e741896f170 | 2,685 | py | Python | src/language_model/MaskedLM/IsRoBERTa.py | NeuroCode-io/icelandic-language-model | 3ac81bf0470926ca96abacef2c63118b07609afc | [
"MIT"
] | null | null | null | src/language_model/MaskedLM/IsRoBERTa.py | NeuroCode-io/icelandic-language-model | 3ac81bf0470926ca96abacef2c63118b07609afc | [
"MIT"
] | null | null | null | src/language_model/MaskedLM/IsRoBERTa.py | NeuroCode-io/icelandic-language-model | 3ac81bf0470926ca96abacef2c63118b07609afc | [
"MIT"
] | null | null | null | from pathlib import Path
from transformers import RobertaConfig, DataCollatorForLanguageModeling, RobertaForMaskedLM, Trainer, TrainingArguments
from torch.utils.data.dataset import Dataset
from transformers.modeling_auto import AutoModelForMaskedLM
from transformers.tokenization_utils import PreTrainedTokenizer
from language_model.lib import log, azure_storage
logger = log.get_logger(__file__)
class IsRoBERTa:
def __init__(self, data_dir: Path, tokenizer: PreTrainedTokenizer, dataset: Dataset, local_rank=-1):
assert data_dir, "data_dir input needed"
self.model_dir = f"{data_dir}/results"
self.dataset = dataset
self.config = RobertaConfig(
vocab_size=52_000,
max_position_embeddings=514,
num_attention_heads=12,
num_hidden_layers=6,
type_vocab_size=1,
)
self.training_args = TrainingArguments(
run_name=data_dir.name,
local_rank=local_rank,
learning_rate=0.00005, # default 0.00005
output_dir=f"{self.model_dir}",
overwrite_output_dir=False,
num_train_epochs=1,
per_device_train_batch_size=48, # Nvidia K80 99%
seed=42,
save_steps=10_000,
save_total_limit=1,
)
self.data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
def upload(self):
paths = [str(x) for x in Path(self.model_dir).glob("**/*")]
for file in paths:
azure_storage.upload(file)
def has_started(self):
paths = [str(x) for x in Path(self.model_dir).glob("*")]
for path in paths:
if "checkpoint" in path:
return True
return False
def get_latest_checkpoint(self):
paths = [str(x) for x in Path(self.model_dir).glob("*")]
checkpoints = [path for path in paths if "checkpoint" in path]
return sorted(checkpoints)[-1]
def train(self):
if self.has_started():
last_checkpoint = self.get_latest_checkpoint()
logger.info(f"Resuming training from: {last_checkpoint}")
model = AutoModelForMaskedLM.from_pretrained(last_checkpoint, config=self.config)
else:
model = RobertaForMaskedLM(config=self.config)
trainer = Trainer(
model=model,
args=self.training_args,
data_collator=self.data_collator,
train_dataset=self.dataset,
prediction_loss_only=True,
)
trainer.train()
trainer.save_model(f"{self.model_dir}")
self.upload()
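    # Usage sketch (assumption: tokenizer and dataset are built elsewhere in
    # this project; names are illustrative):
    # model = IsRoBERTa(Path("data/is-corpus"), tokenizer, dataset)
    # model.train()  # resumes from the latest checkpoint if one exists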
| 31.588235 | 119 | 0.640596 |
578516c2fae5b00511c223f8a357a7fa3a229e3d | 2,475 | py | Python | .github/validators/validate-syntax.py | DSOTraining/kics | 1b5aff071b3221f03b008eb7d09ae5ce616ce96c | [
"Apache-2.0"
] | null | null | null | .github/validators/validate-syntax.py | DSOTraining/kics | 1b5aff071b3221f03b008eb7d09ae5ce616ce96c | [
"Apache-2.0"
] | null | null | null | .github/validators/validate-syntax.py | DSOTraining/kics | 1b5aff071b3221f03b008eb7d09ae5ce616ce96c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import glob
import os
import subprocess
from subprocess import CalledProcessError
LINTER_PATH = os.getenv('LINTER_PATH')
SKIP_LIST = os.getenv('SKIP_LIST_PATH')
EXTRA_ARGS = os.getenv('EXTRA_ARGS')
NO_PROGRESS = os.getenv('NO_PROGRESS', False)
parser = argparse.ArgumentParser(description='Execute linter against files')
parser.add_argument('filesglob', metavar='FILES',
type=str, nargs='+', help='List of file globs to check')
args = parser.parse_args()
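# Example invocation (hypothetical paths and glob):
#   LINTER_PATH=./bin/linter SKIP_LIST_PATH=.github/skip-list.txt \
#   python validate-syntax.py 'assets/queries/**/*.rego'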
##############################
# show summary and exit #
# code 1 if errors > 0 #
##############################
def summary(files, error_files):
print('\n\n>>------------------------------------------------')
print(f'found {len(error_files)} issues in {len(files)} file checked')
print('>>------------------------------------------------')
if len(error_files) > 0:
exit(1)
else:
exit(0)
print('starting validator')
##############################
# get files and skip list #
##############################
all_files = []
ignore_list = []
for my_glob in args.filesglob:
all_files.extend(glob.glob(my_glob))
with open(SKIP_LIST, 'r') as reader:
ignore_list = [line.rstrip() for line in reader]
print(f"Ignore list is:{os.linesep} {os.linesep.join(ignore_list)}")
files = [file for file in all_files
if file not in ignore_list
and 'positive_expected_result.json' not in file]
error_files = []
print(f'found {len(files)} files to check')
##############################
# run linter #
##############################
for file in files:
result = ''
try:
cmds = [LINTER_PATH, file]
if EXTRA_ARGS:
if len(EXTRA_ARGS.split(' ')):
cmds = cmds + EXTRA_ARGS.split(' ')
else:
cmds.append(EXTRA_ARGS)
if not NO_PROGRESS:
print(f'Validating {file}')
result = subprocess.check_output(cmds, shell=False).decode('utf-8').rstrip()
except CalledProcessError as e:
error_files.append(e)
if result:
for line in result.split('\n'):
if line:
print(f"{line}")
################################
# list errors #
################################
if len(error_files) > 0:
print("\n--- errors ---")
for error in error_files:
print(error)
error_result = error.output.decode('utf-8').rstrip()
for line in error_result.split('\n'):
print(line)
summary(files, error_files)
else:
summary(files, error_files)
| 27.5 | 80 | 0.569293 |
91cbd945870ef109116d07e02b1c233f2f5d0870 | 2,814 | py | Python | Testes com Pygame/basic_tutorial.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | Testes com Pygame/basic_tutorial.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | Testes com Pygame/basic_tutorial.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | import pygame
from pygame.locals import *
# Inicializa todos os módulos
pygame.init()
# Ou inicializa um módulo específico
pygame.font.init()
screen = [1, 1, 2, 2, 2, 1]
print(screen) # [1, 1, 2, 2, 2, 1]
screen[3] = 8
print(screen) # [1, 1, 2, 8, 2, 1]
# Creating a MAP
background = [1, 1, 2, 2, 2, 1]
screen = [0]*6 #a new blank screen
for i in range(6):
screen[i] = background[i]
print(screen) #[1, 1, 2, 2, 2, 1]
playerpos = 3
screen[playerpos] = 8
print(screen) #[1, 1, 2, 8, 2, 1]
screen[playerpos] = background[playerpos]
playerpos = playerpos - 1
screen[playerpos] = 8
print(screen) #[1, 1, 8, 2, 2, 1]
# Smooth Movement
screen = pygame.display.set_mode((640, 480))
player = pygame.image.load('player.bmp').convert()
background = pygame.image.load('liquid.bmp').convert()
screen.blit(background, (0, 0)) #draw the background
position = player.get_rect()
screen.blit(player, position) #draw the player
pygame.display.update() #and show it all
for x in range(100): #animate 100 frames
screen.blit(background, position, position) #erase
position = position.move(2, 0) #move player
screen.blit(player, position) #draw new player
pygame.display.update() #and show it all
pygame.time.delay(100) #stop the program for 1/10 second
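# Note (sketch): a responsive window would also pump the event queue once per
# frame, for example:
# for event in pygame.event.get():
#     if event.type == QUIT:
#         raise SystemExit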
'''
# First, The Mystery Functions
>>> player = pygame.image.load('player.bmp').convert()
>>> background = pygame.image.load('liquid.bmp').convert()
>>> screen = pygame.display.set_mode((640, 480))
# Handling Some Input
>>> while 1:
... for event in pygame.event.get():
... if event.type in (QUIT, KEYDOWN):
... sys.exit()
... move_and_draw_all_game_objects()
# Moving Multiple Images
>>> class GameObject:
... def __init__(self, image, height, speed):
... self.speed = speed
... self.image = image
... self.pos = image.get_rect().move(0, height)
... def move(self):
... self.pos = self.pos.move(0, self.speed)
... if self.pos.right > 600:
... self.pos.left = 0
# Putting It All Together
>>> screen = pygame.display.set_mode((640, 480))
>>> player = pygame.image.load('player.bmp').convert()
>>> background = pygame.image.load('background.bmp').convert()
>>> screen.blit(background, (0, 0))
>>> objects = []
>>> for x in range(10): #create 10 objects</i>
... o = GameObject(player, x*40, x)
... objects.append(o)
>>> while 1:
... for event in pygame.event.get():
... if event.type in (QUIT, KEYDOWN):
... sys.exit()
... for o in objects:
... screen.blit(background, o.pos, o.pos)
... for o in objects:
... o.move()
... screen.blit(o.image, o.pos)
... pygame.display.update()
... pygame.time.delay(100)
'''
| 25.816514 | 72 | 0.603056 |
008ee961246b37fface2b7624885c21ec27161c5 | 234 | py | Python | setup.py | eswk22/titanicdisaster-challenge-koggle | 8384a91a6e221bfb5a9b90d1317fa985eaf34abe | [
"MIT"
] | null | null | null | setup.py | eswk22/titanicdisaster-challenge-koggle | 8384a91a6e221bfb5a9b90d1317fa985eaf34abe | [
"MIT"
] | null | null | null | setup.py | eswk22/titanicdisaster-challenge-koggle | 8384a91a6e221bfb5a9b90d1317fa985eaf34abe | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.0.1',
    description='Kaggle challenge for titanic disaster',
author='Eswaran Krishnamoorthy',
license='MIT',
)
| 21.272727 | 56 | 0.692308 |
07bf2f664f0ab65a9fb47404770737f078e879ae | 2,842 | py | Python | ScienceCruiseDataManagement/ship_data/utilities.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 6 | 2017-10-06T09:18:04.000Z | 2022-02-10T08:54:56.000Z | ScienceCruiseDataManagement/ship_data/utilities.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 12 | 2020-02-27T09:24:50.000Z | 2021-09-22T17:39:55.000Z | ScienceCruiseDataManagement/ship_data/utilities.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 1 | 2017-10-16T13:49:33.000Z | 2017-10-16T13:49:33.000Z | import unittest
import datetime
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
def nmea_degrees_to_decimal_degrees(nmea_degrees):
decimal_position = nmea_degrees.find(".")
minutes = nmea_degrees[decimal_position-2:]
degrees = nmea_degrees[0:decimal_position-2]
if degrees == "":
degrees = "0"
return float(degrees) + float(minutes)/60
def nmea_lat_long_to_normal(latitude, north_south,
longitude, east_west):
""" Converts:
From: "3550.28461074,S,01801.84299457,E" # a string
To: (-35.838000,18.030666) # a tuple
Note that the parameters in the previous string would be:
latitude=3550.28461074
north_south=S
longitude=01801.84299457
east_west=E
"""
latitude_degrees = nmea_degrees_to_decimal_degrees(latitude)
longitude_degrees = nmea_degrees_to_decimal_degrees(longitude)
if north_south == "S":
latitude_degrees *= -1
elif north_south == "N":
pass
else:
assert False
if east_west == "W":
longitude_degrees *= -1
elif east_west == "E":
pass
else:
assert False
return (latitude_degrees, longitude_degrees)
if __name__ == "__main__":
assert nmea_lat_long_to_normal("3550.28461074","S","01801.84299457","E") \
== (-35.83807684566667, 18.030716576166668)
def check_lat_lon_direction(north_south, east_west):
'''Check that the latitude and longitude direction eg. N, S, E, W is valid.'''
if (north_south == 'N' or north_south =='S') and (east_west =='E' or east_west == 'W'):
return True
else:
return False
def string_date_time_to_tuple(date_time_string):
'''Convert string formatted date and time to integers which can then be handled by the python module, datetime.'''
[date, time] = date_time_string.split(' ')
year = int(date.split('-')[0])
month = int(date.split('-')[1])
day = int(date.split('-')[2])
hour = int(time.split(':')[0])
minute = int(time.split(':')[1])
second = int(time.split(':')[2])
utc = datetime.timezone(datetime.timedelta(0))
millions_of_sec = 0
return (year, month, day, hour, minute, second, millions_of_sec, utc)
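# Example (illustrative):
# string_date_time_to_tuple("2017-01-21 12:30:05")
# -> (2017, 1, 21, 12, 30, 5, 0, datetime.timezone(datetime.timedelta(0)))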
| 32.666667 | 118 | 0.657635 |
4f39d27579162cde41c642afaba007cc92bdd33d | 12,119 | py | Python | epgrefresh/src/EPGRefreshResource.py | Haehnchen/enigma2-plugins | 23007eb0b78665cd3a2faf98d1d6145b4f0ada3f | [
"OLDAP-2.3"
] | 1 | 2020-01-27T22:53:56.000Z | 2020-01-27T22:53:56.000Z | epgrefresh/src/EPGRefreshResource.py | Haehnchen/enigma2-plugins | 23007eb0b78665cd3a2faf98d1d6145b4f0ada3f | [
"OLDAP-2.3"
] | null | null | null | epgrefresh/src/EPGRefreshResource.py | Haehnchen/enigma2-plugins | 23007eb0b78665cd3a2faf98d1d6145b4f0ada3f | [
"OLDAP-2.3"
] | null | null | null | from twisted.web import http, resource
from EPGRefresh import epgrefresh
from EPGRefreshService import EPGRefreshService
from enigma import eServiceReference
from Components.config import config
from Components.SystemInfo import SystemInfo
from time import localtime
from OrderedSet import OrderedSet
from ServiceReference import ServiceReference
from Tools.XMLTools import stringToXML
try:
from urllib import unquote
iteritems = lambda d: d.iteritems()
except ImportError as ie:
from urllib.parse import unquote
iteritems = lambda d: d.items()
API_VERSION = "1.4"
class EPGRefreshStartRefreshResource(resource.Resource):
def render(self, req):
state = False
if epgrefresh.forceRefresh():
output = "initiated refresh"
state = True
else:
output = "could not initiate refresh"
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml')
req.setHeader('charset', 'UTF-8')
return """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<e2simplexmlresult>
<e2state>%s</e2state>
<e2statetext>%s</e2statetext>
</e2simplexmlresult>""" % ('true' if state else 'false', output)
class EPGRefreshAddRemoveServiceResource(resource.Resource):
TYPE_ADD = 0
TYPE_DEL = 1
def __init__(self, type):
assert(type in (self.TYPE_ADD, self.TYPE_DEL))
self.type = type
def render(self, req):
do_add = self.type == self.TYPE_ADD
state = False
if 'multi' in req.args:
if epgrefresh.services[0]:
epgrefresh.services[0].clear()
state = True
if epgrefresh.services[1]:
epgrefresh.services[1].clear()
state = True
if 'sref' in req.args:
duration = req.args.get("duration", None)
try:
duration = duration and int(duration)
except ValueError as ve:
output = 'invalid value for "duration": ' + str(duration)
else:
for sref in req.args.get('sref'):
sref = unquote(sref)
ref = eServiceReference(sref)
if not ref.valid():
output = 'invalid value for "sref": ' + str(sref)
elif (ref.flags & 7) == 7:
epgservice = EPGRefreshService(sref, duration)
# bouquet
if epgservice in epgrefresh.services[1]:
if do_add:
output = "bouquet already in list"
else:
epgrefresh.services[1].remove(epgservice)
output = "bouquet removed from list"
state = True
else:
if do_add:
epgrefresh.services[1].add(epgservice)
output = "bouquet added to list"
state = True
else:
output = "bouquet not in list"
else:
if not (ref.flags & eServiceReference.isGroup):
# strip all after last :
pos = sref.rfind(':')
if pos != -1:
if sref[pos-1] == ':':
pos -= 1
sref = sref[:pos+1]
epgservice = EPGRefreshService(sref, duration)
# assume service
if epgservice in epgrefresh.services[0]:
if do_add:
output = "service already in list"
else:
epgrefresh.services[0].remove(epgservice)
output = "service removed from list"
state = True
else:
if do_add:
epgrefresh.services[0].add(epgservice)
output = "service added to list"
state = True
else:
output = "service not in list"
# save if list changed
if state:
epgrefresh.saveConfiguration()
else:
output = 'missing argument "sref"'
if 'multi' in req.args:
output = 'service restriction changed'
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml')
req.setHeader('charset', 'UTF-8')
return """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<e2simplexmlresult>
<e2state>%s</e2state>
<e2statetext>%s</e2statetext>
</e2simplexmlresult> """ % ('True' if state else 'False', output)
class EPGRefreshListServicesResource(resource.Resource):
def render(self, req):
# show xml
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml')
req.setHeader('charset', 'UTF-8')
return ''.join(epgrefresh.buildConfiguration(webif = True))
class EPGRefreshPreviewServicesResource(resource.Resource):
def render(self, req):
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml')
req.setHeader('charset', 'UTF-8')
if 'sref' in req.args:
services = OrderedSet()
bouquets = OrderedSet()
for sref in req.args.get('sref'):
sref = unquote(sref)
ref = eServiceReference(sref)
if not ref.valid():
services = bouquets = None
break
elif (ref.flags & 7) == 7:
epgservice = EPGRefreshService(sref, None)
if epgservice not in bouquets:
bouquets.add(epgservice)
else:
if not (ref.flags & eServiceReference.isGroup):
# strip all after last :
pos = sref.rfind(':')
if pos != -1:
if sref[pos-1] == ':':
pos -= 1
sref = sref[:pos+1]
epgservice = EPGRefreshService(sref, None)
if epgservice not in services:
services.add(epgservice)
if services is not None and bouquets is not None:
scanServices = epgrefresh.generateServicelist(services, bouquets)
else:
scanServices = []
else:
scanServices = epgrefresh.generateServicelist(epgrefresh.services[0], epgrefresh.services[1])
returnlist = ["<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<e2servicelist>"]
extend = returnlist.extend
for serviceref in scanServices:
ref = ServiceReference(str(serviceref))
returnlist.extend((
' <e2service>\n',
' <e2servicereference>', stringToXML(str(serviceref)), '</e2servicereference>\n',
' <e2servicename>', stringToXML(ref.getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '')), '</e2servicename>\n',
' </e2service>\n',
))
returnlist.append('\n</e2servicelist>')
return ''.join(returnlist)
class EPGRefreshChangeSettingsResource(resource.Resource):
def render(self, req):
statetext = "config changed."
for key, value in iteritems(req.args):
value = value[0]
if key == "enabled":
config.plugins.epgrefresh.enabled.value = True if value == "true" else False
elif key == "enablemessage":
config.plugins.epgrefresh.enablemessage.value = True if value == "true" else False
elif key == "begin":
value = int(value)
if value:
t = localtime(value)
config.plugins.epgrefresh.begin.value = [t.tm_hour, t.tm_min]
elif key == "end":
value = int(value)
if value:
t = localtime(int(value))
config.plugins.epgrefresh.end.value = [t.tm_hour, t.tm_min]
elif key == "interval_seconds":
value = int(value)
if value:
config.plugins.epgrefresh.interval_seconds.value = value
elif key == "delay_standby":
value = int(value)
if value:
config.plugins.epgrefresh.delay_standby.value = value
elif key == "inherit_autotimer":
config.plugins.epgrefresh.inherit_autotimer.value = True if value == "true" else False
elif key == "afterevent":
config.plugins.epgrefresh.afterevent.value = True if value == "true" else False
elif key == "force":
config.plugins.epgrefresh.force.value = True if value == "true" else False
elif key == "wakeup":
config.plugins.epgrefresh.wakeup.value = True if value == "true" else False
elif key == "parse_autotimer":
if value in config.plugins.epgrefresh.parse_autotimer.choices:
config.plugins.epgrefresh.parse_autotimer.value = value
elif value == "true":
config.plugins.epgrefresh.parse_autotimer.value = "always"
else:
config.plugins.epgrefresh.parse_autotimer.value = "never"
elif key == "adapter":
if value in config.plugins.epgrefresh.adapter.choices:
config.plugins.epgrefresh.adapter.value = value
elif key == "skipProtectedServices":
if value in config.plugins.epgrefresh.skipProtectedServices.choices:
config.plugins.epgrefresh.skipProtectedServices.value = value
config.plugins.epgrefresh.save()
if config.plugins.epgrefresh.enabled.value:
epgrefresh.start()
else:
epgrefresh.stop()
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml')
req.setHeader('charset', 'UTF-8')
return """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<e2simplexmlresult>
<e2state>true</e2state>
<e2statetext>%s</e2statetext>
</e2simplexmlresult>""" % (statetext,)
class EPGRefreshSettingsResource(resource.Resource):
def render(self, req):
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml')
req.setHeader('charset', 'UTF-8')
from time import time, localtime, mktime
now = localtime()
begin_h = config.plugins.epgrefresh.begin.value
begin = mktime((
now.tm_year, now.tm_mon, now.tm_mday, begin_h[0], begin_h[1],
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
end_h = config.plugins.epgrefresh.end.value
end = mktime((
now.tm_year, now.tm_mon, now.tm_mday, end_h[0], end_h[1],
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
canDoBackgroundRefresh = SystemInfo.get("NumVideoDecoders", 1) > 1
hasAutoTimer = False
try:
from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer
hasAutoTimer = True
except ImportError as ie: pass
return """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<e2settings>
<e2setting>
<e2settingname>config.plugins.epgrefresh.enabled</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.enablemessage</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.begin</e2settingname>
<e2settingvalue>%d</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.end</e2settingname>
<e2settingvalue>%d</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.interval_seconds</e2settingname>
<e2settingvalue>%d</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.delay_standby</e2settingname>
<e2settingvalue>%d</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.inherit_autotimer</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.afterevent</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.force</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.wakeup</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.parse_autotimer</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.lastscan</e2settingname>
<e2settingvalue>%d</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.adapter</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>config.plugins.epgrefresh.skipProtectedServices</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>canDoBackgroundRefresh</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>hasAutoTimer</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
<e2setting>
<e2settingname>api_version</e2settingname>
<e2settingvalue>%s</e2settingvalue>
</e2setting>
</e2settings>""" % (
config.plugins.epgrefresh.enabled.value,
config.plugins.epgrefresh.enablemessage.value,
begin,
end,
config.plugins.epgrefresh.interval_seconds.value,
config.plugins.epgrefresh.delay_standby.value,
config.plugins.epgrefresh.inherit_autotimer.value,
config.plugins.epgrefresh.afterevent.value,
config.plugins.epgrefresh.force.value,
config.plugins.epgrefresh.wakeup.value,
config.plugins.epgrefresh.parse_autotimer.value,
config.plugins.epgrefresh.lastscan.value,
config.plugins.epgrefresh.adapter.value,
config.plugins.epgrefresh.skipProtectedServices.value,
canDoBackgroundRefresh,
hasAutoTimer,
API_VERSION,
)
| 32.317333 | 129 | 0.702286 |
38d7b6c57d4acd4b162b007912161c2f75df5e4b | 14,928 | py | Python | modules/eden/doc.py | unimauro/eden | b739d334e6828d0db14b3790f2f5e2666fc83576 | ["MIT"] | null | null | null | modules/eden/doc.py | unimauro/eden | b739d334e6828d0db14b3790f2f5e2666fc83576 | ["MIT"] | null | null | null | modules/eden/doc.py | unimauro/eden | b739d334e6828d0db14b3790f2f5e2666fc83576 | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
""" Sahana Eden Document Library
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3DocumentLibrary",
"doc_image_represent"]
import os
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3DocumentLibrary(S3Model):
names = ["doc_entity",
"doc_document",
"doc_image"]
def model(self):
T = current.T
db = current.db
request = current.request
s3 = current.response.s3
person_comment = self.pr_person_comment
person_id = self.pr_person_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
messages = current.messages
NONE = messages.NONE
UNKNOWN_OPT = messages.UNKNOWN_OPT
s3_date_format = current.deployment_settings.get_L10n_date_format()
s3_date_represent = lambda dt: S3DateTime.date_represent(dt, utc=True)
# Shortcuts
add_component = self.add_component
comments = s3.comments
configure = self.configure
define_table = self.define_table
meta_fields = s3.meta_fields
super_link = self.super_link
# ---------------------------------------------------------------------
# Document-referencing entities
#
entity_types = Storage(asset_asset=T("Asset"),
irs_ireport=T("Incident Report"),
project_project=T("Project"),
project_activity=T("Project Activity"),
project_task=T("Task"),
hms_hospital=T("Hospital"))
tablename = "doc_entity"
doc_entity = self.super_entity(tablename, "doc_id", entity_types)
# Components
add_component("doc_document", doc_entity=self.super_key(doc_entity))
add_component("doc_image", doc_entity=self.super_key(doc_entity))
# ---------------------------------------------------------------------
# Documents
#
tablename = "doc_document"
table = define_table(tablename,
super_link("site_id", "org_site"),
super_link("doc_id", doc_entity),
Field("name", length=128,
notnull=True,
label=T("Name")),
Field("file", "upload", autodelete=True),
Field("url", label=T("URL"),
requires = [IS_NULL_OR(IS_URL()),
IS_NULL_OR(IS_NOT_ONE_OF(db,
"%s.url" % tablename))],
represent = lambda url: \
url and A(url,_href=url) or NONE),
person_id(label=T("Author"),
comment=person_comment(T("Author"),
T("The Author of this Document (optional)"))),
organisation_id(widget = S3OrganisationAutocompleteWidget(default_from_profile = True)),
Field("date", "date",
label = T("Date Published"),
represent = s3_date_represent,
requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
widget = S3DateWidget()
),
location_id(),
comments(),
#Field("entered", "boolean", label=T("Entered")),
Field("checksum", readable=False, writable=False),
*meta_fields())
# Field configuration
table.file.represent = lambda file, table=table: \
self.doc_file_represent(file, table)
#table.location_id.readable = False
#table.location_id.writable = False
#table.entered.comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Entered"),
# T("Has data from this Reference Document been entered into Sahana?")))
# CRUD Strings
DOCUMENT = T("Reference Document")
ADD_DOCUMENT = T("Add Reference Document")
LIST_DOCUMENTS = T("List Documents")
s3.crud_strings[tablename] = Storage(
title_create = ADD_DOCUMENT,
title_display = T("Document Details"),
title_list = LIST_DOCUMENTS,
title_update = T("Edit Document"),
title_search = T("Search Documents"),
subtitle_create = T("Add New Document"),
subtitle_list = DOCUMENT,
label_list_button = LIST_DOCUMENTS,
label_create_button = ADD_DOCUMENT,
label_delete_button = T("Delete Document"),
msg_record_created = T("Document added"),
msg_record_modified = T("Document updated"),
msg_record_deleted = T("Document deleted"),
msg_list_empty = T("No Documents found")
)
# Search Method?
# Resource Configuration
configure(tablename,
#mark_required=["file"],
onvalidation=self.document_onvalidation)
# ---------------------------------------------------------------------
# Images
#
doc_image_type_opts = {
1:T("Photograph"),
2:T("Map"),
3:T("Document Scan"),
99:T("other")
}
tablename = "doc_image"
table = define_table(tablename,
super_link("site_id", "org_site"),
super_link("pe_id", "pr_pentity"),
super_link("doc_id", doc_entity),
Field("name", length=128,
notnull=True,
label=T("Name")),
Field("file", "upload", autodelete=True,
requires = IS_NULL_OR(IS_IMAGE(extensions=(s3.IMAGE_EXTENSIONS))),
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(request.folder,
"uploads",
"images")),
Field("url", label=T("URL"),
requires = IS_NULL_OR(IS_URL())),
Field("type", "integer",
requires = IS_IN_SET(doc_image_type_opts, zero=None),
default = 1,
label = T("Image Type"),
represent = lambda opt: doc_image_type_opts.get(opt, UNKNOWN_OPT)),
person_id(label=T("Author")),
organisation_id(widget = S3OrganisationAutocompleteWidget(default_from_profile = True)),
location_id(),
Field("date", "date",
label = T("Date Taken"),
represent = s3_date_represent,
requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
widget = S3DateWidget()
),
comments(),
Field("checksum", readable=False, writable=False),
*meta_fields())
# Field configuration
table.file.represent = doc_image_represent
# CRUD Strings
ADD_IMAGE = T("Add Photo")
LIST_IMAGES = T("List Photos")
s3.crud_strings[tablename] = Storage(
title_create = ADD_IMAGE,
title_display = T("Photo Details"),
title_list = LIST_IMAGES,
title_update = T("Edit Photo"),
title_search = T("Search Photos"),
subtitle_create = T("Add New Photo"),
subtitle_list = T("Photo"),
label_list_button = LIST_IMAGES,
label_create_button = ADD_IMAGE,
label_delete_button = T("Delete Photo"),
msg_record_created = T("Photo added"),
msg_record_modified = T("Photo updated"),
msg_record_deleted = T("Photo deleted"),
msg_list_empty = T("No Photos found"))
# Search Method
# Resource Configuration
configure(tablename,
onvalidation=lambda form: \
self.document_onvalidation(form, document=False))
# ---------------------------------------------------------------------
# Pass model-global names to response.s3
#
return Storage()
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults if the module is disabled """
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def doc_file_represent(file, table):
""" File representation """
if file:
return A(table.file.retrieve(file)[0],
_href=URL(c="default", f="download", args=[file]))
else:
return current.messages.NONE
# -------------------------------------------------------------------------
@staticmethod
def document_represent(id):
""" Foreign key representation """
if not id:
return current.messages.NONE
represent = s3_get_db_field_value(tablename = "doc_document",
fieldname = "name",
look_up_value = id)
return A(represent,
_href = URL(c="doc", f="document", args=[id], extension=""),
_target = "blank")
# -------------------------------------------------------------------------
@staticmethod
def document_onvalidation(form, document=True):
""" Form validation for both, documents and images """
import cgi
T = current.T
db = current.db
s3db = current.s3db
request = current.request
if document:
tablename = "doc_document"
msg = T("Either file upload or document URL required.")
else:
tablename = "doc_image"
msg = T("Either file upload or image URL required.")
table = s3db[tablename]
doc = form.vars.file
url = form.vars.url
if not hasattr(doc, "file"):
id = request.post_vars.id
if id:
record = db(table.id == id).select(table.file,
limitby=(0, 1)).first()
if record:
doc = record.file
if not hasattr(doc, "file") and not doc and not url:
form.errors.file = msg
form.errors.url = msg
# Do a checksum on the file to see if it's a duplicate
if isinstance(doc, cgi.FieldStorage) and doc.filename:
f = doc.file
form.vars.checksum = doc_checksum(f.read())
f.seek(0)
if form.vars.checksum is not None:
# Duplicate allowed if original version is deleted
query = ((table.checksum == form.vars.checksum) & \
(table.deleted == False))
result = db(query).select(table.name,
limitby=(0, 1)).first()
if result:
doc_name = result.name
form.errors["file"] = "%s %s" % \
(T("This file already exists on the server as"), doc_name)
return
# =============================================================================
def doc_image_represent(filename):
"""
Represent an image as a clickable thumbnail
@param filename: name of the image file
"""
return DIV(A(IMG(_src=URL(c="default", f="download",
args=filename),
_height=40),
_class="zoom",
_href=URL(c="default", f="download",
args=filename)))
# @todo: implement/activate the JavaScript for this:
#import uuid
#anchor = "zoom-media-image-%s" % uuid.uuid4()
#return DIV(A(IMG(_src=URL(c="default", f="download",
#args=filename),
#_height=40),
#_class="zoom",
#_href="#%s" % anchor),
#DIV(IMG(_src=URL(c="default", f="download",
#args=filename),
#_width=600),
#_id="%s" % anchor,
#_class="hide"))
# =============================================================================
def doc_checksum(docstr):
""" Calculate a checksum for a file """
import hashlib
converted = hashlib.sha1(docstr).hexdigest()
return converted
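# Usage sketch (illustrative, not part of the original module): the hex digest
# is the fingerprint document_onvalidation uses above to spot duplicate uploads,
# e.g. doc_checksum("hello") == "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d".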
# END =========================================================================
| 41.237569 | 125 | 0.470056 |
59edea0cd015d7849aa96dac0684d95e5a2184fb | 3,071 | py | Python | build-tools/code_generator/function_generator/generate_include_nbla_function_hpp.py | enomotom/nnabla | 1947fe16a0a41d19d76cd916f151aa1991ea1b44 | ["Apache-2.0"] | 1 | 2021-04-08T00:33:23.000Z | 2021-04-08T00:33:23.000Z | build-tools/code_generator/function_generator/generate_include_nbla_function_hpp.py | enomotom/nnabla | 1947fe16a0a41d19d76cd916f151aa1991ea1b44 | ["Apache-2.0"] | null | null | null | build-tools/code_generator/function_generator/generate_include_nbla_function_hpp.py | enomotom/nnabla | 1947fe16a0a41d19d76cd916f151aa1991ea1b44 | ["Apache-2.0"] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import generator_common.common as common
import utils.type_conv
def generate(info, func_name, func_name_snakecase, template):
arg_info = common.function_arguments(info)
func_arg_variable_defines = '\n'.join(['protected:'] + ['{} {}_;'.format(utils.type_conv.type_from_proto[
t]['cpp_var'], n) for t, n in zip(arg_info['types'], arg_info['names'])])
func_arg_variable_types = ', '.join(
[func_name] + [utils.type_conv.type_from_proto[t]['cpp'] for t in arg_info['types']])
func_arg_initializers = ', ' + ', '.join(['{0}_({0})'.format(n) for n in arg_info['names']])
func_arg_variables = ', '.join(
['ctx_'] + ['{}_'.format(n) for n in arg_info['names']])
func_args = ', '.join(['const Context &ctx'] + ['{} {}'.format(utils.type_conv.type_from_proto[
t]['cpp'], n) for t, n in zip(arg_info['types'], arg_info['names'])])
io_info = common.function_io(info)
template_defines = ', '.join(['typename {}'.format(t)
for t in io_info['templates']])
in_types = ', '.join(['get_dtype<{}>()'.format(t)
for t in io_info['input']['types']])
out_types = ', '.join(['get_dtype<{}>()'.format(t)
for t in io_info['output']['types']])
min_inputs = io_info['input']['min']
min_outputs = io_info['output']['min']
base_function_types = ', '.join([utils.type_conv.type_from_proto[t]['cpp'] for t in arg_info['types']])
base_function_args = ', '.join(['ctx'] + ['{}'.format(n) for n in arg_info['names']])
return template.format(func_name=func_name,
func_name_upcase=func_name.upper(),
template_defines=template_defines,
func_args=func_args,
func_arg_variable_defines=func_arg_variable_defines,
func_arg_variable_types=func_arg_variable_types,
func_arg_variables=func_arg_variables,
func_arg_initializers=func_arg_initializers,
in_types=in_types,
out_types=out_types,
min_inputs=min_inputs,
min_outputs=min_outputs,
base_function_types=base_function_types,
base_function_args=base_function_args)
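# Illustrative sketch (assumption, not from the original repo): for a
# hypothetical function with one float argument named "alpha", assuming
# utils.type_conv.type_from_proto maps its proto type to C++ "float", the
# pieces assembled above would come out as:
#   func_arg_variable_defines -> "protected:\nfloat alpha_;"
#   func_args                 -> "const Context &ctx, float alpha"
#   func_arg_initializers     -> ", alpha_(alpha)"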
| 52.050847 | 115 | 0.596874 |
6b45eae37df05e31fcd4f361da0d0c026799ea8f | 493 | py | Python | numba/cuda/simulator/cudadrv/nvvm.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | ["BSD-2-Clause"] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | numba/cuda/simulator/cudadrv/nvvm.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | ["BSD-2-Clause"] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | numba/cuda/simulator/cudadrv/nvvm.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | ["BSD-2-Clause"] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | '''
NVVM is not supported in the simulator, but stubs are provided to allow tests
to import correctly.
'''
class NvvmSupportError(ImportError):
pass
class NVVM(object):
def __init__(self):
raise NvvmSupportError('NVVM not supported in the simulator')
CompilationUnit = None
llvm_to_ptx = None
set_cuda_kernel = None
fix_data_layout = None
get_arch_option = None
LibDevice = None
NvvmError = None
def is_available():
return False
def get_supported_ccs():
return ()
| 18.961538 | 77 | 0.742394 |
7d3508373aaa604580dc5ad546f4655943e26c3b | 3,373 | py | Python | Scrapy/WebData/WebData/spiders/Qidian.py | LieonShelly/PythonFun | 811760d368885109f9359c2663d8ce74886f6ad6 | ["MIT"] | null | null | null | Scrapy/WebData/WebData/spiders/Qidian.py | LieonShelly/PythonFun | 811760d368885109f9359c2663d8ce74886f6ad6 | ["MIT"] | null | null | null | Scrapy/WebData/WebData/spiders/Qidian.py | LieonShelly/PythonFun | 811760d368885109f9359c2663d8ce74886f6ad6 | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
from scrapy import Spider
from scrapy import Request
from urllib.parse import urlencode
from WebData.items import BookItem
from WebData.items import ChapterItem
from WebData.items import ContentItem
class QidianSpider(Spider):
name = 'Qidian'
allowed_domains = ['www.qidian.com', 'book.qidian.com', 'read.qidian.com']
def start_requests(self):
# yield Request("https://read.qidian.com/chapter/8s7RmxQkntY3LbtcZNMchg2/fn_NgkMI2962uJcMpdsVgA2",callback=self.parseChapterContent)
data = {"page": 1}
maxPage = 1
baseURL = "https://www.qidian.com/free/all?"
for page in range(1, maxPage + 1):
data["page"] = page
url = baseURL + urlencode(data)
yield Request(url=url)
def parse(self, response):
list = response.css("div.all-book-list")
for item in list:
uls = item.css('.book-img-text').xpath('./ul')
for ul in uls:
lis = ul.xpath('./li')
for li in lis:
img = li.css('.book-img-box').xpath('./a/@href').extract_first()
detailURI = li.css('.book-mid-info').xpath('./h4//a/@href').extract_first()
bookTitle = li.css('.book-mid-info').xpath('./h4/a/text()').extract_first()
intro = li.css('.book-mid-info').css("p.intro::text").extract_first()
lastUpdateTitle = li.css('.book-mid-info').css('p.update::text').extract_first()
lastUpdateURI = li.css('.book-mid-info').css('p.update').xpath('./a/@href').extract_first()
catlog = "https:" + detailURI + "#Catalog"
bookItem = BookItem()
bookItem['detailURI'] = "https:" + detailURI
bookItem['bookTitle'] = bookTitle
bookItem['intro'] = intro
yield bookItem
yield Request(catlog,callback=self.parseChapterList)
def parseChapterList(self, response):
divWrap = response.css("div.volume-wrap")
for volume in divWrap:
uls = volume.css("ul.cf")
for ul in uls:
chapterContentURIs = ul.xpath('./li/a/@href')
chapterTitles = ul.xpath('./li/a/text()')
for (chapterContentURI, chapterTitle) in zip(chapterContentURIs, chapterTitles):
content = "https:" + chapterContentURI.extract()
chapater = ChapterItem()
chapater['chapterContentURI'] = content
chapater['chapterTitle'] = chapterTitle.extract()
chapater['detailURI'] = response.url
yield chapater
yield Request(content,callback=self.parseChapterContent)
def parseChapterContent(self, response):
chapterURI = response.url
contents = response.css('div.read-content').xpath('./p/text()')
contentStr = "".join(contents.extract()).replace('\u3000', "")
content = ContentItem()
content['content'] = contentStr
content['chapterContentURI'] = chapterURI
yield content
# 章节内容 https://read.qidian.com/chapter/j1uUFQtIDcYuTkiRw_sFYA2/j3uJh4HjyHn4p8iEw--PPw2
# 目录列表 https://book.qidian.com/info/1005269238#Catalog
# 书籍列表
| 43.805195 | 140 | 0.572191 |
0fa1ae524dcb8eb637c890f5a7f61ff426e3505b | 1,230 | py | Python | tests/test_singleton.py | tipofthesowrd/SoCo | 56bdc655b323d9178b591cab96b58dd28e5155f2 | ["MIT"] | 1,149 | 2015-01-02T02:08:34.000Z | 2022-03-30T13:58:04.000Z | tests/test_singleton.py | tipofthesowrd/SoCo | 56bdc655b323d9178b591cab96b58dd28e5155f2 | ["MIT"] | 630 | 2015-01-01T10:44:22.000Z | 2022-03-17T00:25:55.000Z | tests/test_singleton.py | tipofthesowrd/SoCo | 56bdc655b323d9178b591cab96b58dd28e5155f2 | ["MIT"] | 249 | 2015-01-07T20:11:10.000Z | 2022-03-14T05:54:20.000Z | """Tests for the SoCoSingletonBase and _ArgsSingleton classes in core."""
import pytest
from soco.core import _SocoSingletonBase as Base
class ASingleton(Base):
def __init__(self, arg):
pass
class AnotherSingleton(ASingleton):
pass
class ThirdSingleton(Base):
_class_group = "somegroup"
def __init__(self, arg):
pass
class FourthSingleton(ASingleton):
_class_group = "somegroup"
pass
def test_singleton():
"""Check basic functionality.
For a given arg, there is only one instance
"""
assert ASingleton("aa") is ASingleton("aa")
assert ASingleton("aa") is not ASingleton("bb")
def test_singleton_inherit():
"""Check that subclasses behave properly."""
assert ASingleton("aa") is not AnotherSingleton("aa")
assert AnotherSingleton("aa") is AnotherSingleton("aa")
def test_class_group_singleton():
"""Check _class_group functionality.
    For a given arg, FourthSingleton yields the same instance as
    ThirdSingleton, because the two classes share a `_class_group` value.
"""
assert ThirdSingleton("aa") is FourthSingleton("aa")
assert ThirdSingleton("aa") is not FourthSingleton("bb")
assert ThirdSingleton("aa") is not ASingleton("aa")
| 22.777778 | 73 | 0.705691 |
b82c1daaef4ddcbdd7d2965c6bbb297dfbac3592 | 665 | py | Python | testCurveFit.py | CaptainEven/PyScripts | 5852501634e1ff86d2ff6820b33bfffcea7112c9 | ["MIT"] | 5 | 2020-10-27T06:39:00.000Z | 2020-11-01T17:19:03.000Z | testCurveFit.py | CaptainEven/PyScripts | 5852501634e1ff86d2ff6820b33bfffcea7112c9 | ["MIT"] | null | null | null | testCurveFit.py | CaptainEven/PyScripts | 5852501634e1ff86d2ff6820b33bfffcea7112c9 | ["MIT"] | 2 | 2020-10-27T15:30:10.000Z | 2020-11-01T17:38:05.000Z | from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import numpy as np
def func_1(x, a, b, c):
return a * np.exp(b * x) + c
def func_2(x, a, b):
return x**a + b
x_data = np.linspace(0, 4, 50)
# y = func_1(x_data, 2.5, 1.5, 0.5)
y = func_2(x_data, 2.5, 1.3)
print('origin a: 2.50, b: 1.30')
y_data = y + 0.5 * np.random.normal(size=len(x_data))
plt.plot(x_data, y_data, 'b-')
popt, pcov = curve_fit(func_2, x_data, y_data)
# y2 = [func_1(i, popt[0], popt[1], popt[2]) for i in x_data]
y2 = [func_2(x_val, popt[0], popt[1]) for x_val in x_data]
plt.plot(x_data, y2, 'r--')
print('fitted a: %.2f, b: %.2f' %(popt[0], popt[1]))
plt.show()
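# Note (added for clarity, not in the original script): curve_fit also returns
# the parameter covariance matrix, so one-sigma uncertainties for the fitted
# parameters are available as np.sqrt(np.diag(pcov)).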
| 26.6 | 61 | 0.622556 |
de940a7c2991796eee9fdeb88cfe60e809c8aff9 | 119 | py | Python | src/tuberlin/inventory/emails.py | CircularBerlin/gmit | fbc995f631fa42815deab5cf07e8115a1b964d90 | ["MIT"] | null | null | null | src/tuberlin/inventory/emails.py | CircularBerlin/gmit | fbc995f631fa42815deab5cf07e8115a1b964d90 | ["MIT"] | null | null | null | src/tuberlin/inventory/emails.py | CircularBerlin/gmit | fbc995f631fa42815deab5cf07e8115a1b964d90 | ["MIT"] | null | null | null | from django.conf import settings
from django.template.loader import get_template
from django.core.mail import send_mail | 39.666667 | 47 | 0.865546 |
3491403825aa685bf29e40ea61c30f312c6f0b46 | 2,489 | py | Python | pygameweb/static/views.py | leereilly/pygameweb | c4df39770716bf737f3379da987538fa32f9f3d8 | ["BSD-2-Clause"] | 2 | 2021-05-11T19:29:03.000Z | 2021-05-12T06:08:00.000Z | pygameweb/static/views.py | leereilly/pygameweb | c4df39770716bf737f3379da987538fa32f9f3d8 | ["BSD-2-Clause"] | null | null | null | pygameweb/static/views.py | leereilly/pygameweb | c4df39770716bf737f3379da987538fa32f9f3d8 | ["BSD-2-Clause"] | null | null | null | """Adding static folders.
"""
from flask import Blueprint, send_from_directory
from pathlib import Path
static_blueprint = Blueprint('static',
__name__,
template_folder='../templates/')
files = ['lofi.html', 'server.json', 'hifi.html', 'favicon.ico', 'robots.txt']
folders = ['content', 'contests', 'css', 'ctypes', 'docs', 'docs-old', 'ftp',
'galleries', 'gamelets', 'games', 'html5media', 'images',
'interview', 'iscroll', 'js', 'ludumcontest1', 'ludumcontest2',
'mediaelement', 'mediagit', 'music', 'neu', 'new', 'old',
'old_bug_attachments', 'oldhtml', 'pcr', 'pcr_old', 'pygame_wincvs',
'search', 'shots', 'shredwheat', 'siteswing', 'skins', 'swfs', 'test',
'thumb', 'tiny_mce', 'tinymce_3_2_3_1', 'tmp', 'webalizer']
def is_there(app, full_path, file_folder):
full_path = Path('..', app.config['WWW']) / file_folder
return full_path.exists()
def add_folder(app, static_blueprint, folder):
"""Add a static folder.
"""
full_path = Path(app.config['WWW']) / folder
full_path_index = Path(app.config['WWW']) / folder / 'index.html'
full_path_str = str(full_path.absolute())
if not full_path.exists():
return
def download_file(path):
return send_from_directory(full_path_str, path)
url = f'/{folder}/<path:path>'
path = f'static_{folder}'
app.add_url_rule(url, path, download_file)
if full_path_index.exists():
def download_index_file():
return send_from_directory(full_path_str, 'index.html')
url = f'/{folder}/'
path = f'static_{folder}_index'
app.add_url_rule(url, path, download_index_file)
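# Illustrative example (not in the original file): add_folder(app, bp, "images")
# registers /images/<path:path> under the endpoint name "static_images", plus
# /images/ -> "static_images_index" whenever <WWW>/images/index.html exists.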
def add_file(app, static_blueprint, file):
"""Add a file to serve.
"""
full_path = Path(app.config['WWW'])
full_path_str = str(full_path.absolute())
if not (full_path / file).exists():
return
def download_file():
return send_from_directory(full_path_str, file)
url = f'/{file}'
path = f'static_{file}'
app.add_url_rule(url, path, download_file)
def add_static_blueprint(app):
""" to the app.
"""
for folder in folders:
add_folder(app, static_blueprint, folder)
for file in files:
add_file(app, static_blueprint, file)
from pygameweb.cache import limiter
limiter.limit("1000/hour")(static_blueprint)
app.register_blueprint(static_blueprint)
| 30.728395 | 81 | 0.626758 |
6ee1c27f2c95ce066a2c361ed768aef48cf51d19 | 566 | py | Python | autojail/utils/__init__.py | ekut-es/autojail | bc16e40e6df55c0a28a3059715851ffa59b14ba8 | ["MIT"] | 6 | 2020-08-12T08:16:15.000Z | 2022-03-05T02:25:53.000Z | autojail/utils/__init__.py | ekut-es/autojail | bc16e40e6df55c0a28a3059715851ffa59b14ba8 | ["MIT"] | 1 | 2021-03-30T10:34:51.000Z | 2021-06-09T11:24:00.000Z | autojail/utils/__init__.py | ekut-es/autojail | bc16e40e6df55c0a28a3059715851ffa59b14ba8 | ["MIT"] | 1 | 2021-11-21T09:30:58.000Z | 2021-11-21T09:30:58.000Z | from .board import start_board, stop_board
from .collections import SortedCollection
from .connection import connect # noqa
from .debug import debug
from .deploy import deploy_target
from .fs import which
from .intervall_arithmetic import get_overlap
from .logging import ClikitLoggingHandler
from .string import pprint_tree, remove_prefix
__all__ = [
"connect",
"remove_prefix",
"ClikitLoggingHandler",
"SortedCollection",
"debug",
"pprint_tree",
"get_overlap",
"which",
"start_board",
"stop_board",
"deploy_target",
]
| 23.583333 | 46 | 0.736749 |
b0773ef73a68f1895d06caf86f2630117313dee4 | 3,145 | py | Python | bing_img_collector.py | kita83/bing_image_collector | 0f3163c7e7bf52b41400946434f022dc1bc1698d | ["Apache-2.0"] | null | null | null | bing_img_collector.py | kita83/bing_image_collector | 0f3163c7e7bf52b41400946434f022dc1bc1698d | ["Apache-2.0"] | null | null | null | bing_img_collector.py | kita83/bing_image_collector | 0f3163c7e7bf52b41400946434f022dc1bc1698d | ["Apache-2.0"] | null | null | null | # coding: utf-8
import requests
import os
import math
import configparser # for Python3
import urllib
import re
import datetime
import bing_util
def get_headers(api_key):
return {"Ocp-Apim-Subscription-Key": api_key}
def get_params(search_term, num_imgs_per_transaction, offset):
return urllib.parse.urlencode({
"q": search_term,
"license": "All",
"imageType": "photo",
"count": num_imgs_per_transaction,
"offset": offset * num_imgs_per_transaction,
"mkt": "ja-JP"})
def get_search_results(search_url, headers, params):
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
return search_results
def save_urls(results, filepath):
format_ = ['jpg', 'jpeg']
with open(filepath, mode='a') as f:
for values in results:
if values['encodingFormat'] in format_:
print(values['contentUrl'], file=f)
def get_filename(path, fn, ext):
return os.path.join(path, '%s.%s' % (fn, ext))
def gen_url_save_file(search_term, url_dir_path, total_count):
ext = 'txt'
fn = bing_util.search_term2file_name(search_term)
filename = get_filename(url_dir_path, fn, ext)
dt = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
if os.path.isfile(filename):
fn = '%s_%s' % (fn, dt)
filename = get_filename(url_dir_path, fn, ext)
with open(filename, mode='w') as f:
print("date=%s, search_term=%s, totalEstimatedMatches=%d" % (dt, search_term, total_count), file=f)
return filename
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read('authentication.ini')
bing_api_key = config['auth']['bing_api_key']
save_dir_path = './bing_img'
bing_util.make_dir(save_dir_path)
url_dir_path = os.path.join(save_dir_path, 'url')
bing_util.make_dir(url_dir_path)
num_imgs_required = 3 # Number of images you want.
num_imgs_per_transaction = 50 # default 30, Max 150 images
search_term = "dog"
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/images/search"
headers = get_headers(bing_api_key)
params = get_params(search_term, num_imgs_per_transaction, 0)
first_search_results = get_search_results(search_url, headers, params)
total_count = first_search_results["totalEstimatedMatches"]
print("totalEstimatedMatches=%d" % total_count)
filepath = gen_url_save_file(search_term, url_dir_path, total_count)
print("len=%d" % (len(first_search_results["value"])))
save_urls(first_search_results["value"], filepath)
if num_imgs_required > total_count:
num_imgs_required = total_count
offset_count = math.ceil(num_imgs_required / num_imgs_per_transaction)
print('offset_count=%d' % offset_count)
for offset in range(1, offset_count):
params = get_params(search_term, num_imgs_per_transaction, offset)
search_results = get_search_results(search_url, headers, params)
print("len=%d" % len(search_results["value"]))
save_urls(search_results["value"], filepath)
| 31.767677 | 107 | 0.696343 |
4b6ba6399b3273df6f1270c20f23d2b7e8512a92 | 284 | py | Python | ca_qc_westmount/__init__.py | dcycle/scrapers-ca | 4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f | ["MIT"] | 19 | 2015-05-26T03:18:50.000Z | 2022-01-31T03:27:41.000Z | ca_qc_westmount/__init__.py | dcycle/scrapers-ca | 4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f | ["MIT"] | 119 | 2015-01-09T06:09:35.000Z | 2022-01-20T23:05:05.000Z | ca_qc_westmount/__init__.py | dcycle/scrapers-ca | 4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f | ["MIT"] | 17 | 2015-11-23T05:00:10.000Z | 2021-09-15T16:03:33.000Z | from utils import CanadianJurisdiction
class Westmount(CanadianJurisdiction):
classification = 'legislature'
division_id = 'ocd-division/country:ca/csd:2466032'
division_name = 'Westmount'
name = 'Conseil municipal de Westmount'
url = 'http://www.westmount.org'
| 28.4 | 55 | 0.739437 |
bed747a7170aaf89c9ff53d9c3da2698b564eaa4 | 9,470 | py | Python | tools/webdriver/webdriver/transport.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | ["BSD-3-Clause"] | 1 | 2021-12-12T18:13:24.000Z | 2021-12-12T18:13:24.000Z | tools/webdriver/webdriver/transport.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | ["BSD-3-Clause"] | 112 | 2021-09-27T14:39:02.000Z | 2022-03-30T14:26:35.000Z | tools/webdriver/webdriver/transport.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | ["BSD-3-Clause"] | null | null | null | import json
import select
from http.client import HTTPConnection
from typing import Dict, List, Mapping, Sequence, Tuple
from urllib import parse as urlparse
from . import error
"""Implements HTTP transport for the WebDriver wire protocol."""
missing = object()
class ResponseHeaders(Mapping[str, str]):
"""Read-only dictionary-like API for accessing response headers.
This class:
* Normalizes the header keys it is built with to lowercase (such that
iterating the items will return lowercase header keys).
* Has case-insensitive header lookup.
* Always returns all header values that have the same name, separated by
commas.
"""
def __init__(self, items: Sequence[Tuple[str, str]]):
self.headers_dict: Dict[str, List[str]] = {}
for key, value in items:
key = key.lower()
if key not in self.headers_dict:
self.headers_dict[key] = []
self.headers_dict[key].append(value)
def __getitem__(self, key):
"""Get all headers of a certain (case-insensitive) name. If there is
more than one, the values are returned comma separated"""
values = self.headers_dict[key.lower()]
if len(values) == 1:
return values[0]
else:
return ", ".join(values)
def get_list(self, key, default=missing):
"""Get all the header values for a particular field name as a list"""
try:
return self.headers_dict[key.lower()]
except KeyError:
if default is not missing:
return default
else:
raise
def __iter__(self):
yield from self.headers_dict
def __len__(self):
return len(self.headers_dict)
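# Usage sketch (illustrative, not part of the original module):
#
#     headers = ResponseHeaders([("Set-Cookie", "a=1"), ("SET-COOKIE", "b=2")])
#     headers["set-cookie"]           # -> "a=1, b=2" (joined, case-insensitive)
#     headers.get_list("Set-Cookie")  # -> ["a=1", "b=2"]
#     len(headers)                    # -> 1 (one distinct lowercased key)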
class Response:
"""
Describes an HTTP response received from a remote end whose
body has been read and parsed as appropriate.
"""
def __init__(self, status, body, headers):
self.status = status
self.body = body
self.headers = headers
def __repr__(self):
cls_name = self.__class__.__name__
if self.error:
return "<%s status=%s error=%s>" % (cls_name, self.status, repr(self.error))
return "<% status=%s body=%s>" % (cls_name, self.status, json.dumps(self.body))
def __str__(self):
return json.dumps(self.body, indent=2)
@property
def error(self):
if self.status != 200:
return error.from_response(self)
return None
@classmethod
def from_http(cls, http_response, decoder=json.JSONDecoder, **kwargs):
try:
body = json.load(http_response, cls=decoder, **kwargs)
headers = ResponseHeaders(http_response.getheaders())
except ValueError:
raise ValueError("Failed to decode response body as JSON:\n" +
http_response.read())
return cls(http_response.status, body, headers)
class HTTPWireProtocol:
"""
Transports messages (commands and responses) over the WebDriver
wire protocol.
Complex objects, such as ``webdriver.Element``, ``webdriver.Frame``,
and ``webdriver.Window`` are by default not marshaled to enable
use of `session.transport.send` in WPT tests::
session = webdriver.Session("127.0.0.1", 4444)
response = transport.send("GET", "element/active", None)
print response.body["value"]
# => {u'element-6066-11e4-a52e-4f735466cecf': u'<uuid>'}
Automatic marshaling is provided by ``webdriver.protocol.Encoder``
and ``webdriver.protocol.Decoder``, which can be passed in to
``HTTPWireProtocol.send`` along with a reference to the current
``webdriver.Session``::
session = webdriver.Session("127.0.0.1", 4444)
response = transport.send("GET", "element/active", None,
encoder=protocol.Encoder, decoder=protocol.Decoder,
session=session)
print response.body["value"]
# => webdriver.Element
"""
def __init__(self, host, port, url_prefix="/"):
"""
Construct interface for communicating with the remote server.
:param url: URL of remote WebDriver server.
:param wait: Duration to wait for remote to appear.
"""
self.host = host
self.port = port
self.url_prefix = url_prefix
self._conn = None
self._last_request_is_blocked = False
def __del__(self):
self.close()
def close(self):
"""Closes the current HTTP connection, if there is one."""
if self._conn:
try:
self._conn.close()
except OSError:
# The remote closed the connection
pass
self._conn = None
@property
def connection(self):
"""Gets the current HTTP connection, or lazily creates one."""
if not self._conn:
conn_kwargs = {}
# We are not setting an HTTP timeout other than the default when the
            # connection is created. The send method has a timeout value if needed.
self._conn = HTTPConnection(self.host, self.port, **conn_kwargs)
return self._conn
def url(self, suffix):
"""
From the relative path to a command end-point,
craft a full URL suitable to be used in a request to the HTTPD.
"""
return urlparse.urljoin(self.url_prefix, suffix)
def send(self,
method,
uri,
body=None,
headers=None,
encoder=json.JSONEncoder,
decoder=json.JSONDecoder,
timeout=None,
**codec_kwargs):
"""
Send a command to the remote.
The request `body` must be JSON serialisable unless a
custom `encoder` has been provided. This means complex
objects such as ``webdriver.Element``, ``webdriver.Frame``,
and `webdriver.Window`` are not automatically made
into JSON. This behaviour is, however, provided by
``webdriver.protocol.Encoder``, should you want it.
Similarly, the response body is returned au natural
as plain JSON unless a `decoder` that converts web
element references to ``webdriver.Element`` is provided.
Use ``webdriver.protocol.Decoder`` to achieve this behaviour.
The client will attempt to use persistent HTTP connections.
:param method: `GET`, `POST`, or `DELETE`.
:param uri: Relative endpoint of the requests URL path.
:param body: Body of the request. Defaults to an empty
dictionary if ``method`` is `POST`.
:param headers: Additional dictionary of headers to include
in the request.
:param encoder: JSON encoder class, which defaults to
``json.JSONEncoder`` unless specified.
:param decoder: JSON decoder class, which defaults to
``json.JSONDecoder`` unless specified.
:param codec_kwargs: Surplus arguments passed on to `encoder`
and `decoder` on construction.
:return: Instance of ``webdriver.transport.Response``
describing the HTTP response received from the remote end.
:raises ValueError: If `body` or the response body are not
JSON serialisable.
"""
if body is None and method == "POST":
body = {}
payload = None
if body is not None:
try:
payload = json.dumps(body, cls=encoder, **codec_kwargs)
except ValueError:
raise ValueError("Failed to encode request body as JSON:\n"
"%s" % json.dumps(body, indent=2))
        # When the timeout triggers, the TestRunnerManager thread will reuse
        # this connection to check if the WebDriver is alive and we may end up
        # raising an httplib.CannotSendRequest exception if the WebDriver is
# not responding and this httplib.request() call is blocked on the
# runner thread. We use the boolean below to check for that and restart
# the connection in that case.
self._last_request_is_blocked = True
        response = self._request(method, uri, payload, headers, timeout=timeout)
self._last_request_is_blocked = False
return Response.from_http(response, decoder=decoder, **codec_kwargs)
def _request(self, method, uri, payload, headers=None, timeout=None):
if isinstance(payload, str):
payload = payload.encode("utf-8")
if headers is None:
headers = {}
headers.update({"Connection": "keep-alive"})
url = self.url(uri)
if self._last_request_is_blocked or self._has_unread_data():
self.close()
self.connection.request(method, url, payload, headers)
        # timeout for request has to be set just before calling httplib.getresponse()
        # and the previous value restored just after that, even if an exception is raised
try:
if timeout:
previous_timeout = self._conn.gettimeout()
self._conn.settimeout(timeout)
response = self.connection.getresponse()
finally:
if timeout:
self._conn.settimeout(previous_timeout)
return response
def _has_unread_data(self):
return self._conn and self._conn.sock and select.select([self._conn.sock], [], [], 0)[0]
| 35.601504 | 96 | 0.618374 |
bcf72b5f13a0d174d200e1783f32c66110bd478a | 1,926 | py | Python | setup.py | sommersoft/Adafruit_CircuitPython_GC_IOT_Core | 7e1ab12200d41f13c3561fc13f7eaaca2da86a40 | ["Apache-2.0"] | null | null | null | setup.py | sommersoft/Adafruit_CircuitPython_GC_IOT_Core | 7e1ab12200d41f13c3561fc13f7eaaca2da86a40 | ["Apache-2.0"] | null | null | null | setup.py | sommersoft/Adafruit_CircuitPython_GC_IOT_Core | 7e1ab12200d41f13c3561fc13f7eaaca2da86a40 | ["Apache-2.0"] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="adafruit-circuitpython-gc-iot-core",
use_scm_version=True,
setup_requires=["setuptools_scm"],
description="Google Cloud IoT Core Client for CircuitPython",
long_description=long_description,
long_description_content_type="text/x-rst",
# The project's main homepage.
url="https://github.com/adafruit/Adafruit_CircuitPython_GC_IOT_CORE",
# Author details
author="Adafruit Industries",
author_email="circuitpython@adafruit.com",
install_requires=["Adafruit-Blinka"],
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Hardware",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
# What does your project relate to?
keywords="adafruit blinka circuitpython micropython gc_iot_core gcs, google cloud "
"platform, cloud, iot, core",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# TODO: IF LIBRARY FILES ARE A PACKAGE FOLDER,
# CHANGE `py_modules=['...']` TO `packages=['...']`
py_modules=["adafruit_gc_iot_core"],
)
| 35.018182 | 87 | 0.687435 |
74a99fe25d427a5880e4355c435f83e5b2503dfe | 234 | py | Python | src/sql/master/stock_trans.py | springmaple/yotimes_sql_bridge | 0d1d17c63d867bb2c2ad286851343c1884fdddf8 | ["Apache-2.0"] | null | null | null | src/sql/master/stock_trans.py | springmaple/yotimes_sql_bridge | 0d1d17c63d867bb2c2ad286851343c1884fdddf8 | ["Apache-2.0"] | 4 | 2020-06-02T14:29:17.000Z | 2021-05-16T09:13:52.000Z | src/sql/master/stock_trans.py | springmaple/yotimes_sql_bridge | 0d1d17c63d867bb2c2ad286851343c1884fdddf8 | ["Apache-2.0"] | null | null | null | from sql.entity import Entity
class StockTrans(Entity):
def __init__(self, data):
super().__init__(data)
self.trans_no = self._get_int('TransNo') # Primary key
self.item_code = self._get_str('ItemCode')
| 26 | 63 | 0.666667 |
5967cde6a6ba67ca9be59e67309d47239668fbd1 | 3,668 | py | Python | tests/test_scenes.py | Karagusto/Game_ppg | f5f7eccaf46495e3872b94aba8df56abab83b622 | ["Artistic-2.0"] | 1 | 2018-07-30T15:05:45.000Z | 2018-07-30T15:05:45.000Z | tests/test_scenes.py | jamesBrosnahan/pursuedpybear | 8e5498b66de71c1fbe62f869d50803e22860b6f8 | ["Artistic-2.0"] | null | null | null | tests/test_scenes.py | jamesBrosnahan/pursuedpybear | 8e5498b66de71c1fbe62f869d50803e22860b6f8 | ["Artistic-2.0"] | null | null | null | from unittest.mock import Mock
from pytest import fixture
from pytest import mark
from pytest import raises
from ppb.scenes import BaseScene
from ppb.camera import Camera
from ppb.scenes import GameObjectCollection
class TestEnemy:
pass
class TestPlayer:
pass
class TestSubclassPlayer(TestPlayer):
pass
class TestSprite:
pass
def containers():
yield GameObjectCollection()
yield BaseScene()
def players():
yield TestPlayer()
yield TestSubclassPlayer()
def players_and_containers():
for player in players():
for container in containers():
yield player, container
@fixture()
def enemies():
return TestEnemy(), TestEnemy()
@fixture()
def scene():
return BaseScene()
@mark.parametrize("player, container", players_and_containers())
def test_add_methods(container, player, enemies):
container.add(player)
for group, sprite in zip(("red", "blue"), enemies):
container.add(sprite, [group])
assert player in container
for enemy in enemies:
assert enemy in container
@mark.parametrize("player, container", players_and_containers())
def test_get_methods(container, player, enemies):
sprite = TestSprite()
container.add(player, ["red"])
container.add(enemies[0])
container.add(enemies[1], ["red"])
container.add(sprite)
assert set(container.get(kind=TestEnemy)) == set(enemies)
assert set(container.get(kind=TestPlayer)) == {player}
assert set(container.get(kind=TestSprite)) == {sprite}
assert set(container.get(tag="red")) == {player, enemies[1]}
assert set(container.get(tag="this doesn't exist")) == set()
with raises(TypeError):
container.get()
@mark.parametrize("player, container", players_and_containers())
def test_get_with_string_tags(container, player):
"""Test that addings a string instead of an array-like throws."""
with raises(TypeError):
container.add(player, "player")
@mark.parametrize("player, container", players_and_containers())
def test_remove_methods(container, player, enemies):
container.add(player, ["test"])
container.add(enemies[0], ["test"])
container.add(enemies[1], ["blue"])
assert player in container
assert enemies[0] in container
assert enemies[1] in container
container.remove(player)
assert player not in container
for kind in container.kinds:
assert player not in container.get(kind=kind)
for tag in container.tags:
assert player not in container.get(tag=tag)
assert enemies[0] in container
assert enemies[0] in container.get(tag="test")
assert enemies[1] in container
@mark.parametrize("player", players())
def test_collection_methods(player, enemies):
container = GameObjectCollection()
container.add(player)
container.add(enemies[0])
# Test __len__
assert len(container) == 2
# Test __contains__
assert player in container
assert enemies[1] not in container
# Test __iter__
for game_object in container:
assert game_object is player or game_object is enemies[0]
def test_main_camera(scene):
assert isinstance(scene.main_camera, Camera)
old_cam = scene.main_camera
new_cam = Camera()
scene.main_camera = new_cam
assert scene.main_camera == new_cam
assert old_cam not in scene
assert new_cam in scene
def test_class_attrs():
class BackgroundScene(BaseScene):
background_color = (0, 4, 2)
scene = BackgroundScene()
assert scene.background_color == (0, 4, 2)
scene = BackgroundScene(background_color=(2, 4, 0))
assert scene.background_color == (2, 4, 0)
| 23.818182 | 69 | 0.698473 |
0106cff2042a26b0a9a8691849dc5f17d184736b | 19,924 | py | Python | built-in/TensorFlow/Official/nlp/Bert-qa_ID0369_for_TensorFlow/run_pretraining.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/nlp/Bert-qa_ID0369_for_TensorFlow/run_pretraining.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Official/nlp/Bert-qa_ID0369_for_TensorFlow/run_pretraining.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from npu_bridge.npu_init import *
import os
import modeling
import optimization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(),
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
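# Illustration (values assumed): with per_example_loss = [2.0, 4.0, 0.7] and
# label_weights = [1, 1, 0] (the last slot being a padding prediction),
# numerator = 6.0 and denominator ≈ 2.0, so loss ≈ 3.0 — padded positions
# contribute nothing to the average.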
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
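# Illustration (values assumed): with labels = [0, 1] and
# log_probs = [[-0.1, -2.3], [-1.9, -0.2]], the one-hot selection gives
# per_example_loss = [0.1, 0.2] and loss = 0.15.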
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
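# Illustration (shapes assumed): with batch_size=2, seq_length=4 and
# positions=[[1, 3], [0, 2]], flat_offsets=[[0], [4]], so
# flat_positions=[1, 3, 4, 6] — row-major indices into the flattened
# (batch_size * seq_length, width) tensor.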
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
# and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
return d
return input_fn
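# Note: parallel_interleave with sloppy=True trades determinism for
# throughput — records from up to `cycle_length` files are emitted in
# whatever order they become available, which is the extra shuffle-like
# randomness mentioned in the comments above.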
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
# for NPU: route the graph through NpuOptimizer with mixed precision
session_config = tf.ConfigProto()
custom_op = session_config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
# end of NPU-specific configuration
run_config = tf.contrib.tpu.RunConfig(
session_config = session_config,
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=npu_run_config_init(run_config=run_config),
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size, eval_on_tpu=False, export_to_tpu=False)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=npu_hooks_append())
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 38.38921 | 103 | 0.696748 |
7f95d96d263b208d9663c4a9037679523e9642ad | 3,582 | py | Python | reportportal_client/core/rp_response.py | Scanters/client-Python | ece2a12ac390d8d66d507c5d61104b695851ff5d | [
"Apache-2.0"
] | null | null | null | reportportal_client/core/rp_response.py | Scanters/client-Python | ece2a12ac390d8d66d507c5d61104b695851ff5d | [
"Apache-2.0"
] | null | null | null | reportportal_client/core/rp_response.py | Scanters/client-Python | ece2a12ac390d8d66d507c5d61104b695851ff5d | [
"Apache-2.0"
] | 1 | 2020-04-10T07:23:15.000Z | 2020-04-10T07:23:15.000Z | # -*- coding: utf-8 -*-
from requests import Response
from reportportal_client.static.defines import NOT_FOUND
from reportportal_client.static.exception import EntryCreatedError, OperationCompletionError, ResponseError
class RPMessage(object):
__slots__ = ["message", "error_code"]
def __init__(self, data):
assert isinstance(data, dict)
self.message = data.get("message", NOT_FOUND)
self.error_code = data.get("error_code", NOT_FOUND)
def __str__(self):
if self.error_code is NOT_FOUND:
return self.message
return "{error_code}: {message}".format(error_code=self.error_code, message=self.message)
@property
def is_empty(self):
return self.message is NOT_FOUND
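# Usage sketch (payload values illustrative):
# msg = RPMessage({"message": "Item not found", "error_code": 4041})
# str(msg) -> "4041: Item not found"; a payload without these keys makes
# msg.is_empty True, and RPResponse.iter_messages skips it.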
class RPResponse(object):
__slots__ = ["_data"]
def __init__(self, data):
# type: (Response) -> None
self._data = self._to_json(data) # type: dict
@staticmethod
def _get_json(data):
# type: (Response) -> dict
if not data.text:
return {}
try:
return data.json()
except ValueError as error:
raise ResponseError("Invalid response: {}: {}".format(error, data.text))
@property
def json(self):
return self._data
@property
def is_success(self):
return True
@property
def id(self):
return self.json.get("id", NOT_FOUND)
@property
def message(self):
return self.json.get("msg", NOT_FOUND)
@property
def messages(self):
return tuple(self.iter_messages())
def iter_messages(self):
data = self.json.get("responses", [self.json])
for chunk in data:
message = RPMessage(chunk)
if not message.is_empty:
yield message
# ---- OLD ---
def _get_id(self, response):
try:
return self._get_data(response)["id"]
except KeyError:
raise EntryCreatedError(
"No 'id' in response: {0}".format(response.text))
def _get_msg(self, response):
try:
return self._get_data(response)["msg"]
except KeyError:
raise OperationCompletionError(
"No 'msg' in response: {0}".format(response.text))
def _get_data(self, response):
data = self._to_json(response)
error_messages = self._get_messages(data)
error_count = len(error_messages)
if error_count == 1:
raise ResponseError(error_messages[0])
elif error_count > 1:
raise ResponseError(
"\n - ".join(["Multiple errors:"] + error_messages))
elif not response.ok:
response.raise_for_status()
elif not data:
raise ResponseError("Empty response")
else:
return data
def _to_json(self, response):
try:
if response.text:
return response.json()
else:
return {}
except ValueError as value_error:
raise ResponseError(
"Invalid response: {0}: {1}".format(value_error, response.text))
def _get_messages(self, data):
error_messages = []
for ret in data.get("responses", [data]):
if "message" in ret:
if "error_code" in ret:
error_messages.append(
"{0}: {1}".format(ret["error_code"], ret["message"]))
else:
error_messages.append(ret["message"])
return error_messages
| 27.984375 | 107 | 0.577052 |
fbbd8e3d5f7ba1c8b1f2cae7c06a71daeba960ea | 6,718 | py | Python | accounts/views.py | rabiyulfahimhasim786/classbasedview | b09bfcb20e9cd5584a3fef9797251864ff0233a1 | [
"MIT"
] | null | null | null | accounts/views.py | rabiyulfahimhasim786/classbasedview | b09bfcb20e9cd5584a3fef9797251864ff0233a1 | [
"MIT"
] | null | null | null | accounts/views.py | rabiyulfahimhasim786/classbasedview | b09bfcb20e9cd5584a3fef9797251864ff0233a1 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework.response import Response
from .serializers import CurrenciesSerializer, StandardsSerializer, CountriesSerializer, TagSerializer
from .models import Currencies, Standards, Countries, Tag
from django.http import Http404
from rest_framework.views import APIView
from rest_framework import status
def new(request):
return HttpResponse("Hello, world. You're at the accounts index.")
def index(request):
return render(request, './index.html')
class CurrencyList(APIView):
"""
List all currencies, or create a new currency.
"""
def get(self, request, format=None):
currencies = Currencies.objects.all()
serializer = CurrenciesSerializer(currencies, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = CurrenciesSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CurrencyDetail(APIView):
"""
Retrieve, update or delete a currency instance.
"""
def get_object(self, pk):
try:
return Currencies.objects.get(pk=pk)
except Currencies.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
currencies = self.get_object(pk)
serializer = CurrenciesSerializer(currencies)
return Response(serializer.data)
def put(self, request, pk, format=None):
currencies = self.get_object(pk)
serializer = CurrenciesSerializer(currencies, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
currency = self.get_object(pk)
currency.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
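# Wiring sketch (URL paths are illustrative, not taken from this project):
# urlpatterns = [
#     path('currencies/', CurrencyList.as_view()),
#     path('currencies/<int:pk>/', CurrencyDetail.as_view()),
# ]
# The List/Detail pairs below follow the same pattern.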
class CountryList(APIView):
"""
List all countries, or create a new country.
"""
def get(self, request, format=None):
countries = Countries.objects.all()
serializer = CountriesSerializer(countries, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = CountriesSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CountryDetail(APIView):
"""
Retrieve, update or delete a country instance.
"""
def get_object(self, pk):
try:
return Countries.objects.get(pk=pk)
except Countries.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
countries = self.get_object(pk)
serializer = CountriesSerializer(countries)
return Response(serializer.data)
def put(self, request, pk, format=None):
countries = self.get_object(pk)
serializer = CountriesSerializer(countries, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
country = self.get_object(pk)
country.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class StandardList(APIView):
"""
List all standards, or create a new standard.
"""
def get(self, request, format=None):
standards = Standards.objects.all()
serializer = StandardsSerializer(standards, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = StandardsSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class StandardDetail(APIView):
"""
Retrieve, update or delete a standard instance.
"""
def get_object(self, pk):
try:
return Standards.objects.get(pk=pk)
except Standards.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
standards = self.get_object(pk)
serializer = StandardsSerializer(standards)
return Response(serializer.data)
def put(self, request, pk, format=None):
standards = self.get_object(pk)
serializer = StandardsSerializer(standards, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
standard = self.get_object(pk)
standard.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class TagList(APIView):
"""
List all tags, or create a new tag.
"""
def get(self, request, format=None):
tag = Tag.objects.all()
serializer = TagSerializer(tag, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = TagSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TagDetail(APIView):
"""
Retrieve, update or delete a tag instance.
"""
def get_object(self, pk):
try:
return Tag.objects.get(pk=pk)
except Tag.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
tag = self.get_object(pk)
serializer = TagSerializer(tag)
return Response(serializer.data)
def put(self, request, pk, format=None):
tag = self.get_object(pk)  # lowercase to avoid shadowing the Tag model
serializer = TagSerializer(tag, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
tag = self.get_object(pk)
tag.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| 34.27551 | 102 | 0.670586 |
b68fe2e6aae1a445fb0badd6bc2c5d265f57a98b | 5,184 | py | Python | examples/models/glyphs.py | pyjsdev/googlemap_flask | 9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5 | [
"BSD-3-Clause"
] | 6 | 2018-02-13T11:06:40.000Z | 2020-10-23T09:30:46.000Z | examples/models/glyphs.py | pyjsdev/googlemap_flask | 9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5 | [
"BSD-3-Clause"
] | 6 | 2018-11-29T15:40:20.000Z | 2021-05-07T14:59:30.000Z | examples/models/glyphs.py | pyjsdev/googlemap_flask | 9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5 | [
"BSD-3-Clause"
] | 3 | 2018-06-20T11:43:40.000Z | 2021-12-21T06:51:56.000Z | import numpy as np
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, HoverTool
from bokeh.models.widgets import Tabs, Panel, Paragraph
from bokeh.models.layouts import Column
from bokeh.models.glyphs import (
AnnularWedge, Annulus, Arc, Bezier, Circle, ImageURL, Line, MultiLine, Oval,
Patch, Patches, Quad, Quadratic, Ray, Rect, Segment, Square, Text, Wedge, CircleX, Triangle,
Cross, Diamond, InvertedTriangle, SquareX, Asterisk, SquareCross, DiamondCross, CircleCross, X
)
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
xpts = np.array([-.09, -.12, .0, .12, .09])
ypts = np.array([-.1, .02, .1, .02, -.1])
source = ColumnDataSource(dict(
x = x,
y = y,
sizes = sizes,
xs = [ xpts + xx for xx in x ],
ys = [ ypts + yy for yy in y ],
xp02 = x + 0.2,
xp01 = x + 0.1,
xm01 = x - 0.1,
yp01 = y + 0.1,
ym01 = y - 0.1,
))
xdr = DataRange1d()
ydr = DataRange1d()
def screen(value):
return dict(value=value, units="screen")
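# Note: screen() marks a dimension as screen-pixel units rather than data
# units, so e.g. the Annulus radii below stay at 10/20 px regardless of
# zoom, while Circle uses radius=0.1 in data space.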
glyphs = [
("annular_wedge", AnnularWedge(x="x", y="y", inner_radius=screen(10), outer_radius=screen(20), start_angle=0.6, end_angle=4.1, fill_color="#8888ee")),
("annulus", Annulus(x="x", y="y", inner_radius=screen(10), outer_radius=screen(20), fill_color="#7FC97F")),
("arc", Arc(x="x", y="y", radius=screen(20), start_angle=0.6, end_angle=4.1, line_color="#BEAED4", line_width=3)),
("bezier", Bezier(x0="x", y0="y", x1="xp02", y1="y", cx0="xp01", cy0="yp01", cx1="xm01", cy1="ym01", line_color="#D95F02", line_width=2)),
("image_url", ImageURL(x="x", y="y", w=0.4, h=0.4, url=dict(value="http://bokeh.pydata.org/en/latest/_static/images/logo.png"), anchor="center")),
("line", Line(x="x", y="y", line_color="#F46D43")),
("multi_line", MultiLine(xs="xs", ys="ys", line_color="#8073AC", line_width=2)),
("oval", Oval(x="x", y="y", width=screen(15), height=screen(25), angle=-0.7, fill_color="#1D91C0")),
("patch", Patch(x="x", y="y", fill_color="#A6CEE3")),
("patches", Patches(xs="xs", ys="ys", fill_color="#FB9A99")),
("quad", Quad(left="x", right="xp01", top="y", bottom="ym01", fill_color="#B3DE69")),
("quadratic", Quadratic(x0="x", y0="y", x1="xp02", y1="y", cx="xp01", cy="yp01", line_color="#4DAF4A", line_width=3)),
("ray", Ray(x="x", y="y", length=45, angle=-0.7, line_color="#FB8072", line_width=2)),
("rect", Rect(x="x", y="y", width=screen(10), height=screen(20), angle=-0.7, fill_color="#CAB2D6")),
("segment", Segment(x0="x", y0="y", x1="xm01", y1="ym01", line_color="#F4A582", line_width=3)),
("text", Text(x="x", y="y", text=["hello"])),
("wedge", Wedge(x="x", y="y", radius=screen(15), start_angle=0.6, end_angle=4.1, fill_color="#B3DE69")),
]
markers = [
("circle", Circle(x="x", y="y", radius=0.1, fill_color="#3288BD")),
("circle_x", CircleX(x="x", y="y", size="sizes", line_color="#DD1C77", fill_color=None)),
("circle_cross", CircleCross(x="x", y="y", size="sizes", line_color="#FB8072", fill_color=None, line_width=2)),
("square", Square(x="x", y="y", size="sizes", fill_color="#74ADD1")),
("square_x", SquareX(x="x", y="y", size="sizes", line_color="#FDAE6B", fill_color=None, line_width=2)),
("square_cross", SquareCross(x="x", y="y", size="sizes", line_color="#7FC97F", fill_color=None, line_width=2)),
("diamond", Diamond(x="x", y="y", size="sizes", line_color="#1C9099", line_width=2)),
("diamond_cross", DiamondCross(x="x", y="y", size="sizes", line_color="#386CB0", fill_color=None, line_width=2)),
("triangle", Triangle(x="x", y="y", size="sizes", line_color="#99D594", line_width=2)),
("inverted_triangle", InvertedTriangle(x="x", y="y", size="sizes", line_color="#DE2D26", line_width=2)),
("cross", Cross(x="x", y="y", size="sizes", line_color="#E6550D", fill_color=None, line_width=2)),
("asterisk", Asterisk(x="x", y="y", size="sizes", line_color="#F0027F", fill_color=None, line_width=2)),
("x", X(x="x", y="y", size="sizes", line_color="thistle", fill_color=None, line_width=2)),
]
def make_tab(title, glyph):
plot = Plot(x_range=xdr, y_range=ydr)
plot.title.text = title
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
plot.add_tools(HoverTool())
tab = Panel(child=plot, title=title)
return tab
def make_tabs(objs):
return Tabs(tabs=[ make_tab(title, obj) for title, obj in objs ], width=600)
layout = Column(children=[Paragraph(text="Only Image and ImageRGBA glyphs are not demonstrated."), make_tabs(glyphs), make_tabs(markers)])
doc = Document()
doc.add_root(layout)
if __name__ == "__main__":
doc.validate()
filename = "glyphs.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Glyphs"))
print("Wrote %s" % filename)
view(filename)
| 44.689655 | 154 | 0.63561 |
656ba7dcf0a02a597ec93c664469f8d58f0d198f | 7,189 | py | Python | src/decoder.py | aarora8/VistaOCR | dd8a41e45d4efe49816054c097d06be41937fae1 | [
"Apache-2.0"
] | 24 | 2018-06-05T06:38:24.000Z | 2021-12-08T12:20:06.000Z | src/decoder.py | aarora8/VistaOCR | dd8a41e45d4efe49816054c097d06be41937fae1 | [
"Apache-2.0"
] | 3 | 2018-08-16T23:47:30.000Z | 2020-02-13T09:48:53.000Z | src/decoder.py | aarora8/VistaOCR | dd8a41e45d4efe49816054c097d06be41937fae1 | [
"Apache-2.0"
] | 10 | 2018-05-07T17:30:45.000Z | 2020-01-29T10:34:02.000Z | import sys
import os
import numpy as np
import torch
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from textutils import uxxxx_to_utf8
class LmDecoder:
# max active 3k - 7k
# beam: 11 - 17
# lattice_beam 6-9
def __init__(self, alphabet, lm_file, word_sym_file, lm_units, acoustic_weight=0.8, max_active=5000, beam=16.0, lattice_beam=10.0):
self.alphabet = alphabet
# Only pull in if needed
script_path = os.path.dirname(os.path.realpath(__file__))
#sys.path.append(script_path + "/../eesen")
sys.path.insert(0,'/home/hltcoe/srawls/pyeesen')
import eesen
print("Loading eesen from: %s" % eesen.__file__)
self.acoustic_weight = acoustic_weight
self.lattice_decoder = eesen.LatticeDecoder(lm_file, word_sym_file, self.acoustic_weight, max_active, beam,
lattice_beam)
#self.lattice_decoder.EnableLattices("/home/hltcoe/srawls/tmp.lat.gz")
# Need to keep track of model-alphabet to LM-alphabet conversion
units = ['<ctc-blank>']
with open(lm_units, 'r') as fh:
for line in fh:
units.append(line.strip().split(' ')[0])
self.lmidx_to_char = units
self.lmchar_to_idx = dict(zip(units, range(len(units))))
# Let's precompute some stuff to make lm faster
print("Prep work...")
self.lm_swap_idxs = []
self.lm_swap_idxs_modelidx = []
self.lm_swap_idxs_lmidx = []
self.add_to_blank_char = []
self.add_to_blank_idx = []
for model_idx in range(len(self.alphabet.idx_to_char)):
char = self.alphabet.idx_to_char[model_idx]
if not char in self.lmchar_to_idx:
self.add_to_blank_char.append(char)
self.add_to_blank_idx.append(model_idx)
continue
lm_idx = self.lmchar_to_idx[char]
self.lm_swap_idxs.append( (model_idx,lm_idx) )
self.lm_swap_idxs_modelidx.append(model_idx)
self.lm_swap_idxs_lmidx.append(lm_idx)
print("Done prep work")
if len(self.add_to_blank_char) > 0:
print("\tFYI: these chars were in model but not in LM: %s" % str(self.add_to_blank_char))
def decode(self, tp_executor, model_output, batch_actual_timesteps, uttids, uxxxx=False):
T = model_output.size()[0]
B = model_output.size()[1]
# Actual model output is not set to probability vector yet, need to run softmax
probs = torch.nn.functional.log_softmax(model_output.view(-1, model_output.size(2)), dim=1).view(model_output.size(0),
model_output.size(1),
-1)
# Need to take care of issue where prob goes to a char in model-alphabet but not in lm-alphabet
# Just assign high prob to ctc-blank?
#print("Sum of missing chars' prob = %s" % str(model_output[:,:,self.add_to_blank_idx].sum(dim=2)))
#probs[:,:,0] += probs[:,:,self.add_to_blank_idx].sum(dim=2)
#probs[:,:,self.add_to_blank_idx] = 0
# Make sure we're on CPU
probs = probs.data.cpu()
# We run the decoder in parallel in worker threads; store the async futures here
decoder_futures = [None]*B
def decode_helper(probs, uttid, uxxxx):
res = self.lattice_decoder.Decode(probs, uttid)
res_utf8 = ''
if uxxxx == False:
for uxxxx_word in res.split(' '):
res_utf8 += ''.join([uxxxx_to_utf8(r) for r in uxxxx_word.split('_')])
res = res_utf8
else:
res_flatten = ''
for uxxxx_word in res.split(' '):
for uxxxx_char in uxxxx_word.split('_'):
res_flatten += uxxxx_char
res_flatten += ' '
res = res_flatten.strip()
return res
for b in range(B):
probs_remapped = np.full( (batch_actual_timesteps[b], len(self.lmidx_to_char)), np.log(1e-10))
probs_remapped[:,self.lm_swap_idxs_lmidx] = probs[:batch_actual_timesteps[b], b, self.lm_swap_idxs_modelidx]
# Just for right-to-left languages!
#probs_remapped = probs_remapped[::-1]
decoder_futures[b] = tp_executor.submit(decode_helper, probs_remapped, uttids[b], uxxxx)
# At this point all decoder tasks are done (we are outside scope of with ThreadPoolExecutor, so it has finished)
return decoder_futures
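# Note: ArgmaxDecoder below performs greedy (best-path) CTC decoding —
# per-timestep argmax, collapsing consecutive repeats and dropping blanks;
# e.g. the frame labels h, h, <blank>, e, l, l, <blank>, l, o decode to
# "hello".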
class ArgmaxDecoder:
def __init__(self, alphabet):
self.alphabet = alphabet
def decode(self, model_output, batch_actual_timesteps, uxxxx=False, lang=None):
start_decode = datetime.now()
if lang is None:
min_prob_thresh = 3 * 1 / len(self.alphabet)
else:
min_prob_thresh = 3 * 1 / len(self.alphabet[lang])
T = model_output.size()[0]
B = model_output.size()[1]
prev_char = ['' for _ in range(B)]
result = ['' for _ in range(B)]
for t in range(T):
# #gpu argmax (bug!!!!!)
# gpu_argmax = True
# argmaxs, argmax_idxs = model_output.data[t].max(dim=1)
# argmaxs.squeeze_()
# argmax_idxs.squeeze_()
# cpu argmax
gpu_argmax = False
model_output_at_t_cpu = model_output.data[t].cpu().numpy()
argmaxs = model_output_at_t_cpu.max(1).flatten()
argmax_idxs = model_output_at_t_cpu.argmax(1).flatten()
for b in range(B):
# Only look at valid model output for this batch entry
if t >= batch_actual_timesteps[b]:
continue
if argmax_idxs[b] == 0: # CTC Blank
prev_char[b] = ''
continue
# Heuristic
# If the model predicts a very low probability for every letter in the
# alphabet, treat that the same as a CTC blank
if argmaxs[b] < min_prob_thresh:
prev_char[b] = ''
continue
if lang is None:
char = self.alphabet.idx_to_char[argmax_idxs[b]]
else:
char = self.alphabet[lang].idx_to_char[argmax_idxs[b]]
if prev_char[b] == char:
continue
result[b] += char
prev_char[b] = char
# Add a space to all but last iteration
if t != T - 1:
result[b] += ' '
# Strip off final token-stream space if needed
for b in range(B):
if len(result[b]) > 0 and result[b][-1] == ' ':
result[b] = result[b][:-1]
# Check if we should return utf8 output
if uxxxx == False:
result = [uxxxx_to_utf8(r) for r in result]
return result
| 38.650538 | 135 | 0.559327 |
30ef54f0af0c0a8d43905af968d5f90406306d35 | 19,109 | py | Python | gamestonk_terminal/main_helper.py | crsdgzmn/GamestonkTerminal | 8cfa224cbb2833496ee4a1199356096a8b824a13 | [
"MIT"
] | 1 | 2021-05-04T04:57:13.000Z | 2021-05-04T04:57:13.000Z | gamestonk_terminal/main_helper.py | lolrenx/GamestonkTerminal | eb2b0d766bf1b6bb8656d6733083962efb152fe2 | [
"MIT"
] | null | null | null | gamestonk_terminal/main_helper.py | lolrenx/GamestonkTerminal | eb2b0d766bf1b6bb8656d6733083962efb152fe2 | [
"MIT"
] | null | null | null | import argparse
from sys import stdout
import random
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
import mplfinance as mpf
import yfinance as yf
import pytz
from gamestonk_terminal.helper_funcs import (
valid_date,
plot_view_stock,
parse_known_args_and_warn,
check_ohlc,
lett_to_num,
check_sources,
plot_autoscale,
)
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.technical_analysis import trendline_api as trend
def print_help(s_ticker, s_start, s_interval, b_is_market_open):
"""Print help"""
print("What do you want to do?")
print(" help help to see this menu again")
print(" quit to abandon the program")
print("")
print(" clear clear a specific stock ticker from analysis")
print(" load load a specific stock ticker for analysis")
print(" candle view a candle chart for a specific stock ticker")
print(" view view and load a specific stock ticker for technical analysis")
if s_ticker:
print(
" export export the currently loaded dataframe to a file or stdout"
)
s_intraday = (f"Intraday {s_interval}", "Daily")[s_interval == "1440min"]
if s_ticker and s_start:
print(f"\n{s_intraday} Stock: {s_ticker} (from {s_start.strftime('%Y-%m-%d')})")
elif s_ticker:
print(f"\n{s_intraday} Stock: {s_ticker}")
else:
print("\nStock: ?")
print(f"Market {('CLOSED', 'OPEN')[b_is_market_open]}.\n")
print(
" > disc discover trending stocks, \t e.g. map, sectors, high short interest"
)
print(
" > scr screener stocks, \t\t e.g. overview/performance, using preset filters"
)
print(" > mill papermill menu, \t\t menu to generate notebook reports")
print(" > econ economic data, \t\t e.g.: FRED, events")
print(
" > pa portfolio analysis, \t\t supports: robinhood, alpaca, ally "
)
print(
" > crypto cryptocurrencies, \t\t from: coingecko, coinmarketcap, binance"
)
print(
" > po portfolio optimization, \t optimal portfolio weights from pyportfolioopt"
)
print(" > fx forex menu, \t\t\t forex support through Oanda")
print(" > rc resource collection, \t\t e.g. hf letters")
if s_ticker:
print(
" > ba behavioural analysis, \t from: reddit, stocktwits, twitter, google"
)
print(
" > res research web page, \t e.g.: macroaxis, yahoo finance, fool"
)
print(
" > ca comparison analysis, \t e.g.: historical, correlation, financials"
)
print(
" > fa fundamental analysis, \t e.g.: income, balance, cash, earnings"
)
print(
" > ta technical analysis, \t e.g.: ema, macd, rsi, adx, bbands, obv"
)
print(
" > bt strategy backtester, \t e.g.: simple ema, ema cross, rsi strategies"
)
print(
" > dd in-depth due-diligence, \t e.g.: news, analyst, shorts, insider, sec"
)
print(
" > eda exploratory data analysis,\t e.g.: decompose, cusum, residuals analysis"
)
print(
" > pred prediction techniques, \t e.g.: regression, arima, rnn, lstm, prophet"
)
print(
" > ra residuals analysis, \t e.g.: model fit, qqplot, hypothesis test"
)
print(
" > op options info , \t e.g.: volume and open interest"
)
print("")
def clear(l_args, s_ticker, s_start, s_interval, df_stock):
parser = argparse.ArgumentParser(
add_help=False,
prog="clear",
description="""Clear previously loaded stock ticker.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return "", "", "", pd.DataFrame()
print("Clearing stock ticker to be used for analysis\n")
return "", "", "", pd.DataFrame()
except SystemExit:
print("")
return s_ticker, s_start, s_interval, df_stock
def load(l_args, s_ticker, s_start, s_interval, df_stock):
parser = argparse.ArgumentParser(
add_help=False,
prog="load",
description="Load stock ticker to perform analysis on. When the data source is 'yf', an Indian ticker can be"
" loaded by using '.NS' at the end, e.g. 'SBIN.NS'. See available market in"
" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html.",
)
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
required=True,
help="Stock ticker",
)
parser.add_argument(
"-s",
"--start",
type=valid_date,
default="2015-01-01",
dest="s_start_date",
help="The starting date (format YYYY-MM-DD) of the stock",
)
parser.add_argument(
"-i",
"--interval",
action="store",
dest="n_interval",
type=int,
default=1440,
choices=[1, 5, 15, 30, 60],
help="Intraday stock minutes",
)
parser.add_argument(
"--source",
action="store",
dest="source",
type=check_sources,
default="yf",
help="Source of historical data. 'yf' and 'av' available.",
)
parser.add_argument(
"-p",
"--prepost",
action="store_true",
default=False,
dest="b_prepost",
help="Pre/After market hours. Only works for 'yf' source, and intraday data",
)
try:
# For the case where a user uses: 'load BB'
if l_args:
if "-" not in l_args[0]:
l_args.insert(0, "-t")
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return [s_ticker, s_start, s_interval, df_stock]
# Daily
if ns_parser.n_interval == 1440:
# Alpha Vantage Source
if ns_parser.source == "av":
ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
# pylint: disable=unbalanced-tuple-unpacking
df_stock_candidate, _ = ts.get_daily_adjusted(
symbol=ns_parser.s_ticker, outputsize="full"
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
# pylint: disable=no-member
df_stock_candidate.sort_index(ascending=True, inplace=True)
# Slice dataframe from the starting date YYYY-MM-DD selected
df_stock_candidate = df_stock_candidate[ns_parser.s_start_date :]
# Yahoo Finance Source
elif ns_parser.source == "yf":
df_stock_candidate = yf.download(
ns_parser.s_ticker, start=ns_parser.s_start_date, progress=False
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
df_stock_candidate = df_stock_candidate.rename(
columns={
"Open": "1. open",
"High": "2. high",
"Low": "3. low",
"Close": "4. close",
"Adj Close": "5. adjusted close",
"Volume": "6. volume",
}
)
df_stock_candidate.index.name = "date"
# Check if start time from dataframe is more recent than specified
if df_stock_candidate.index[0] > pd.to_datetime(ns_parser.s_start_date):
s_start = df_stock_candidate.index[0]
else:
s_start = ns_parser.s_start_date
# Intraday
else:
# Alpha Vantage Source
if ns_parser.source == "av":
ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
# pylint: disable=unbalanced-tuple-unpacking
df_stock_candidate, _ = ts.get_intraday(
symbol=ns_parser.s_ticker,
outputsize="full",
interval=str(ns_parser.n_interval) + "min",
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
# pylint: disable=no-member
df_stock_candidate.sort_index(ascending=True, inplace=True)
# Slice dataframe from the starting date YYYY-MM-DD selected
df_stock_candidate = df_stock_candidate[ns_parser.s_start_date :]
# Check if start time from dataframe is more recent than specified
if df_stock_candidate.index[0] > pd.to_datetime(ns_parser.s_start_date):
s_start = df_stock_candidate.index[0]
else:
s_start = ns_parser.s_start_date
# Yahoo Finance Source
elif ns_parser.source == "yf":
s_int = str(ns_parser.n_interval) + "m"
d_granularity = {"1m": 6, "5m": 59, "15m": 59, "30m": 59, "60m": 729}
s_start_dt = datetime.utcnow() - timedelta(days=d_granularity[s_int])
s_date_start = s_start_dt.strftime("%Y-%m-%d")
if s_start_dt > ns_parser.s_start_date:
# Yahoo Finance only serves this granularity back to s_date_start, so use that as the start date
df_stock_candidate = yf.download(
ns_parser.s_ticker,
start=s_date_start,
progress=False,
interval=s_int,
prepost=ns_parser.b_prepost,
)
else:
df_stock_candidate = yf.download(
ns_parser.s_ticker,
start=ns_parser.s_start_date.strftime("%Y-%m-%d"),
progress=False,
interval=s_int,
prepost=ns_parser.b_prepost,
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
print("")
return [s_ticker, s_start, s_interval, df_stock]
if s_start_dt > ns_parser.s_start_date:
s_start = pytz.utc.localize(s_start_dt)
else:
s_start = ns_parser.s_start_date
df_stock_candidate = df_stock_candidate.rename(
columns={
"Open": "1. open",
"High": "2. high",
"Low": "3. low",
"Close": "4. close",
"Adj Close": "5. adjusted close",
"Volume": "6. volume",
}
)
df_stock_candidate.index.name = "date"
s_intraday = (f"Intraday {s_interval}", "Daily")[ns_parser.n_interval == 1440]
print(
f"Loading {s_intraday} {ns_parser.s_ticker.upper()} stock "
f"with starting period {s_start.strftime('%Y-%m-%d')} for analysis.\n"
)
return [
ns_parser.s_ticker.upper(),
s_start,
str(ns_parser.n_interval) + "min",
df_stock_candidate,
]
except Exception as e:
print(e, "\nEither the ticker or the API_KEY are invalids. Try again!\n")
return [s_ticker, s_start, s_interval, df_stock]
except SystemExit:
print("")
return [s_ticker, s_start, s_interval, df_stock]
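# Usage sketch (arguments illustrative): called from the terminal loop as
# load("-t AAPL -s 2020-01-01 -i 60".split(), s_ticker, s_start,
# s_interval, df_stock), returning ["AAPL", <start>, "60min", <dataframe>]
# on success.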
def candle(s_ticker: str, s_start: str):
df_stock = trend.load_ticker(s_ticker, s_start)
df_stock = trend.find_trendline(df_stock, "OC_High", "high")
df_stock = trend.find_trendline(df_stock, "OC_Low", "low")
mc = mpf.make_marketcolors(
up="green", down="red", edge="black", wick="black", volume="in", ohlc="i"
)
s = mpf.make_mpf_style(marketcolors=mc, gridstyle=":", y_on_right=True)
ap0 = []
if "OC_High_trend" in df_stock.columns:
ap0.append(
mpf.make_addplot(df_stock["OC_High_trend"], color="g"),
)
if "OC_Low_trend" in df_stock.columns:
ap0.append(
mpf.make_addplot(df_stock["OC_Low_trend"], color="b"),
)
if gtff.USE_ION:
plt.ion()
mpf.plot(
df_stock,
type="candle",
mav=(20, 50),
volume=True,
title=f"\n{s_ticker} - Last 6 months",
addplot=ap0,
xrotation=10,
style=s,
figratio=(10, 7),
figscale=1.10,
figsize=(plot_autoscale()),
update_width_config=dict(
candle_linewidth=1.0, candle_width=0.8, volume_linewidth=1.0
),
)
print("")
def view(l_args, s_ticker, s_start, s_interval, df_stock):
parser = argparse.ArgumentParser(
add_help=False,
prog="view",
description="Visualize historical data of a stock. An alpha_vantage key is necessary.",
)
if s_ticker:
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
default=s_ticker,
help="Stock ticker",
)
else:
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
required=True,
help="Stock ticker",
)
parser.add_argument(
"-s",
"--start",
type=valid_date,
dest="s_start_date",
default=s_start,
help="The starting date (format YYYY-MM-DD) of the stock",
)
parser.add_argument(
"-i",
"--interval",
action="store",
dest="n_interval",
type=int,
default=0,
choices=[1, 5, 15, 30, 60],
help="Intraday stock minutes",
)
parser.add_argument(
"--type",
action="store",
dest="type",
type=check_ohlc,
default="a", # in case it's adjusted close
help=(
"ohlc corresponds to types: open; high; low; close; "
"while oc corresponds to types: open; close"
),
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
except SystemExit:
print("")
return
# Update values:
s_ticker = ns_parser.s_ticker
# A new interval intraday period was given
if ns_parser.n_interval != 0:
s_interval = str(ns_parser.n_interval) + "min"
type_candles = lett_to_num(ns_parser.type)
df_stock.sort_index(ascending=True, inplace=True)
# Slice dataframe from the starting date YYYY-MM-DD selected
df_stock = df_stock[ns_parser.s_start_date :]
# Daily
if s_interval == "1440min":
# The default doesn't exist for intradaily data
ln_col_idx = [int(x) - 1 for x in list(type_candles)]
# Check that the types given are not bigger than 4, as there are only 5 types (0-4)
# pylint: disable=len-as-condition
if len([i for i in ln_col_idx if i > 4]) > 0:
print("An index bigger than 4 was given, which is wrong. Try again")
return
# Append last column of df to be filtered which corresponds to: 6. Volume
ln_col_idx.append(5)
# Intraday
else:
# The default doesn't exist for intradaily data
if ns_parser.type == "a":
ln_col_idx = [3]
else:
ln_col_idx = [int(x) - 1 for x in list(type_candles)]
# Check that the types given are not bigger than 3, as there are only 4 types (0-3)
# pylint: disable=len-as-condition
if len([i for i in ln_col_idx if i > 3]) > 0:
print("An index bigger than 3 was given, which is wrong. Try again")
return
# Append last column of df to be filtered which corresponds to: 5. Volume
ln_col_idx.append(4)
# Plot view of the stock
plot_view_stock(df_stock.iloc[:, ln_col_idx], ns_parser.s_ticker)
def export(l_args, df_stock):
parser = argparse.ArgumentParser(
add_help=False,
prog="export",
description="Exports the historical data from this ticker to a file or stdout.",
)
parser.add_argument(
"-f",
"--filename",
type=str,
dest="s_filename",
default=stdout,
help="Name of file to save the historical data exported (stdout if unspecified)",
)
parser.add_argument(
"-F",
"--format",
dest="s_format",
type=str,
default="csv",
help="Export historical data into following formats: csv, json, excel, clipboard",
)
try:
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return
except SystemExit:
print("")
return
if df_stock.empty:
print("No data loaded yet to export.")
return
if ns_parser.s_format == "csv":
df_stock.to_csv(ns_parser.s_filename)
elif ns_parser.s_format == "json":
df_stock.to_json(ns_parser.s_filename)
elif ns_parser.s_format == "excel":
df_stock.to_excel(ns_parser.s_filename)
elif ns_parser.s_format == "clipboard":
df_stock.to_clipboard()
print("")
def print_goodbye():
goodbye_msg = [
"An informed ape, is a strong ape. ",
"Remember that stonks only go up. ",
"Diamond hands. ",
"Apes together strong. ",
"This is our way. ",
"Keep the spacesuit ape, we haven't reached the moon yet. ",
"I am not a cat. I'm an ape. ",
"We like the terminal. ",
]
goodbye_hr = datetime.now().hour
if goodbye_hr < 5:
goodbye_msg_time = "Go get some rest soldier!"
elif goodbye_hr < 11:
goodbye_msg_time = "Rise and shine baby!"
elif goodbye_hr < 17:
goodbye_msg_time = "Enjoy your day!"
elif goodbye_hr < 23:
goodbye_msg_time = "Tomorrow's another day!"
else:
goodbye_msg_time = "Go get some rest soldier!"
print(
goodbye_msg[random.randint(0, len(goodbye_msg) - 1)] + goodbye_msg_time + "\n"
)
| 33.34904 | 117 | 0.550788 |
a1e4312150f4ab81f7905b4b1833b62393b50863 | 596 | py | Python | theory/2nd_sprint/oop_baes04.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | 3 | 2020-11-18T05:16:30.000Z | 2021-03-08T06:36:01.000Z | theory/2nd_sprint/oop_baes04.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | null | null | null | theory/2nd_sprint/oop_baes04.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | 1 | 2021-01-20T12:41:48.000Z | 2021-01-20T12:41:48.000Z | import math
class Planet:
def __init__(self, name, radius, temp_celsius):
self.name = name
self.surface_area = 4 * math.pi * radius**2
self.average_temp_celsius = temp_celsius
self.average_temp_fahrenheit = temp_celsius * 9 / 5 + 32
def show_info(self):
print(f"Планета {self.name} имеет площадь поверхности {self.surface_area} кв.км.")
print(f"Средняя температура поверхности планеты: {self.average_temp_fahrenheit}° по Фаренгейту.")
jupiter = Planet('Юпитер', 69911, -108)
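# Worked numbers for the instance above:
# surface_area = 4 * pi * 69911**2 ≈ 6.14e10 sq. km;
# -108 °C -> -108 * 9/5 + 32 = -162.4 °F.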
# call the show_info method for Jupiter
jupiter.show_info() | 33.111111 | 105 | 0.696309 |
543670570d9752d5d2b139b8b8e10443396ab612 | 10,660 | py | Python | website/views.py | hackerspace-ntnu/website | 0c296cb39759778aaf0c296027345a658414b397 | [
"MIT"
] | 25 | 2016-04-13T20:25:37.000Z | 2021-11-26T14:41:00.000Z | website/views.py | hackerspace-ntnu/website | 0c296cb39759778aaf0c296027345a658414b397 | [
"MIT"
] | 358 | 2016-02-20T21:13:27.000Z | 2022-03-31T20:06:03.000Z | website/views.py | hackerspace-ntnu/website | 0c296cb39759778aaf0c296027345a658414b397 | [
"MIT"
] | 7 | 2016-04-18T14:03:15.000Z | 2022-02-04T14:19:47.000Z | from datetime import datetime
from random import randint
from urllib import parse as urlparse
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from django.views.generic import DetailView, RedirectView, TemplateView
from applications.models import ApplicationPeriod
from committees.models import Committee
from door.models import DoorStatus
from inventory.models import ItemLoan
from news.models import Article, Event
from userprofile.models import Profile, TermsOfService
from .models import Card, FaqQuestion, Rule
from .settings import INTRANET_GREETINGS
class AcceptTosView(TemplateView):
template_name = "website/tos-returningls.html"
def get(self, request, *args, **kwargs):
if (
not self.request.user.is_authenticated
or self.request.user.profile.has_accepted_most_recent_tos()
):
# No user logged in, or user has already accepted TOS, return to main page
return redirect("/")
# Get originally visited page before TOS "pop-up"
refererUrl = request.META.get("HTTP_REFERER")
# Make sure page is valid before storing it for later
if refererUrl:
# Save users pre-TOS page path in session variable
# Parse converts from absolute to relative path
request.session["redirect_after_tos_accept"] = urlparse.urlparse(
refererUrl
).path
return super().get(self, request, *args, **kwargs)
class AcceptTosRedirectView(LoginRequiredMixin, RedirectView):
pattern_name = "index"
def get_redirect_url(self, *args, **kwargs):
profile = get_object_or_404(Profile, pk=self.request.user.profile.id)
if profile is not None:
most_recent_tos = TermsOfService.objects.order_by("-pub_date").first()
profile.accepted_tos = most_recent_tos
profile.save()
# Pop and redirect to pre-TOS path stored in session variable
# Redirects to '/' if pop fails
return self.request.session.pop(
"redirect_after_tos_accept", super().get_redirect_url(*args, **kwargs)
)
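# Flow note: AcceptTosView stores the referring path in the session;
# AcceptTosRedirectView then records the newest TermsOfService as accepted
# and pops that stored path, falling back to "index" when none was saved.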
class AboutView(TemplateView):
template_name = "website/about.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["committees"] = Committee.objects.filter(active=True).order_by(
"-priority"
)
context["faq"] = FaqQuestion.objects.all()
return context
class RulesView(TemplateView):
template_name = "website/rules.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if not self.request.user.has_perm("website.can_view_internal_rule"):
context["rules"] = Rule.objects.order_by("-priority").filter(internal=False)
else:
context["rules"] = Rule.objects.order_by("-priority")
return context
class RuleDetailsView(DetailView):
model = Rule
template_name = "website/rule_details.html"
def dispatch(self, request, *args, **kwargs):
rule = self.get_object()
if rule.internal and not request.user.has_perm(
"website.can_view_internal_rule"
):
return redirect("/")
return super(RuleDetailsView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["rule"] = Rule.objects.get(id=self.object.pk)
return context
class AdminView(PermissionRequiredMixin, TemplateView):
template_name = "website/admin.html"
permission_required = "userprofile.can_view_admin"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Get all users belonging to a committee as well as pang
committee_array = list(Committee.objects.values_list("name", flat=True))
committee_array.append("Pang")
profiles = Profile.objects.filter(
user__groups__name__in=committee_array
).order_by("user__first_name")
context["profiles"] = profiles
return context
class IndexView(TemplateView):
template_name = "website/index.html"
def get_internal_articles_indicator(self):
# Determine number of hidden internal articles
if not self.request.user.has_perm("news.can_view_internal_article"):
internal_articles_count = len(
Article.objects.filter(internal=True, draft=False)
)
else:
internal_articles_count = 0
badge_text = {
"plural": {
"large": "internal articles hidden",
"medium": "internal hidden",
"small": "hidden",
},
"singular": {
"large": "internal article hidden",
"medium": "internal hidden",
"small": "hidden",
},
}
return {
"count": internal_articles_count,
"badge_text": badge_text,
"tooltip_text": "Trykk for å logge på og se interne artikler",
}
def get_internal_events_indicator(self):
current_date = datetime.now()
# Determine number of hidden internal events
if not self.request.user.has_perm("news.can_view_internal_event"):
upcoming_internal_events_count = len(
Event.objects.filter(internal=True, draft=False).filter(
time_start__gte=current_date
)
)
else:
upcoming_internal_events_count = 0
badge_text = {
"plural": {
"large": "internal events hidden",
"medium": "internal hidden",
"small": "hidden",
},
"singular": {
"large": "internal event hidden",
"medium": "internal hidden",
"small": "hidden",
},
}
return {
"count": upcoming_internal_events_count,
"badge_text": badge_text,
"tooltip_text": "Trykk for å logge på og se interne arrangementer",
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Check whether the user has a membership and may view internal items
can_access_internal_article = self.request.user.has_perm(
"news.can_view_internal_article"
)
can_access_internal_event = self.request.user.has_perm(
"news.can_view_internal_event"
)
# Get the 5 events closest to starting
event_list = list(
Event.objects.filter(
time_start__gt=timezone.now(),
internal__lte=can_access_internal_event,
draft=False,
).order_by("time_start")[:5]
)
# Add expired events if we couldn't fill the 5 slots
if len(event_list) < 5:
to_fill = 5 - len(event_list)
expired_events = Event.objects.filter(
time_start__lte=timezone.now(),
internal__lte=can_access_internal_event,
draft=False,
).order_by("-time_start")[:to_fill]
event_list += list(expired_events)
current_date = datetime.now()
# Get five published articles
article_list = Article.objects.filter(
internal__lte=can_access_internal_article, draft=False
).order_by("-pub_date")[:5]
# Get door status
try:
door_status = DoorStatus.objects.get(name="hackerspace").status
except DoorStatus.DoesNotExist:
door_status = True
# If no ApplicationPeriod exists, create one.
if not ApplicationPeriod.objects.filter(name="Opptak"):
ApplicationPeriod.objects.create(
name="Opptak",
period_start=datetime(2018, 1, 1),
period_end=datetime(2018, 1, 2),
).save()
app_start_date = ApplicationPeriod.objects.get(name="Opptak").period_start
app_end_date = ApplicationPeriod.objects.get(name="Opptak").period_end
if (current_date < app_start_date) or (current_date > app_end_date):
is_application = False
else:
is_application = True
context = {
"article_list": article_list,
"event_list": event_list,
"internal_articles_indicator": self.get_internal_articles_indicator(),
"internal_events_indicator": self.get_internal_events_indicator(),
"door_status": door_status,
"app_start_date": app_start_date,
"app_end_date": app_end_date,
"is_application": is_application,
"index_cards": Card.objects.all(),
"current_date": current_date,
}
return context
class IntranetView(PermissionRequiredMixin, TemplateView):
template_name = "website/intranet.html"
permission_required = "userprofile.is_active_member"
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context["current_date"] = datetime.now()
# Random greeting for the intranet header banner. Just for fun
greeting = INTRANET_GREETINGS[randint(0, len(INTRANET_GREETINGS) - 1)]
# We don't check whether the greeting string actually contains a format
# placeholder; just try to format it and fall back to the raw string.
try:
context["greeting"] = greeting.format(self.request.user.first_name)
except IndexError:
context["greeting"] = greeting
# Find the 5 loan apps that have gone unapproved the longest
context["loan_app_list"] = ItemLoan.objects.filter(
approver__isnull=True,
).order_by("-loan_from")[:5]
# Same as in the index view
context["event_list"] = Event.objects.filter(internal=True).order_by(
"-time_start"
)[:5]
context["article_list"] = Article.objects.filter(
internal=True, draft=False
).order_by("-pub_date")[:5]
return context
def handler404(request, exception=None):
return render(request, "website/404.html", status=404)
def handler403(request, exception=None):
return render(request, "website/403.html", status=403)
def handler500(request, exception=None):
return render(request, "website/500.html", status=500)
| 34.387097 | 91 | 0.627767 |
f6279995d8ae4ab4661421c6369daffe851796a2 | 1,114 | py | Python | students/k3343/laboratory_works/Andreeva_Ekaterina/laboratory_work_2/hotel/laboratory_work_2/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 10 | 2020-03-20T09:06:12.000Z | 2021-07-27T13:06:02.000Z | students/k3343/laboratory_works/Andreeva_Ekaterina/laboratory_work_2/hotel/laboratory_work_2/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 134 | 2020-03-23T09:47:48.000Z | 2022-03-12T01:05:19.000Z | students/k3343/laboratory_works/Andreeva_Ekaterina/laboratory_work_2/hotel/laboratory_work_2/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 71 | 2020-03-20T12:45:56.000Z | 2021-10-31T19:22:25.000Z | """laboratory_work_2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('api/v1/', include('hotel.urls')),
path('auth/', include('djoser.urls')),
path('auth/', include('djoser.urls.authtoken')),
path('auth/', include('djoser.urls.jwt')),
]
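# Note: the three djoser includes above expose the bundled auth endpoints,
# e.g. /auth/users/ (registration/profile), /auth/token/login/ and
# /auth/token/logout/ (DRF token auth), and /auth/jwt/create|refresh|verify/
# — see the djoser documentation for the full list.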
| 38.413793 | 78 | 0.682226 |
d7f28a7c63a717e4d287a6f956c5786663ce3386 | 1,407 | py | Python | app/NinePatch.py | JoeyGaojingxing/QtLearn | c6f03c5e3af13d93a622442620584493f792f20f | [
"MIT"
] | null | null | null | app/NinePatch.py | JoeyGaojingxing/QtLearn | c6f03c5e3af13d93a622442620584493f792f20f | [
"MIT"
] | null | null | null | app/NinePatch.py | JoeyGaojingxing/QtLearn | c6f03c5e3af13d93a622442620584493f792f20f | [
"MIT"
] | null | null | null | import os
from ctypes import CDLL, c_char_p
from utils.utils import convert_byte
from UI.ui_nine_patch import Ui_NinePatchFrame
from PySide2.QtWidgets import QFrame, QTableWidgetItem
from PySide2.QtCore import SIGNAL, Slot
path = os.path.join(os.getcwd(), r'build\NinePatch.so')
# path = r'D:\Programming\QtLearn\build\NinePatch.so'
nine_patch_go = CDLL(path)
def nine_patch(edge: int) -> list:
res = nine_patch_go.NinePatch
res.restype = c_char_p
return convert_byte(res(edge))[:edge**2]
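# Usage sketch (assumes NinePatch.so exports a NinePatch(int) symbol
# returning a C string that convert_byte can parse):
# nine_patch(3) -> a list of 9 cell values for a 3x3 grid, which
# NinePatchWindow.run() writes into its QTableWidget.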
class NinePatchWindow(QFrame, Ui_NinePatchFrame):
def __init__(self, main):
QFrame.__init__(self)
self.setupUi(self)
self.main = main
self.edge = 3
self.spinBoxEdge.setValue(self.edge)
self.connect(self.spinBoxEdge, SIGNAL("valueChanged(int)"), self.set_edge)
self.connect(self.pushButtonCommit, SIGNAL("clicked()"), self.run)
def closeEvent(self, event=None):
self.main.show()
@Slot(int)
def set_edge(self, val):
self.edge = val
@Slot()
def run(self):
self.tableWidgetResult.setColumnCount(self.edge)
self.tableWidgetResult.setRowCount(self.edge)
res = nine_patch(self.edge)
num = 0
for i in range(self.edge):
for j in range(self.edge):
self.tableWidgetResult.setItem(i, j, QTableWidgetItem(str(res[num])))
num += 1
| 29.93617 | 85 | 0.666667 |
011c3a09851e9b1925dfff5df2e44ca1c0e7da79 | 100,790 | py | Python | modules/templates/RLPPTM/controllers.py | krypt0x/eden | 63679c36d627b5d0be5858759217408e09aa4ef0 | [
"MIT"
] | 1 | 2018-06-06T12:11:25.000Z | 2018-06-06T12:11:25.000Z | modules/templates/RLPPTM/controllers.py | krypt0x/eden | 63679c36d627b5d0be5858759217408e09aa4ef0 | [
"MIT"
] | null | null | null | modules/templates/RLPPTM/controllers.py | krypt0x/eden | 63679c36d627b5d0be5858759217408e09aa4ef0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from uuid import uuid4
from gluon import Field, HTTP, SQLFORM, URL, current, redirect, \
CRYPT, IS_EMAIL, IS_EMPTY_OR, IS_EXPR, IS_IN_SET, IS_LENGTH, \
IS_LOWER, IS_NOT_EMPTY, IS_NOT_IN_DB, \
A, BR, DIV, FORM, H3, H4, I, INPUT, LI, TAG, TABLE, TD, TR, UL, XML
from gluon.storage import Storage
from s3 import FS, IS_PHONE_NUMBER_MULTI, JSONERRORS, S3CRUD, S3CustomController, \
S3GroupedOptionsWidget, S3LocationSelector, S3Represent, S3Request, \
S3WithIntro, s3_comments_widget, s3_get_extension, s3_mark_required, \
s3_str, s3_text_represent, s3_truncate
from .config import TESTSTATIONS
from .helpers import applicable_org_types
from .notifications import formatmap
TEMPLATE = "RLPPTM"
THEME = "RLP"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
T = current.T
s3 = current.response.s3
auth = current.auth
settings = current.deployment_settings
# Defaults
login_form = None
login_div = None
announcements = None
announcements_title = None
roles = current.session.s3.roles
sr = auth.get_system_roles()
if sr.AUTHENTICATED in roles:
# Logged-in user
# => display announcements
from s3 import S3DateTime
dtrepr = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
filter_roles = roles if sr.ADMIN not in roles else None
posts = self.get_announcements(roles=filter_roles)
# Render announcements list
announcements = UL(_class="announcements")
if posts:
announcements_title = T("Announcements")
priority_classes = {2: "announcement-important",
3: "announcement-critical",
}
priority_icons = {2: "fa-exclamation-circle",
3: "fa-exclamation-triangle",
}
for post in posts:
# The header
header = H4(post.name)
# Priority
priority = post.priority
# Add icon to header?
                    icon_class = priority_icons.get(priority)
if icon_class:
header = TAG[""](I(_class="fa %s announcement-icon" % icon_class),
header,
)
# Priority class for the box
prio = priority_classes.get(priority, "")
row = LI(DIV(DIV(DIV(dtrepr(post.date),
_class = "announcement-date",
),
_class="fright",
),
DIV(DIV(header,
_class = "announcement-header",
),
DIV(XML(post.body),
_class = "announcement-body",
),
_class="announcement-text",
),
_class = "announcement-box %s" % prio,
),
)
announcements.append(row)
else:
# Anonymous user
# => provide a login box
login_div = DIV(H3(T("Login")),
)
auth.messages.submit_button = T("Login")
login_form = auth.login(inline=True)
output = {"login_div": login_div,
"login_form": login_form,
"announcements": announcements,
"announcements_title": announcements_title,
}
# Custom view and homepage styles
s3.stylesheets.append("../themes/%s/homepage.css" % THEME)
self._view(settings.get_theme_layouts(), "index.html")
return output
# -------------------------------------------------------------------------
@staticmethod
def get_announcements(roles=None):
"""
Get current announcements
@param roles: filter announcement by these roles
@returns: any announcements (Rows)
"""
db = current.db
s3db = current.s3db
# Look up all announcements
ptable = s3db.cms_post
stable = s3db.cms_series
join = stable.on((stable.id == ptable.series_id) & \
(stable.name == "Announcements") & \
(stable.deleted == False))
query = (ptable.date <= current.request.utcnow) & \
(ptable.expired == False) & \
(ptable.deleted == False)
if roles:
# Filter posts by roles
ltable = s3db.cms_post_role
q = (ltable.group_id.belongs(roles)) & \
(ltable.deleted == False)
rows = db(q).select(ltable.post_id,
cache = s3db.cache,
groupby = ltable.post_id,
)
post_ids = {row.post_id for row in rows}
query = (ptable.id.belongs(post_ids)) & query
posts = db(query).select(ptable.name,
ptable.body,
ptable.date,
ptable.priority,
join = join,
orderby = (~ptable.priority, ~ptable.date),
limitby = (0, 5),
)
return posts
# =============================================================================
class privacy(S3CustomController):
""" Custom Page """
def __call__(self):
output = {}
# Allow editing of page content from browser using CMS module
ADMIN = current.auth.s3_has_role("ADMIN")
s3db = current.s3db
table = s3db.cms_post
ltable = s3db.cms_post_module
module = "default"
resource = "Privacy"
query = (ltable.module == module) & \
(ltable.resource == resource) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = current.db(query).select(table.body,
table.id,
limitby=(0, 1)).first()
if item:
if ADMIN:
content = DIV(XML(item.body),
BR(),
A(current.T("Edit"),
_href = URL(c="cms", f="post",
args = [item.id, "update"],
vars = {"module": module,
"resource": resource,
},
),
_class="action-btn",
),
)
else:
content = DIV(XML(item.body))
elif ADMIN:
content = A(current.T("Edit"),
_href = URL(c="cms", f="post", args="create",
vars = {"module": module,
"resource": resource,
},
),
_class="action-btn cms-edit",
)
else:
content = ""
output["item"] = content
self._view(THEME, "cmspage.html")
return output
# =============================================================================
class legal(S3CustomController):
""" Custom Page """
def __call__(self):
output = {}
# Allow editing of page content from browser using CMS module
ADMIN = current.auth.s3_has_role("ADMIN")
s3db = current.s3db
table = s3db.cms_post
ltable = s3db.cms_post_module
module = "default"
resource = "Legal"
query = (ltable.module == module) & \
(ltable.resource == resource) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = current.db(query).select(table.body,
table.id,
limitby = (0, 1)
).first()
if item:
if ADMIN:
content = DIV(XML(item.body),
BR(),
A(current.T("Edit"),
_href = URL(c="cms", f="post",
args = [item.id, "update"],
vars = {"module": module,
"resource": resource,
},
),
_class="action-btn",
),
)
else:
content = DIV(XML(item.body))
elif ADMIN:
content = A(current.T("Edit"),
_href = URL(c="cms", f="post", args="create",
vars = {"module": module,
"resource": resource,
},
),
_class="action-btn cms-edit",
)
else:
content = ""
output["item"] = content
self._view(THEME, "cmspage.html")
return output
# =============================================================================
class approve(S3CustomController):
""" Custom Approval Page """
def __call__(self):
T = current.T
auth = current.auth
db = current.db
s3db = current.s3db
session = current.session
ogtable = s3db.org_group
org_group = db(ogtable.name == TESTSTATIONS).select(ogtable.id,
ogtable.pe_id,
limitby = (0, 1)
).first()
try:
org_group_pe_id = org_group.pe_id
        except AttributeError:
            raise RuntimeError("Cannot approve user account as Org Group '%s' is missing" % TESTSTATIONS)
has_role = auth.s3_has_role
if has_role("ORG_GROUP_ADMIN",
for_pe = org_group_pe_id):
ORG_ADMIN = False
elif has_role("ORG_ADMIN"):
ORG_ADMIN = True
else:
session.error = T("Not Permitted!")
redirect(URL(c = "default",
f = "index",
args = None,
))
utable = db.auth_user
request = current.request
response = current.response
org_group_id = org_group.id
# Single User or List?
if len(request.args) > 1:
user_id = request.args[1]
user = db(utable.id == user_id).select(utable.id,
utable.first_name,
utable.last_name,
utable.email,
utable.organisation_id,
utable.org_group_id,
utable.registration_key,
utable.link_user_to, # Needed for s3_approve_user
                                                   utable.site_id, # Needed for s3_link_to_human_resource (called from s3_approve_user)
limitby = (0, 1)
).first()
if not user or user.org_group_id != org_group_id:
session.error = T("Invalid Account!")
redirect(URL(c = "default",
f = "index",
args = ["approve"],
))
otable = s3db.org_organisation
organisation_id = user.organisation_id
if organisation_id:
org = db(otable.id == organisation_id).select(otable.name,
otable.pe_id,
limitby = (0, 1)
).first()
if ORG_ADMIN:
if not organisation_id or \
not has_role("ORG_ADMIN",
for_pe = org.pe_id):
session.error = T("Account not within your Organisation!")
redirect(URL(c = "default",
f = "index",
args = ["approve"],
))
person = "%(first_name)s %(last_name)s <%(email)s>" % {"first_name": user.first_name,
"last_name": user.last_name,
"email": user.email,
}
ttable = s3db.auth_user_temp
temp = db(ttable.user_id == user_id).select(ttable.custom,
limitby = (0, 1)
).first()
try:
custom = json.loads(temp.custom)
except JSONERRORS:
custom = {}
# Test Station (Organisation)
custom_get = custom.get
organisation = custom_get("organisation")
if organisation:
test_station = TR(TD("%s:" % T("Test Station")),
TD(organisation),
)
else:
test_station = None
# Org type selector
selected_type = custom_get("organisation_type")
org_types = applicable_org_types(None, group=TESTSTATIONS, represent=True)
if selected_type and selected_type not in org_types:
selected_type = None
field = Field("organisation_type", "integer",
label = T("Organization Type"),
requires = IS_IN_SET(org_types),
)
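            # Dummy tablename to make the widget work with a standalone field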
field.tablename = "approve"
from gluon.sqlhtml import OptionsWidget
type_selector = OptionsWidget.widget(field, selected_type)
# Address
location = custom_get("location")
location_get = location.get
addr_street = location_get("addr_street")
addr_postcode = location_get("addr_postcode")
L1 = location_get("L1")
L2 = location_get("L2")
L3 = location_get("L3")
L4 = location_get("L4")
represent = S3Represent(lookup = "gis_location")
address = TABLE(TR(addr_street or ""),
TR(addr_postcode or ""),
TR(represent(L4) if L4 else ""),
TR(represent(L3) if L3 else ""),
TR(represent(L2) if L2 else ""),
TR(represent(L1) if L1 else ""),
)
# Service Offer
opening_times = custom_get("opening_times")
service_modes = register.selectable_services_modes()
service_mode_id = custom_get("service_mode")
if service_mode_id:
try:
service_mode_id = int(service_mode_id)
except (ValueError, TypeError):
service_mode_id = None
if service_mode_id in service_modes:
service_mode = service_modes[service_mode_id]
else:
service_mode = service_mode_id = None
# Map selected services to the services selectable at time of approval
selectable_services = register.selectable_services()
service_ids, service_names = [], []
selected = custom_get("services")
if selected:
if not isinstance(selected, list):
selected = [selected]
for v in selected:
try:
service_id = int(v)
except (ValueError, TypeError):
continue
if service_id in selectable_services:
service_ids.append(service_id)
service_names.append(selectable_services[service_id])
services = ", ".join(service_names)
comments = custom_get("comments")
# Contact and Appointments
facility_phone = custom_get("facility_phone") or custom_get("office_phone")
facility_email = custom_get("facility_email")
facility_website = custom_get("facility_website")
booking_modes = register.selectable_booking_modes()
booking_mode_id = custom_get("booking_mode")
if booking_mode_id:
try:
booking_mode_id = int(booking_mode_id)
except (ValueError, TypeError):
booking_mode_id = None
if booking_mode_id in booking_modes:
booking_mode = booking_modes[booking_mode_id]
else:
booking_mode = booking_mode_id = None
# Administrative
# Map selected projects to the projects selectable at time of approval
selectable_projects = register.selectable_projects()
projects = []
selected = custom_get("projects")
if not isinstance(selected, (tuple, list)):
selected = [selected]
for v in selected:
try:
project_id = int(v)
except (ValueError, TypeError):
continue
if project_id in selectable_projects:
projects.append(project_id)
# Add project selector
field = Field("projects", "list:integer",
label = T("Programs"),
requires = [IS_IN_SET(selectable_projects,
multiple = True,
zero = None,
),
IS_NOT_EMPTY(),
],
)
field.tablename = "approve" # Dummy to make widget work
projects_selector = S3GroupedOptionsWidget(cols=1)(field, projects)
if user.registration_key is None:
response.warning = T("Registration has previously been Approved")
elif user.registration_key == "rejected":
response.warning = T("Registration has previously been Rejected")
elif user.registration_key != "pending":
response.warning = T("User hasn't verified their email")
approve = INPUT(_value = T("Approve"),
_type = "submit",
_name = "approve-btn",
_id = "approve-btn",
_class = "small primary button",
)
reject = INPUT(_value = T("Reject"),
_type = "submit",
_name = "reject-btn",
_id = "reject-btn",
_class = "small alert button",
)
strrepr = lambda v: v if v else "-"
form = FORM(TABLE(TR(approve,
reject,
),
TR(TD("%s:" % T("Person")),
TD(person),
),
test_station,
TR(TD("%s:" % T("Organization Type")),
TD(type_selector),
),
TR(TD("%s:" % T("Address")),
TD(address),
),
TR(TD("%s:" % T("Opening Hours")),
TD(strrepr(opening_times)),
),
TR(TD("%s:" % T("Service Mode")),
TD(strrepr(service_mode)),
),
TR(TD("%s:" % T("Services")),
TD(strrepr(services)),
),
TR(TD("%s:" % T("Telephone")),
TD(strrepr(facility_phone)),
),
TR(TD("%s:" % T("Email")),
TD(strrepr(facility_email)),
),
TR(TD("%s:" % T("Website")),
TD(strrepr(facility_website)),
),
TR(TD("%s:" % T("Appointments via")),
TD(strrepr(booking_mode)),
),
TR(TD("%s:" % T("Projects")),
TD(projects_selector),
),
TR(TD("%s:" % T("Comments")),
TD(s3_text_represent(strrepr(comments))),
),
),
_class = "approve-form",
)
if form.accepts(request.post_vars, session, formname="approve"):
form_vars = form.vars
rejected = bool(form_vars.get("reject-btn"))
approved = bool(form_vars.get("approve-btn")) and not rejected
if approved:
set_record_owner = auth.s3_set_record_owner
s3db_onaccept = s3db.onaccept
update_super = s3db.update_super
if not organisation_id:
# Create organisation
org = {"name": organisation}
org["id"] = organisation_id = otable.insert(**org)
update_super(otable, org)
set_record_owner(otable, org, owned_by_user=user_id)
s3db_onaccept(otable, org, method="create")
# Link organisation to TESTSTATIONS group
mtable = s3db.org_group_membership
membership = {"group_id": org_group_id,
"organisation_id": organisation_id,
}
membership["id"] = mtable.insert(**membership)
set_record_owner(mtable, membership)
s3db_onaccept(mtable, membership, method="create")
# Link organisation to selected organisation type
type_id = form_vars.get("organisation_type")
if type_id:
ltable = s3db.org_organisation_organisation_type
type_id = int(type_id)
link = {"organisation_id": organisation_id,
"organisation_type_id": type_id,
}
link["id"] = ltable.insert(**link)
set_record_owner(ltable, link)
s3db_onaccept(ltable, link, method="create")
# Link organisation to selected projects
selected = form_vars.get("projects")
if isinstance(selected, (tuple, list)):
ltable = s3db.project_organisation
for item in selected:
try:
project_id = int(item)
except (ValueError, TypeError):
continue
link = {"project_id": project_id,
"organisation_id": organisation_id,
"role": 2,
}
link["id"] = ltable.insert(**link)
set_record_owner(ltable, link)
s3db_onaccept(ltable, link, method="create")
# Add default tags
from .helpers import add_organisation_default_tags
add_organisation_default_tags(organisation_id)
# Update user
user.update_record(organisation_id = organisation_id,
registration_key = None,
)
# Grant ORG_ADMIN and PROVIDER_ACCOUNTANT
auth.s3_assign_role(user_id, "ORG_ADMIN", for_pe=org["pe_id"])
auth.s3_assign_role(user_id, "PROVIDER_ACCOUNTANT")
else:
# Update user
user.update_record(registration_key = None)
# Grant VOUCHER_PROVIDER
auth.s3_assign_role(user_id, "VOUCHER_PROVIDER")
location_id = location_get("id")
if not location_id:
# Create location
ltable = s3db.gis_location
                        del location["wkt"] # Will get created during onaccept & we don't want the "Source WKT has been cleaned by Shapely" warning
location["id"] = location_id = ltable.insert(**location)
set_record_owner(ltable, location, owned_by_user=user_id)
s3db_onaccept(ltable, location, method="create")
# Create facility
ftable = s3db.org_facility
facility_name = organisation if organisation else org.name
facility = {"name": s3_truncate(facility_name),
"organisation_id": organisation_id,
"location_id": location_id,
"phone1": facility_phone,
"email": facility_email,
"website": facility_website,
"opening_times": opening_times,
"comments": comments,
}
facility_id = facility["id"] = ftable.insert(**facility)
update_super(ftable, facility)
set_record_owner(ftable, facility, owned_by_user=user_id)
s3db_onaccept(ftable, facility, method="create")
site_id = facility["site_id"]
# Capture site details
dtable = s3db.org_site_details
details = {"site_id": site_id,
"service_mode_id": service_mode_id,
"booking_mode_id": booking_mode_id,
}
details["id"] = dtable.insert(**details)
update_super(dtable, details)
set_record_owner(dtable, details, owned_by_user=user_id)
s3db_onaccept(dtable, details, method="create")
# Link facility to facility type
fttable = s3db.org_facility_type
tltable = s3db.org_site_facility_type
facility_type = db(fttable.name == "Infection Test Station") \
.select(fttable.id, limitby=(0, 1)).first()
if facility_type:
tltable.insert(site_id = site_id,
facility_type_id = facility_type.id,
)
# Link facility to services
if service_ids:
sltable = s3db.org_service_site
for service_id in service_ids:
link = {"service_id": service_id,
"site_id": site_id,
}
link["id"] = sltable.insert(**link)
set_record_owner(sltable, link, owned_by_user=user_id)
s3db_onaccept(sltable, link, method="create")
# Add default tags for facility
from .helpers import add_facility_default_tags
add_facility_default_tags(facility_id)
# Approve user
auth.s3_approve_user(user)
# Send welcome email
settings = current.deployment_settings
from .notifications import CMSNotifications
error = CMSNotifications.send(user.email,
"WelcomeProvider",
{"name": organisation or org.name,
"homepage": settings.get_base_public_url(),
"profile": URL("default", "person", host=True),
},
module = "auth",
resource = "user",
)
if error:
session.warning = "%s: %s" % (T("Welcome Email NOT sent"), error)
session.confirmation = T("Registration approved")
redirect(URL(c = "default",
f = "index",
args = ["approve"],
))
elif rejected:
user.update_record(registration_key = "rejected")
session.confirmation = T("Registration rejected")
redirect(URL(c = "default",
f = "index",
args = ["approve"],
))
output = {"form": form,
"title": T("Approve Test Station"),
}
# Custom View
self._view("RLPPTM", "approve.html")
else:
# List View
if ORG_ADMIN:
# Filter to just their users
gtable = db.auth_group
mtable = db.auth_membership
query = (mtable.user_id == auth.user.id) & \
(mtable.group_id == gtable.id) & \
(gtable.uuid == "ORG_ADMIN")
memberships = db(query).select(mtable.pe_id)
pe_id = [m.pe_id for m in memberships]
otable = s3db.org_organisation
orgs = db(otable.pe_id.belongs(pe_id)).select(otable.id)
organisation_id = [org.id for org in orgs]
accounts_filter = FS("organisation_id").belongs(organisation_id)
else:
# Filter to all for the ORG_GROUP
accounts_filter = FS("org_group_id") == org_group_id
# Only include pending accounts
accounts_filter &= FS("registration_key") == "pending"
resource = s3db.resource("auth_user", filter=accounts_filter)
list_id = "datatable"
# List fields
list_fields = resource.list_fields()
orderby = None
s3 = response.s3
representation = s3_get_extension(request) or \
S3Request.DEFAULT_REPRESENTATION
# Pagination
get_vars = request.get_vars
if representation == "aadata":
start, limit = S3CRUD._limits(get_vars)
else:
# Initial page request always uses defaults (otherwise
# filtering and pagination would have to be relative to
# the initial limits, but there is no use-case for that)
start = None
limit = None if s3.no_sspag else 0
left = []
distinct = False
dtargs = {}
if representation in S3Request.INTERACTIVE_FORMATS:
# How many records per page?
if s3.dataTable_pageLength:
display_length = s3.dataTable_pageLength
else:
display_length = 25
# Server-side pagination?
if not s3.no_sspag:
dt_pagination = "true"
if not limit:
limit = 2 * display_length
session.s3.filter = get_vars
if orderby is None:
dt_sorting = {"iSortingCols": "1",
"sSortDir_0": "asc"
}
if len(list_fields) > 1:
dt_sorting["bSortable_0"] = "false"
dt_sorting["iSortCol_0"] = "1"
else:
dt_sorting["bSortable_0"] = "true"
dt_sorting["iSortCol_0"] = "0"
orderby, left = resource.datatable_filter(list_fields,
dt_sorting,
)[1:3]
else:
dt_pagination = "false"
# Disable exports
s3.no_formats = True
# Get the data table
dt, totalrows = resource.datatable(fields = list_fields,
start = start,
limit = limit,
left = left,
orderby = orderby,
distinct = distinct,
)
displayrows = totalrows
# Always show table, otherwise it can't be Ajax-filtered
# @todo: need a better algorithm to determine total_rows
# (which excludes URL filters), so that datatables
# shows the right empty-message (ZeroRecords instead
# of EmptyTable)
dtargs["dt_pagination"] = dt_pagination
dtargs["dt_pageLength"] = display_length
dtargs["dt_base_url"] = URL(c="default", f="index", args="approve")
dtargs["dt_permalink"] = URL(c="default", f="index", args="approve")
datatable = dt.html(totalrows,
displayrows,
id = list_id,
**dtargs)
# Action Buttons
s3.actions = [{"label": s3_str(T("Review")),
"url": URL(args = ["approve", "[id]"],
),
"_class": "action-btn",
},
]
output = {"items": datatable,
"title": T("Test Stations to be Approved"),
}
# Custom View
self._view(TEMPLATE, "approve_list.html")
elif representation == "aadata":
# Apply datatable filters
searchq, orderby, left = resource.datatable_filter(list_fields,
get_vars)
if searchq is not None:
totalrows = resource.count()
resource.add_filter(searchq)
else:
totalrows = None
# Get a data table
if totalrows != 0:
dt, displayrows = resource.datatable(fields = list_fields,
start = start,
limit = limit,
left = left,
orderby = orderby,
distinct = distinct,
)
else:
dt, displayrows = None, 0
if totalrows is None:
totalrows = displayrows
# Echo
draw = int(get_vars.get("draw", 0))
# Representation
if dt is not None:
output = dt.json(totalrows,
displayrows,
list_id,
draw,
**dtargs)
else:
output = '{"recordsTotal":%s,' \
'"recordsFiltered":0,' \
'"dataTable_id":"%s",' \
'"draw":%s,' \
'"data":[]}' % (totalrows, list_id, draw)
else:
S3Request("auth", "user").error(415, current.ERROR.BAD_FORMAT)
return output
# =============================================================================
class register(S3CustomController):
""" Custom Registration Page """
def __call__(self):
auth = current.auth
if auth.s3_logged_in():
# Redirect if already logged-in
redirect(URL(c="default", f="index"))
auth_settings = auth.settings
auth_messages = auth.messages
self.customise_auth_messages()
T = current.T
db = current.db
s3db = current.s3db
request = current.request
response = current.response
session = current.session
settings = current.deployment_settings
if not settings.get_custom(key="test_station_registration"):
session.error = T("Function not available")
redirect(URL(c="default", f="index"))
utable = auth_settings.table_user
# Page title and intro text
title = T("Register Test Station")
# Get intro text from CMS
ctable = s3db.cms_post
ltable = s3db.cms_post_module
join = ltable.on((ltable.post_id == ctable.id) & \
(ltable.module == "auth") & \
(ltable.resource == "user") & \
(ltable.deleted == False))
query = (ctable.name == "SelfRegistrationIntro") & \
(ctable.deleted == False)
row = db(query).select(ctable.body,
join = join,
cache = s3db.cache,
limitby = (0, 1),
).first()
intro = row.body if row else None
# Form Fields
formfields, required_fields, subheadings = self.formfields()
# Generate labels (and mark required fields in the process)
labels, has_required = s3_mark_required(formfields,
mark_required = required_fields,
)
response.s3.has_required = has_required
# Form buttons
REGISTER = T("Register")
buttons = [INPUT(_type = "submit",
_value = REGISTER,
),
# TODO cancel-button?
]
# Construct the form
response.form_label_separator = ""
form = SQLFORM.factory(table_name = utable._tablename,
record = None,
hidden = {"_next": request.vars._next},
labels = labels,
separator = "",
showid = False,
submit_button = REGISTER,
delete_label = auth_messages.delete_label,
formstyle = settings.get_ui_formstyle(),
buttons = buttons,
*formfields)
# Identify form for CSS & JS Validation
form.add_class("auth_register")
# Add Subheadings
if subheadings:
for pos, heading in subheadings[::-1]:
form[0].insert(pos, DIV(heading, _class="subheading"))
# Inject client-side Validation
auth.s3_register_validation()
# Set default registration key, so new users are prevented
# from logging in until approved
key = str(uuid4())
code = uuid4().hex[-6:].upper()
utable.registration_key.default = self.keyhash(key, code)
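        # Only the salted hash of the code is stored; verify_email later
        # re-computes keyhash(key, code) from the key in the emailed link and
        # the code entered by the user, and matches it against this value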
if form.accepts(request.vars,
session,
formname = "register",
onvalidation = auth_settings.register_onvalidation,
):
formvars = form.vars
organisation = formvars.get("organisation")
# Check if organisation already exists
organisation_id = self.lookup_organisation(formvars)
if organisation_id:
formvars["organisation_id"] = organisation_id
# Create the user record
user_id = utable.insert(**utable._filter_fields(formvars, id=False))
formvars.id = user_id
# Set org_group
ogtable = s3db.org_group
org_group = db(ogtable.name == TESTSTATIONS).select(ogtable.id,
limitby = (0, 1)
).first()
try:
org_group_id = org_group.id
            except AttributeError:
                raise RuntimeError("Cannot register user account as Org Group '%s' is missing" % TESTSTATIONS)
db(utable.id == user_id).update(org_group_id = org_group_id)
# Save temporary user fields in s3db.auth_user_temp
temptable = s3db.auth_user_temp
record = {"user_id": user_id}
record["consent"] = formvars.consent
# Store Custom fields
custom = {"location": formvars.location,
"opening_times": formvars.opening_times,
"service_mode": formvars.service_mode,
"services": formvars.services,
"facility_phone": formvars.facility_phone,
"facility_email": formvars.facility_email,
"facility_website": formvars.facility_website,
"booking_mode": formvars.booking_mode,
"comments": formvars.comments,
"projects": formvars.projects,
}
if not organisation_id:
custom["organisation"] = organisation
custom["organisation_type"] = formvars.organisation_type
record["custom"] = json.dumps(custom)
temptable.insert(**record)
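            # These values are read back by the approve controller once the
            # email address has been verified and the account awaits approval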
# Post-process the new user record
users = db(utable.id > 0).select(utable.id, limitby=(0, 2))
if len(users) == 1:
# 1st user to register doesn't need verification/approval
auth.s3_approve_user(form.vars)
session.confirmation = auth_messages.registration_successful
# 1st user gets Admin rights
admin_group_id = 1
auth.add_membership(admin_group_id, users.first().id)
# Log them in
if "language" not in form.vars:
# Was missing from login form
form.vars.language = T.accepted_language
user = Storage(utable._filter_fields(form.vars, id=True))
auth.login_user(user)
# Send welcome email
auth.s3_send_welcome_email(form.vars)
# Where to go next?
register_next = request.vars._next or auth_settings.register_next
else:
# Request User Verify their Email
# System Details for Verification Email
verify_url = URL(c = "default",
f = "index",
args = ["verify_email", key],
scheme = "https" if request.is_https else "http",
)
system = {"system_name": settings.get_system_name(),
"url": verify_url,
#"url": "%s/default/index/verify_email/%s" % (response.s3.base_url, key),
"code": code,
}
# Try to send the Verification Email
if not auth_settings.mailer or \
not auth_settings.mailer.settings.server or \
not auth_settings.mailer.send(to = form.vars.email,
subject = auth_messages.verify_email_subject % system,
message = auth_messages.verify_email % system,
):
response.error = auth_messages.email_verification_failed
# Custom View
self._view(THEME, "register.html")
return {"title": title,
"form": form,
}
# Redirect to Verification Info page
register_next = URL(c = "default",
f = "message",
args = ["verify_email_sent"],
vars = {"email": form.vars.email},
)
# Log action
auth.log_event(auth_messages.register_log, form.vars)
# Redirect
redirect(register_next)
elif form.errors:
response.error = T("There are errors in the form, please check your input")
# Custom View
self._view(THEME, "register.html")
return {"title": title,
"intro": intro,
"form": form,
}
# -------------------------------------------------------------------------
@classmethod
def formfields(cls):
"""
Generate the form fields for the registration form
@returns: a tuple (formfields, required_fields, subheadings)
- formfields = list of form fields
- required_fields = list of field names of required fields
- subheadings = list of tuples (position, heading) to
insert into the form
"""
T = current.T
request = current.request
#db = current.db
s3db = current.s3db
auth = current.auth
auth_settings = auth.settings
auth_messages = auth.messages
utable = auth_settings.table_user
passfield = auth_settings.password_field
# Instantiate Consent Tracker
consent = s3db.auth_Consent(processing_types=["SHARE", "RULES_PRO"])
# Last name is required
utable.last_name.requires = IS_NOT_EMPTY(error_message=T("input required"))
#ltable = s3db.gis_location
# Lookup projects with provider self-registration
projects = cls.selectable_projects()
# Lookup site services
services = cls.selectable_services()
# Lookup applicable organisation types
org_types = applicable_org_types(None, group=TESTSTATIONS, represent=True)
# Form fields
formfields = [# -- User account ---
utable.first_name,
utable.last_name,
utable.email,
utable[passfield],
# Password Verification Field
Field("password_two", "password",
label = auth_messages.verify_password,
requires = IS_EXPR("value==%s" % \
repr(request.vars.get(passfield)),
error_message = auth_messages.mismatched_password,
),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (auth_messages.verify_password,
T("Enter the same password again"),
),
),
),
# -- Test Station ---
Field("organisation",
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(60)],
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Test Station Name"),
T("Specify the name of the test station (max 60 characters)"),
),
),
),
Field("organisation_type", "integer",
label = T("Organization Type"),
requires = IS_IN_SET(org_types),
),
# -- Address --
Field("location", "json",
widget = S3LocationSelector(
levels = ("L1", "L2", "L3", "L4"),
required_levels = ("L1", "L2", "L3"),
show_address = True,
address_required = True,
show_postcode = True,
postcode_required = True,
show_map = True,
),
),
# -- Service Offer --
Field("opening_times",
label = T("Opening Hours"),
requires = IS_NOT_EMPTY(),
),
Field("service_mode", "integer",
label = T("Service Mode"),
requires = IS_IN_SET(cls.selectable_services_modes()),
),
Field("services", "list:integer",
label = T("Services"),
requires = IS_IN_SET(services,
multiple = True,
zero = None,
),
widget = S3WithIntro(S3GroupedOptionsWidget(cols=1),
# Widget intro from CMS
intro = ("org",
"facility",
"SiteServiceIntro",
),
),
),
# -- Contact and Appointments --
Field("facility_phone",
label = T("Telephone"),
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),
),
Field("facility_email",
label = T("Email"),
requires = IS_EMPTY_OR(IS_EMAIL()),
),
Field("facility_website",
label = T("Website"),
),
Field("booking_mode", "integer",
label = T("Appointments via"),
requires = IS_EMPTY_OR(IS_IN_SET(
cls.selectable_booking_modes(),
)),
),
Field("comments", "text",
label = T("Comments"),
widget = s3_comments_widget,
),
# -- Administrative --
Field("projects", "list:integer",
label = T("Programs"),
requires = [IS_IN_SET(projects,
multiple = True,
zero = None,
),
IS_NOT_EMPTY(),
],
widget = S3WithIntro(S3GroupedOptionsWidget(cols=1),
# Widget intro from CMS
intro = ("org",
"organisation",
"ProjectParticipationIntro",
),
),
),
# -- Privacy and Consent --
Field("consent",
label = T("Consent"),
widget = consent.widget,
),
]
# Required fields
required_fields = ["first_name",
"last_name",
]
# Subheadings
subheadings = ((0, T("User Account")),
(5, T("Test Station")),
(7, T("Address")),
(8, T("Service Offer")),
(11, T("Contact and Appointments")),
(16, T("Administrative")),
(17, "%s / %s" % (T("Privacy"), T("Terms of Service"))),
)
# Geocoder
current.response.s3.scripts.append("/%s/static/themes/RLP/js/geocoderPlugin.js" % request.application)
return formfields, required_fields, subheadings
# -------------------------------------------------------------------------
@staticmethod
def keyhash(key, code):
"""
Generate a hash of the activation code using
the registration key
@param key: the registration key
@param code: the activation code
@returns: the hash as string
"""
crypt = CRYPT(key=key, digest_alg="sha512", salt=None)
return str(crypt(code.upper())[0])
# -------------------------------------------------------------------------
@staticmethod
def customise_auth_messages():
"""
Customise auth messages:
- verification email
- welcome email
"""
messages = current.auth.messages
messages.verify_email_subject = "%(system_name)s - Verify Email"
messages.verify_email = \
"""Click on the link %(url)s to verify your email.
Your Activation Code: %(code)s
"""
messages.welcome_email_subject = "Welcome to the %(system_name)s Portal"
messages.welcome_email = \
"""Welcome to the %(system_name)s Portal
- To edit your profile go to: %(url)s%(profile)s
Thank you
"""
# -------------------------------------------------------------------------
@staticmethod
def lookup_organisation(formvars):
"""
Identify the organisation the user attempts to register for,
by name, facility Lx and if necessary facility email address
@param formvars: the FORM vars
@returns: organisation_id if found, or None if this is a new
organisation
"""
orgname = formvars.get("organisation")
if not orgname:
return None
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
ftable = s3db.org_facility
ltable = s3db.gis_location
gtable = s3db.org_group
mtable = s3db.org_group_membership
# Search by name among test stations
query = (otable.name == orgname) & \
(otable.deleted == False)
join = [mtable.on(mtable.organisation_id == otable.id),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.name == TESTSTATIONS)),
ftable.on(ftable.organisation_id == otable.id),
]
        # Do we have a selected location? (should have, since it is mandatory)
location = formvars.get("location")
if isinstance(location, str):
try:
location = json.loads(location)
except JSONERRORS:
location = None
if location:
# Include the Lx ancestor in the lookup
ancestor = None
for level in ("L4", "L3", "L2"):
ancestor = location.get(level)
if ancestor:
break
if ancestor:
join.append(ltable.on(ltable.id == ftable.location_id))
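                # Match facilities located directly in the ancestor Lx, or in
                # any specific location (level None) with the ancestor as parent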
query &= ((ltable.level == None) & (ltable.parent == ancestor)) | \
(ltable.id == ancestor)
rows = db(query).select(otable.id, join = join)
organisation_id = None
if len(rows) > 1:
# Multiple matches => try using facility email to reduce
facility_email = formvars.get("facility_email")
if facility_email:
candidates = {row.id for row in rows}
query = (ftable.organisation_id.belongs(candidates)) & \
(ftable.email == facility_email) & \
(ftable.deleted == False)
match = db(query).select(ftable.organisation_id,
limitby = (0, 2),
)
if len(match) == 1:
organisation_id = match.first().organisation_id
elif rows:
# Single match - this organisation already exists
            organisation_id = rows.first().id
return organisation_id
# -------------------------------------------------------------------------
@staticmethod
def selectable_projects():
"""
Projects the user can select during test station registration
=> all projects that are tagged with APPLY=Y
            @returns: a dict of projects {project_id: name}
"""
db = current.db
s3db = current.s3db
# Lookup projects with provider self-registration
ptable = s3db.project_project
ttable = s3db.project_project_tag
join = ttable.on((ttable.project_id == ptable.id) & \
(ttable.tag == "APPLY") & \
(ttable.value == "Y") & \
(ttable.deleted == False))
query = (ptable.deleted == False)
rows = db(query).select(ptable.id,
ptable.name,
join = join,
)
projects = {row.id: row.name for row in rows}
return projects
# -------------------------------------------------------------------------
@staticmethod
def selectable_services():
"""
Services the user can select during test station registration
            @returns: a dict of services {service_id: name}
"""
db = current.db
s3db = current.s3db
stable = s3db.org_service
query = (stable.deleted == False)
rows = db(query).select(stable.id,
stable.name,
)
services = {row.id: row.name for row in rows}
return services
# -------------------------------------------------------------------------
@staticmethod
def selectable_services_modes():
"""
Service modes the user can select during test station registration
            @returns: a dict of service modes {mode_id: name}
"""
db = current.db
s3db = current.s3db
mtable = s3db.org_service_mode
query = (mtable.deleted == False)
rows = db(query).select(mtable.id,
mtable.name,
)
modes = {row.id: row.name for row in rows}
return modes
# -------------------------------------------------------------------------
@staticmethod
def selectable_booking_modes():
"""
Booking modes the user can select during test station registration
            @returns: a dict of booking modes {mode_id: name}
"""
db = current.db
s3db = current.s3db
mtable = s3db.org_booking_mode
query = (mtable.deleted == False)
rows = db(query).select(mtable.id,
mtable.name,
)
modes = {row.id: row.name for row in rows}
return modes
# =============================================================================
class verify_email(S3CustomController):
""" Custom verify_email Page """
def __call__(self):
T = current.T
request = current.request
response = current.response
session = current.session
settings = current.deployment_settings
# Get the registration key
if request.env.request_method == "POST":
key = request.post_vars.registration_key
elif len(request.args) > 1:
key = request.args[-1]
else:
key = None
if not key:
session.error = T("Missing registration key")
redirect(URL(c="default", f="index"))
formfields = [Field("activation_code",
label = T("Please enter your Activation Code"),
requires = IS_NOT_EMPTY(),
),
]
# Construct the form
response.form_label_separator = ""
form = SQLFORM.factory(table_name = "auth_user",
record = None,
hidden = {"_next": request.vars._next,
"registration_key": key,
},
separator = ":",
showid = False,
submit_button = T("Submit"),
formstyle = settings.get_ui_formstyle(),
#buttons = buttons,
*formfields)
if form.accepts(request.vars,
session,
formname = "register_confirm",
):
db = current.db
s3db = current.s3db
auth = current.auth
auth_settings = auth.settings
register.customise_auth_messages()
# Get registration key from URL
code = form.vars.activation_code
# Find the pending user account
utable = auth_settings.table_user
query = (utable.registration_key == register.keyhash(key, code))
user = db(query).select(limitby = (0, 1),
).first()
if not user:
session.error = T("Registration not found")
redirect(auth_settings.verify_email_next)
user_id = user.id
db(utable.id == user_id).update(registration_key = "pending")
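            # "pending" marks the account as email-verified but not yet
            # approved; the approve controller lists accounts in this state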
auth.log_event(auth.messages.verify_email_log, user)
# Lookup the Approver(s)
gtable = db.auth_group
mtable = db.auth_membership
pe_id = None
# Is this an existing Org?
organisation_id = user.organisation_id
if organisation_id:
role_required = "ORG_ADMIN"
# Get org_name and pe_id from organisation
otable = s3db.org_organisation
row = db(otable.id == organisation_id).select(otable.name,
otable.pe_id,
limitby = (0, 1)
).first()
if row:
org_name = row.name
pe_id = row.pe_id
subject = """%(system_name)s - New User Approval Pending"""
message = """Your action is required to approve a New User for %(org_name)s:
%(user_name)s
Please go to %(url)s to approve this user."""
if not pe_id:
role_required = "ORG_GROUP_ADMIN"
subject = """%(system_name)s - New Test Station Approval Pending"""
message = """Your action is required to approve a New Test Station for %(system_name)s:
%(org_name)s
Please go to %(url)s to approve this station."""
# Get org_name from auth_user_temp
                ttable = s3db.auth_user_temp
temp = db(ttable.user_id == user_id).select(ttable.custom,
limitby = (0, 1)
).first()
try:
custom = json.loads(temp.custom)
except JSONERRORS:
custom = {}
org_name = custom.get("organisation")
# Get pe_id of TESTSTATIONS group
ogtable = s3db.org_group
query = (ogtable.name == TESTSTATIONS) & \
(ogtable.deleted == False)
row = db(query).select(ogtable.pe_id, limitby=(0, 1)).first()
if row:
pe_id = row.pe_id
query = (mtable.pe_id == 0)
if pe_id:
query |= (mtable.pe_id == pe_id)
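            # Include role assignments without realm restriction (pe_id 0) as
            # well as those scoped to the relevant realm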
join = [mtable.on((mtable.user_id == utable.id) & \
(mtable.deleted == False)),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.uuid == role_required)),
]
approvers = db(query).select(utable.email,
utable.language,
join = join,
)
# Ensure that we send out the mails in the language that the approver(s) want
languages = {}
for approver in approvers:
language = approver.language
if language not in languages:
languages[language] = [approver.email]
else:
languages[language].append(approver.email)
subjects = {}
messages = {}
system_name = settings.get_system_name()
base_url = response.s3.base_url
url = "%s/default/index/approve/%s" % (base_url, user_id)
for language in languages:
subjects[language] = s3_str(T(subject, language=language) %
{"system_name": system_name,
})
messages[language] = s3_str(T(message, language=language) %
{"org_name": org_name,
"system_name": system_name,
"user_name": user.email,
"url": url,
})
result = None
mailer = auth_settings.mailer
            if mailer and mailer.settings.server:
send_email = mailer.send
for approver in approvers:
language = approver["language"]
result = send_email(to = approver["email"],
subject = subjects[language],
message = messages[language]
)
if result:
session.confirmation = settings.get_auth_registration_pending_approval()
else:
# Don't prevent registration just because email not configured
#db.rollback()
session.error = auth.messages.email_send_failed
redirect(URL(c="default", f="index"))
self._view(THEME, "register.html")
return {"title": T("Confirm Registration"),
"form": form,
}
# -------------------------------------------------------------------------
@staticmethod
def send_welcome_email(user):
"""
Send a welcome email to the new user
@param user: the auth_user Row
"""
register.customise_auth_messages()
auth_messages = current.auth.messages
try:
recipient = user["email"]
except (KeyError, TypeError):
recipient = None
if not recipient:
current.response.error = auth_messages.unable_send_email
return
# Look up CMS template for welcome email
db = current.db
s3db = current.s3db
settings = current.deployment_settings
# Define join
ctable = s3db.cms_post
ltable = s3db.cms_post_module
join = ltable.on((ltable.post_id == ctable.id) & \
(ltable.module == "auth") & \
(ltable.resource == "user") & \
(ltable.deleted == False))
# Get message template
query = (ctable.name == "WelcomeMessage") & \
(ctable.deleted == False)
row = db(query).select(ctable.doc_id,
ctable.body,
join = join,
limitby = (0, 1),
).first()
if row:
message_template = row.body
else:
# Disabled
return
# Look up attachments
dtable = s3db.doc_document
query = (dtable.doc_id == row.doc_id) & \
(dtable.file != None) & (dtable.file != "") & \
(dtable.deleted == False)
rows = db(query).select(dtable.file)
attachments = []
for row in rows:
filename, stream = dtable.file.retrieve(row.file)
attachments.append(current.mail.Attachment(stream, filename=filename))
# Default subject from auth.messages
system_name = s3_str(settings.get_system_name())
subject = s3_str(auth_messages.welcome_email_subject % \
{"system_name": system_name})
# Custom message body
data = {"system_name": system_name,
"url": settings.get_base_public_url(),
"profile": URL("default", "person", host=True),
}
message = formatmap(message_template, data)
# Send email
success = current.msg.send_email(to = recipient,
subject = subject,
message = message,
attachments = attachments,
)
if not success:
current.response.error = auth_messages.unable_send_email
# =============================================================================
class register_invited(S3CustomController):
""" Custom Registration Page """
def __call__(self):
auth = current.auth
# Redirect if already logged-in
if auth.s3_logged_in():
redirect(URL(c="default", f="index"))
T = current.T
settings = current.deployment_settings
request = current.request
response = current.response
session = current.session
# Get the registration key
if len(request.args) > 1:
key = request.args[-1]
session.s3.invite_key = key
redirect(URL(c="default", f="index", args = ["register_invited"]))
else:
key = session.s3.invite_key
if not key:
session.error = T("Missing registration key")
redirect(URL(c="default", f="index"))
# Page title and intro text
title = T("Registration")
# Get intro text from CMS
db = current.db
s3db = current.s3db
ctable = s3db.cms_post
ltable = s3db.cms_post_module
join = ltable.on((ltable.post_id == ctable.id) & \
(ltable.module == "auth") & \
(ltable.resource == "user") & \
(ltable.deleted == False))
query = (ctable.name == "InvitedRegistrationIntro") & \
(ctable.deleted == False)
row = db(query).select(ctable.body,
join = join,
cache = s3db.cache,
limitby = (0, 1),
).first()
intro = row.body if row else None
# Customise Auth Messages
auth_settings = auth.settings
auth_messages = auth.messages
self.customise_auth_messages()
# Form Fields
formfields, required_fields = self.formfields()
# Generate labels (and mark required fields in the process)
labels, has_required = s3_mark_required(formfields,
mark_required = required_fields,
)
response.s3.has_required = has_required
# Form buttons
REGISTER = T("Register")
buttons = [INPUT(_type = "submit",
_value = REGISTER,
),
# TODO cancel-button?
]
# Construct the form
utable = auth_settings.table_user
response.form_label_separator = ""
form = SQLFORM.factory(table_name = utable._tablename,
record = None,
hidden = {"_next": request.vars._next},
labels = labels,
separator = "",
showid = False,
submit_button = REGISTER,
delete_label = auth_messages.delete_label,
formstyle = settings.get_ui_formstyle(),
buttons = buttons,
*formfields)
# Identify form for CSS & JS Validation
form.add_class("auth_register")
# Inject client-side Validation
auth.s3_register_validation()
if form.accepts(request.vars,
session,
formname = "register",
onvalidation = self.validate(key),
):
form_vars = form.vars
# Get the account
account = self.account(key, form_vars.code)
account.update_record(**utable._filter_fields(form_vars, id=False))
del session.s3["invite_key"]
# Post-process the new user record
s3db.configure("auth_user",
register_onaccept = self.register_onaccept,
)
# Store consent response (for approve_user to register it)
consent = form_vars.consent
if consent:
ttable = s3db.auth_user_temp
record = {"user_id": account.id,
"consent": form_vars.consent
}
ttable.insert(**record)
# Approve and link user
auth.s3_approve_user(account)
# Send welcome email (custom)
self.send_welcome_email(account)
# Log them in
user = Storage(utable._filter_fields(account, id=True))
auth.login_user(user)
auth_messages = auth.messages
auth.log_event(auth_messages.register_log, user)
session.flash = auth_messages.registration_successful
# TODO redirect to the org instead?
redirect(URL(c="default", f="person"))
elif form.errors:
response.error = T("There are errors in the form, please check your input")
# Custom View
self._view(TEMPLATE, "register_invited.html")
return {"title": title,
"intro": intro,
"form": form,
}
# -------------------------------------------------------------------------
@classmethod
def validate(cls, key):
"""
Custom validation of registration form
- check the registration code
- check for duplicate email
"""
T = current.T
def register_onvalidation(form):
code = form.vars.get("code")
account = cls.account(key, code)
if not account:
form.errors["code"] = T("Invalid Registration Code")
return
email = form.vars.get("email")
from gluon.validators import ValidationError
auth = current.auth
utable = auth.settings.table_user
dbset = current.db(utable.id != account.id)
requires = IS_NOT_IN_DB(dbset, "%s.email" % utable._tablename)
try:
requires.validate(email)
except ValidationError:
form.errors["email"] = auth.messages.duplicate_email
return
onvalidation = current.auth.settings.register_onvalidation
if onvalidation:
from gluon.tools import callback
callback(onvalidation, form, tablename="auth_user")
return register_onvalidation
# -------------------------------------------------------------------------
@staticmethod
def register_onaccept(user_id):
"""
Process Registration
@param user_id: the user ID
"""
auth = current.auth
assign_role = auth.s3_assign_role
assign_role(user_id, "ORG_ADMIN")
assign_role(user_id, "VOUCHER_ISSUER")
# -------------------------------------------------------------------------
@classmethod
def send_welcome_email(cls, user):
"""
Send a welcome email to the new user
@param user: the auth_user Row
"""
cls.customise_auth_messages()
auth_messages = current.auth.messages
# Look up CMS template for welcome email
try:
recipient = user["email"]
except (KeyError, TypeError):
recipient = None
if not recipient:
current.response.error = auth_messages.unable_send_email
return
db = current.db
s3db = current.s3db
settings = current.deployment_settings
# Define join
ctable = s3db.cms_post
ltable = s3db.cms_post_module
join = ltable.on((ltable.post_id == ctable.id) & \
(ltable.module == "auth") & \
(ltable.resource == "user") & \
(ltable.deleted == False))
# Get message template
query = (ctable.name == "WelcomeMessageInvited") & \
(ctable.deleted == False)
row = db(query).select(ctable.doc_id,
ctable.body,
join = join,
limitby = (0, 1),
).first()
if row:
message_template = row.body
else:
# Disabled
return
# Look up attachments
dtable = s3db.doc_document
query = (dtable.doc_id == row.doc_id) & \
(dtable.file != None) & (dtable.file != "") & \
(dtable.deleted == False)
rows = db(query).select(dtable.file)
attachments = []
for row in rows:
filename, stream = dtable.file.retrieve(row.file)
attachments.append(current.mail.Attachment(stream, filename=filename))
# Default subject from auth.messages
system_name = s3_str(settings.get_system_name())
subject = s3_str(auth_messages.welcome_email_subject % \
{"system_name": system_name})
# Custom message body
data = {"system_name": system_name,
"url": settings.get_base_public_url(),
"profile": URL("default", "person", host=True),
}
message = formatmap(message_template, data)
# Send email
success = current.msg.send_email(to = recipient,
subject = subject,
message = message,
attachments = attachments,
)
if not success:
current.response.error = auth_messages.unable_send_email
# -------------------------------------------------------------------------
@classmethod
def account(cls, key, code):
"""
Find the account matching registration key and code
@param key: the registration key (from URL args)
@param code: the registration code (from form)
"""
if key and code:
utable = current.auth.settings.table_user
query = (utable.registration_key == cls.keyhash(key, code))
account = current.db(query).select(utable.ALL, limitby=(0, 1)).first()
else:
account = None
return account
# -------------------------------------------------------------------------
@staticmethod
def formfields():
"""
Generate the form fields for the registration form
@returns: a tuple (formfields, required_fields)
- formfields = list of form fields
- required_fields = list of field names of required fields
"""
T = current.T
request = current.request
s3db = current.s3db
auth = current.auth
auth_settings = auth.settings
auth_messages = auth.messages
utable = auth_settings.table_user
passfield = auth_settings.password_field
# Last name is required
utable.last_name.requires = IS_NOT_EMPTY(error_message=T("input required"))
# Don't check for duplicate email (will be done in onvalidation)
# => user might choose to use the current email address of the account
# => if registration key or code are invalid, we don't want to give away
# any existing email addresses
utable.email.requires = [IS_EMAIL(error_message = auth_messages.invalid_email),
IS_LOWER(),
]
# Instantiate Consent Tracker
consent = s3db.auth_Consent(processing_types=["STORE", "RULES_ISS"])
# Form fields
formfields = [utable.first_name,
utable.last_name,
utable.email,
utable[passfield],
Field("password_two", "password",
label = auth_messages.verify_password,
requires = IS_EXPR("value==%s" % \
repr(request.vars.get(passfield)),
error_message = auth_messages.mismatched_password,
),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (auth_messages.verify_password,
T("Enter the same password again"),
),
),
),
Field("code",
label = T("Registration Code"),
requires = IS_NOT_EMPTY(),
),
Field("consent",
label = T("Consent"),
widget = consent.widget,
),
]
# Required fields
required_fields = ["first_name",
"last_name",
]
return formfields, required_fields
# -------------------------------------------------------------------------
@staticmethod
def keyhash(key, code):
"""
Generate a hash of the activation code using
the registration key
@param key: the registration key
@param code: the activation code
@returns: the hash as string
"""
crypt = CRYPT(key=key, digest_alg="sha512", salt=None)
return str(crypt(code.upper())[0])
# -------------------------------------------------------------------------
@staticmethod
def customise_auth_messages():
"""
Customise auth messages:
- welcome email subject
"""
messages = current.auth.messages
messages.welcome_email_subject = "Welcome to the %(system_name)s Portal"
# =============================================================================
class geocode(S3CustomController):
"""
Custom Geocoder
- looks up Lat/Lon from Postcode &/or Address
- looks up Lx from Lat/Lon
"""
def __call__(self):
vars_get = current.request.post_vars.get
# Validate the formkey
formkey = vars_get("k")
keyname = "_formkey[geocode]"
if not formkey or formkey not in current.session.get(keyname, []):
status = 403
message = current.ERROR.NOT_PERMITTED
headers = {"Content-Type": "application/json"}
current.log.error(message)
raise HTTP(status,
body = current.xml.json_message(success = False,
statuscode = status,
message = message),
web2py_error = message,
**headers)
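        # A valid formkey proves the request originates from a form rendered
        # in this session (presumably the location selector), not cross-site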
gis = current.gis
postcode = vars_get("postcode")
address = vars_get("address")
if address:
full_address = "%s %s" %(postcode, address)
else:
full_address = postcode
latlon = gis.geocode(full_address)
if not isinstance(latlon, dict):
output = "{}"
else:
lat = latlon["lat"]
lon = latlon["lon"]
results = gis.geocode_r(lat, lon)
results["lat"] = lat
results["lon"] = lon
from s3.s3xml import SEPARATORS
output = json.dumps(results, separators=SEPARATORS)
current.response.headers["Content-Type"] = "application/json"
return output
# =============================================================================
class ocert(S3CustomController):
"""
Custom controller to certify the eligibility of an organisation
to perform certain actions in an external application
Process similar to OAuth:
- external app presents the user a link/redirect to a
Sahana URL containing a purpose code and a one-off token, e.g.
/default/index/ocert?p=XY&t=0123456789ABCDEF
- user follows the link, and is asked by Sahana to login
- once logged-in, Sahana verifies that the user is OrgAdmin
for an organisation that qualifies for the specified purpose,
and if it does, redirects the user back to a URL in the
external app, with a verification hash ("certificate") as
a URL parameter
- the external app requests the OrgID from the user, and
          generates a hash of that OrgID with an appkey and the
session token, and if both hashes match, access to the
intended function is granted
"""
def __call__(self):
db = current.db
s3db = current.s3db
auth = current.auth
session = current.session
# Handle purpose code and token in URL
get_vars = current.request.get_vars
purpose = get_vars.get("p")
token = get_vars.get("t")
if purpose or token:
if not purpose or not token:
self._error("Invalid Request - Missing Parameter")
session.s3.ocert = {"p": purpose, "t": token}
redirect(URL(args=["ocert"], vars={}))
# Check that function is configured
ocert = current.deployment_settings.get_custom(key="ocert")
if not isinstance(ocert, dict):
self._error("Function not available")
# Read key from session, extract purpose code and token
keys = session.s3.get("ocert")
try:
purpose, token = keys.get("p"), keys.get("t")
except (TypeError, AttributeError):
self._error("Invalid Request - Missing Parameter")
# Verify purpose
try:
appkey, redirect_uri = ocert.get(purpose)
except (TypeError, ValueError):
appkey, redirect_uri = None, None
if not appkey or not redirect_uri:
self._error("Invalid Parameter")
# Validate token
# - must be a 64 to 256 bit hex-encoded number
        token_value = None
        if token and 16 <= len(token) <= 64:
try:
token_value = int(token, 16)
except (TypeError, ValueError):
pass
if not token_value:
self._error("Invalid Parameter")
# Determine the organisation to check
organisation_id = None
if auth.s3_logged_in():
# Must be ORG_ADMIN
if not auth.s3_has_role("ORG_ADMIN"):
self._error("Insufficient Privileges")
# Must manage at least one organisation
managed_orgs = None
user = auth.user
sr = auth.get_system_roles()
realms = user.realms.get(sr.ORG_ADMIN)
if not realms:
realms = s3db.pr_realm(user.pe_id)
if realms:
# Look up managed organisations
otable = s3db.org_organisation
query = (otable.pe_id.belongs(realms)) & \
(otable.deleted == False)
managed_orgs = db(query).select(otable.id,
otable.name,
)
if not managed_orgs:
self._error("No Managed Organizations")
elif len(managed_orgs) == 1:
# Only one managed org
organisation_id = managed_orgs.first().id
else:
# Let user select the organisation
form = self._org_select(managed_orgs)
if form.accepts(current.request.vars,
session,
formname = "org_select",
):
organisation_id = form.vars.organisation_id
else:
self._view(THEME, "register.html")
output = {"title": current.T("Select Organization"),
"intro": None,
"form": form,
}
else:
# Go to login, then return here
redirect(URL(c = "default",
f = "user",
args = ["login"],
vars = {"_next": URL(args=["ocert"], vars={})},
))
if organisation_id:
# Remove ocert key from session
del session.s3.ocert
# Generate verification hash
vhash = self._vhash(organisation_id, purpose, token, appkey)
if vhash:
from s3compat import urllib_quote
url = redirect_uri % {"token": urllib_quote(token),
"vhash": urllib_quote(vhash),
}
redirect(url)
else:
# Organisation is not authorized for the purpose
self._error("Organization not authorized")
return output
# -------------------------------------------------------------------------
@staticmethod
def _vhash(organisation_id, purpose, token, appkey):
"""
Verify the qualification of the organisation for the purpose,
and generate a verification hash (=encrypt the OrgID with the
appkey, salted with the token) if successful
@param organisation_id: the organisation_id
@param purpose: the purpose code
@param token: the token
@param appkey: the appkey
@returns: the encrypted certificate if the organisation
qualifies, otherwise None
"""
if not all((purpose, token, appkey)):
return None
db = current.db
s3db = current.s3db
# Look up the organisation ID tag
ttable = s3db.org_organisation_tag
query = (ttable.organisation_id == organisation_id) & \
(ttable.tag == "OrgID") & \
(ttable.deleted == False)
orgid = db(query).select(ttable.value,
limitby = (0, 1),
).first()
if not orgid or not orgid.value:
return None
if purpose == "KVREG":
# Must be a TESTSTATIONS organisation
gtable = s3db.org_group
mtable = s3db.org_group_membership
query = (mtable.organisation_id == organisation_id) & \
(mtable.deleted == False) & \
(gtable.id == mtable.group_id) & \
(gtable.name == TESTSTATIONS)
row = db(query).select(mtable.id, limitby=(0, 1)).first()
if not row:
return None
# Must be partner in the TESTS-PUBLIC project
ptable = s3db.project_project
ltable = s3db.project_organisation
query = (ltable.organisation_id == organisation_id) & \
(ltable.deleted == False) & \
(ptable.id == ltable.project_id) & \
(ptable.code == "TESTS-PUBLIC")
row = db(query).select(ltable.id, limitby=(0, 1)).first()
if not row:
return None
# Generate vhash
crypt = CRYPT(key = appkey,
digest_alg = "sha512",
salt = token,
)
return str(crypt(orgid.value)[0]).rsplit("$")[-1]
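    # Illustrative counterpart (an assumption, not part of this controller):
    # the external application can check the certificate by re-computing the
    # same keyed, token-salted hash over the OrgID the user enters:
    #
    #   def verify_certificate(org_id, token, appkey, vhash):
    #       crypt = CRYPT(key=appkey, digest_alg="sha512", salt=token)
    #       return str(crypt(org_id)[0]).rsplit("$")[-1] == vhash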
# -------------------------------------------------------------------------
@staticmethod
def _org_select(organisations):
"""
Render a form for the user to select one of their managed
organisations
@param organisations: the managed organisations, Rows {id, name}
@returns: a FORM
"""
T = current.T
response = current.response
settings = current.deployment_settings
options = {row.id: row.name for row in organisations}
formfields = [Field("organisation_id",
label = T("Organization"),
requires = IS_IN_SET(options),
),
]
# Generate labels (and mark required fields in the process)
labels = s3_mark_required(formfields)[0]
response.s3.has_required = False
# Form buttons
SUBMIT = T("Continue")
buttons = [INPUT(_type = "submit",
_value = SUBMIT,
),
]
# Construct the form
response.form_label_separator = ""
form = SQLFORM.factory(table_name = "organisation",
record = None,
labels = labels,
separator = "",
showid = False,
submit_button = SUBMIT,
formstyle = settings.get_ui_formstyle(),
buttons = buttons,
*formfields)
return form
# -------------------------------------------------------------------------
@staticmethod
def _error(message):
"""
Redirect to home page with error message
@param message: the error message
"""
current.session.error = current.T(message)
redirect(URL(c="default", f="index"))
# END =========================================================================
| 39.681102 | 147 | 0.43676 |
3390c19a4677efe819b596dadc40b2f2972cdfb7 | 9,310 | py | Python | exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/encoder/__init__.py | mariojonke/opentelemetry-python | 300ce1bc601440e5560523296b426592f54bf7c5 | ["Apache-2.0"] | 1 | 2021-08-20T04:34:28.000Z | 2021-08-20T04:34:28.000Z | exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/encoder/__init__.py | harshita19244/opentelemetry-python | e2a5b0b7439acfc16d474fdd0adb3fca41308e80 | ["Apache-2.0"] | null | null | null | exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/encoder/__init__.py | harshita19244/opentelemetry-python | e2a5b0b7439acfc16d474fdd0adb3fca41308e80 | ["Apache-2.0"] | null | null | null |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zipkin Exporter Transport Encoder
Base module and abstract class for concrete transport encoders to extend.
"""
import abc
import json
import logging
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, TypeVar
from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint
from opentelemetry.sdk.trace import Event
from opentelemetry.trace import (
Span,
SpanContext,
StatusCode,
format_span_id,
format_trace_id,
)
EncodedLocalEndpointT = TypeVar("EncodedLocalEndpointT")
DEFAULT_MAX_TAG_VALUE_LENGTH = 128
NAME_KEY = "otel.library.name"
VERSION_KEY = "otel.library.version"
logger = logging.getLogger(__name__)
class Protocol(Enum):
"""Enum of supported protocol formats.
Values are human-readable strings so that they can be easily used by the
OS environ var OTEL_EXPORTER_ZIPKIN_PROTOCOL (reserved for future usage).
"""
V1 = "v1"
V2 = "v2"
# pylint: disable=W0223
class Encoder(abc.ABC):
"""Base class for encoders that are used by the exporter.
Args:
max_tag_value_length: maximum length of an exported tag value. Values
will be truncated to conform. Since values are serialized to a JSON
list string, max_tag_value_length is honored at the element boundary.
"""
def __init__(
self, max_tag_value_length: int = DEFAULT_MAX_TAG_VALUE_LENGTH
):
self.max_tag_value_length = max_tag_value_length
@staticmethod
@abc.abstractmethod
def content_type() -> str:
pass
@abc.abstractmethod
def serialize(
self, spans: Sequence[Span], local_endpoint: NodeEndpoint
) -> str:
pass
@abc.abstractmethod
def _encode_span(
self, span: Span, encoded_local_endpoint: EncodedLocalEndpointT
) -> Any:
"""
Per spec Zipkin fields that can be absent SHOULD be omitted from the
payload when they are empty in the OpenTelemetry Span.
https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/sdk_exporters/zipkin.md#request-payload
"""
@staticmethod
@abc.abstractmethod
def _encode_local_endpoint(
local_endpoint: NodeEndpoint,
) -> EncodedLocalEndpointT:
pass
@staticmethod
def _encode_debug(span_context) -> Any:
return span_context.trace_flags.sampled
@staticmethod
@abc.abstractmethod
def _encode_span_id(span_id: int) -> Any:
pass
@staticmethod
@abc.abstractmethod
def _encode_trace_id(trace_id: int) -> Any:
pass
@staticmethod
def _get_parent_id(span_context) -> Optional[int]:
if isinstance(span_context, Span):
parent_id = span_context.parent.span_id
elif isinstance(span_context, SpanContext):
parent_id = span_context.span_id
else:
parent_id = None
return parent_id
def _extract_tags_from_dict(
self, tags_dict: Optional[Dict]
) -> Dict[str, str]:
tags = {}
if not tags_dict:
return tags
for attribute_key, attribute_value in tags_dict.items():
if isinstance(attribute_value, bool):
value = str(attribute_value).lower()
elif isinstance(attribute_value, (int, float, str)):
value = str(attribute_value)
elif isinstance(attribute_value, Sequence):
value = self._extract_tag_value_string_from_sequence(
attribute_value
)
if not value:
logger.warning("Could not serialize tag %s", attribute_key)
continue
else:
logger.warning("Could not serialize tag %s", attribute_key)
continue
if (
self.max_tag_value_length is not None
and self.max_tag_value_length > 0
):
value = value[: self.max_tag_value_length]
tags[attribute_key] = value
return tags
def _extract_tag_value_string_from_sequence(self, sequence: Sequence):
if self.max_tag_value_length and self.max_tag_value_length == 1:
return None
tag_value_elements = []
running_string_length = (
2 # accounts for array brackets in output string
)
defined_max_tag_value_length = (
self.max_tag_value_length is not None
and self.max_tag_value_length > 0
)
for element in sequence:
if isinstance(element, bool):
tag_value_element = str(element).lower()
elif isinstance(element, (int, float, str)):
tag_value_element = str(element)
elif element is None:
tag_value_element = None
else:
continue
if defined_max_tag_value_length:
if tag_value_element is None:
running_string_length += 4 # null with no quotes
else:
# + 2 accounts for string quotation marks
running_string_length += len(tag_value_element) + 2
if tag_value_elements:
# accounts for ',' item separator
running_string_length += 1
if running_string_length > self.max_tag_value_length:
break
tag_value_elements.append(tag_value_element)
return json.dumps(tag_value_elements, separators=(",", ":"))
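    # Illustrative behaviour (made-up values): with max_tag_value_length=25,
    # the sequence ["abcdefgh", "ijklmnop", "qrst"] serializes to
    # '["abcdefgh","ijklmnop"]' (23 chars) - adding the third element would
    # push the running length to 30, so it is dropped at the element boundary
    # rather than cut mid-string.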
def _extract_tags_from_span(self, span: Span) -> Dict[str, str]:
tags = self._extract_tags_from_dict(span.attributes)
if span.resource:
tags.update(self._extract_tags_from_dict(span.resource.attributes))
if span.instrumentation_info is not None:
tags.update(
{
NAME_KEY: span.instrumentation_info.name,
VERSION_KEY: span.instrumentation_info.version,
}
)
if span.status.status_code is not StatusCode.UNSET:
tags.update({"otel.status_code": span.status.status_code.name})
if span.status.status_code is StatusCode.ERROR:
tags.update({"error": span.status.description or ""})
return tags
def _extract_annotations_from_events(
self, events: Optional[List[Event]]
) -> Optional[List[Dict]]:
if not events:
return None
annotations = []
for event in events:
attrs = {}
for key, value in event.attributes.items():
if (
isinstance(value, str)
and self.max_tag_value_length is not None
and self.max_tag_value_length > 0
):
value = value[: self.max_tag_value_length]
attrs[key] = value
annotations.append(
{
"timestamp": self._nsec_to_usec_round(event.timestamp),
"value": json.dumps({event.name: attrs}, sort_keys=True),
}
)
return annotations
@staticmethod
def _nsec_to_usec_round(nsec: int) -> int:
"""Round nanoseconds to microseconds
Timestamp in zipkin spans is int of microseconds.
See: https://zipkin.io/pages/instrumenting.html
"""
return (nsec + 500) // 10 ** 3
class JsonEncoder(Encoder):
@staticmethod
def content_type():
return "application/json"
def serialize(
self, spans: Sequence[Span], local_endpoint: NodeEndpoint
) -> str:
encoded_local_endpoint = self._encode_local_endpoint(local_endpoint)
encoded_spans = []
for span in spans:
encoded_spans.append(
self._encode_span(span, encoded_local_endpoint)
)
return json.dumps(encoded_spans)
@staticmethod
def _encode_local_endpoint(local_endpoint: NodeEndpoint) -> Dict:
encoded_local_endpoint = {"serviceName": local_endpoint.service_name}
if local_endpoint.ipv4 is not None:
encoded_local_endpoint["ipv4"] = str(local_endpoint.ipv4)
if local_endpoint.ipv6 is not None:
encoded_local_endpoint["ipv6"] = str(local_endpoint.ipv6)
if local_endpoint.port is not None:
encoded_local_endpoint["port"] = local_endpoint.port
return encoded_local_endpoint
@staticmethod
def _encode_span_id(span_id: int) -> str:
return format_span_id(span_id)
@staticmethod
def _encode_trace_id(trace_id: int) -> str:
return format_trace_id(trace_id)
| 33.014184 | 141 | 0.627497 |
0ff81139fa6617afd981b05ff182a2e1f27c1768 | 2,928 | py | Python | dpnp/dpnp_iface_libmath.py | mfkiwl/dpnp | a963646c3082b7ec6fe4b40c4df2a95d3a59fc95 | ["BSD-2-Clause"] | 37 | 2020-09-08T00:38:52.000Z | 2022-03-18T01:44:10.000Z | dpnp/dpnp_iface_libmath.py | mfkiwl/dpnp | a963646c3082b7ec6fe4b40c4df2a95d3a59fc95 | ["BSD-2-Clause"] | 432 | 2020-09-07T09:48:41.000Z | 2022-03-25T17:50:55.000Z | dpnp/dpnp_iface_libmath.py | mfkiwl/dpnp | a963646c3082b7ec6fe4b40c4df2a95d3a59fc95 | ["BSD-2-Clause"] | 17 | 2020-09-07T10:00:34.000Z | 2022-03-25T13:53:43.000Z |
# cython: language_level=3
# distutils: language = c++
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2016-2020, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
Interface of the function from Python Math library
Notes
-----
This module is a facade (public interface) file for the library
it contains:
- Interface functions
- documentation for the functions
- The functions parameters check
"""
import math
from dpnp.dpnp_algo import *
from dpnp.dpnp_utils import *
import dpnp
__all__ = [
"erf"
]
def erf(in_array1):
"""
Returns the error function of complex argument.
For full documentation refer to :obj:`scipy.special.erf`.
Limitations
-----------
Parameter ``in_array1`` is supported as :obj:`dpnp.ndarray`.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
.. seealso:: :obj:`math.erf`
Examples
--------
>>> import dpnp as np
>>> x = np.linspace(2.0, 3.0, num=5)
>>> [i for i in x]
[2.0, 2.25, 2.5, 2.75, 3.0]
>>> out = np.erf(x)
>>> [i for i in out]
[0.99532227, 0.99853728, 0.99959305, 0.99989938, 0.99997791]
"""
x1_desc = dpnp.get_dpnp_descriptor(in_array1)
if x1_desc:
return dpnp_erf(x1_desc).get_pyobj()
result = create_output_descriptor_py(in_array1.shape, in_array1.dtype, None).get_pyobj()
for i in range(result.size):
result[i] = math.erf(in_array1[i])
return result
| 32.898876 | 92 | 0.677254 |
2d37b27339dd27c534e2a0daf44ce0129bc8d8a8 | 11,400 | py | Python | pay-api/src/pay_api/models/payment.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | ["Apache-2.0"] | null | null | null | pay-api/src/pay_api/models/payment.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | ["Apache-2.0"] | null | null | null | pay-api/src/pay_api/models/payment.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | ["Apache-2.0"] | null | null | null |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model to handle all operations related to Payment data."""
from datetime import datetime
from typing import Dict
import pytz
from flask import current_app
from marshmallow import fields
from sqlalchemy import ForeignKey
from sqlalchemy import func, or_
from sqlalchemy.orm import relationship
from pay_api.utils.enums import InvoiceReferenceStatus, PaymentMethod as PaymentMethodEnum, PaymentStatus
from pay_api.utils.util import get_first_and_last_dates_of_month, get_str_by_path, get_week_start_and_end_date
from .base_model import BaseModel
from .base_schema import BaseSchema
from .db import db
from .invoice import Invoice
from .payment_account import PaymentAccount
from .payment_method import PaymentMethod
from .payment_status_code import PaymentStatusCode
from .payment_system import PaymentSystem
from .invoice_reference import InvoiceReference
class Payment(BaseModel): # pylint: disable=too-many-instance-attributes
"""This class manages all of the base data about Payment ."""
__tablename__ = 'payments'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
payment_system_code = db.Column(db.String(10), ForeignKey('payment_systems.code'), nullable=False)
payment_account_id = db.Column(db.Integer, ForeignKey('payment_accounts.id'), nullable=True)
payment_method_code = db.Column(db.String(15), ForeignKey('payment_methods.code'), nullable=False)
payment_status_code = db.Column(db.String(20), ForeignKey('payment_status_codes.code'), nullable=True)
invoice_number = db.Column(db.String(50), nullable=True, index=True)
receipt_number = db.Column(db.String(50), nullable=True, index=True)
cons_inv_number = db.Column(db.String(50), nullable=True, index=True)
invoice_amount = db.Column(db.Numeric(), nullable=True)
paid_amount = db.Column(db.Numeric(), nullable=True)
created_by = db.Column(db.String(50), default='SYSTEM')
completed_on = db.Column(db.DateTime, nullable=True)
payment_system = relationship(PaymentSystem, foreign_keys=[payment_system_code], lazy='select', innerjoin=True)
payment_status = relationship(PaymentStatusCode, foreign_keys=[payment_status_code], lazy='select', innerjoin=True)
@classmethod
def find_payment_method_by_payment_id(cls, identifier: int):
"""Return a Payment by id."""
query = db.session.query(PaymentMethod) \
.join(Payment) \
.filter(PaymentMethod.code == Payment.payment_method_code) \
.filter(Payment.id == identifier)
return query.one_or_none()
@classmethod
def find_payment_by_invoice_number_and_status(cls, inv_number: str, payment_status: str):
"""Return a Payment by invoice_number and status."""
query = db.session.query(Payment) \
.filter(Payment.invoice_number == inv_number) \
.filter(Payment.payment_status_code == payment_status)
return query.all()
@classmethod
def find_payment_by_receipt_number(cls, receipt_number: str):
"""Return a Payment by receipt_number."""
return db.session.query(Payment).filter(Payment.receipt_number == receipt_number).one_or_none()
@classmethod
def find_payment_for_invoice(cls, invoice_id: int):
"""Find payment records created for the invoice."""
query = db.session.query(Payment) \
.join(InvoiceReference, InvoiceReference.invoice_number == Payment.invoice_number) \
.join(Invoice, InvoiceReference.invoice_id == Invoice.id) \
.filter(Invoice.id == invoice_id) \
.filter(InvoiceReference.status_code.
in_([InvoiceReferenceStatus.ACTIVE.value, InvoiceReferenceStatus.COMPLETED.value]))
return query.one_or_none()
@classmethod
def search_account_payments(cls, auth_account_id: str, payment_status: str, page: int, limit: int):
"""Search payment records created for the account."""
query = db.session.query(Payment, Invoice) \
.join(PaymentAccount, PaymentAccount.id == Payment.payment_account_id) \
.outerjoin(InvoiceReference, InvoiceReference.invoice_number == Payment.invoice_number) \
.outerjoin(Invoice, InvoiceReference.invoice_id == Invoice.id) \
.filter(PaymentAccount.auth_account_id == auth_account_id)
# TODO handle other status and conditions gracefully.
if payment_status:
query = query.filter(Payment.payment_status_code == payment_status)
if payment_status == PaymentStatus.FAILED.value:
consolidated_inv_subquery = db.session.query(Payment.invoice_number) \
.filter(Payment.payment_status_code == PaymentStatus.CREATED.value) \
.filter(Payment.payment_method_code == PaymentMethodEnum.CC.value) \
.subquery()
# If call is to get NSF payments, get only active failed payments.
# Exclude any payments which failed first and paid later.
query = query.filter(or_(InvoiceReference.status_code == InvoiceReferenceStatus.ACTIVE.value,
Payment.cons_inv_number.in_(consolidated_inv_subquery)))
query = query.order_by(Payment.id.asc())
pagination = query.paginate(per_page=limit, page=page)
result, count = pagination.items, pagination.total
return result, count
@classmethod
def find_payments_to_consolidate(cls, auth_account_id: str):
"""Find payments to be consolidated."""
consolidated_inv_subquery = db.session.query(Payment.cons_inv_number)\
.filter(Payment.payment_status_code == PaymentStatus.FAILED.value)\
.filter(Payment.payment_method_code == PaymentMethodEnum.PAD.value)\
.subquery()
query = db.session.query(Payment) \
.join(PaymentAccount, PaymentAccount.id == Payment.payment_account_id) \
.outerjoin(InvoiceReference, InvoiceReference.invoice_number == Payment.invoice_number) \
.filter(InvoiceReference.status_code == InvoiceReferenceStatus.ACTIVE.value) \
.filter(PaymentAccount.auth_account_id == auth_account_id) \
.filter(or_(Payment.payment_status_code == PaymentStatus.FAILED.value,
Payment.invoice_number.in_(consolidated_inv_subquery)))
return query.all()
@classmethod
def search_purchase_history(cls, # pylint:disable=too-many-arguments, too-many-locals, too-many-branches
auth_account_id: str, search_filter: Dict,
page: int, limit: int, return_all: bool, max_no_records: int = 0):
"""Search for purchase history."""
query = db.session.query(Invoice) \
.outerjoin(PaymentAccount, Invoice.payment_account_id == PaymentAccount.id) \
.filter(PaymentAccount.auth_account_id == auth_account_id)
if search_filter.get('status', None):
query = query.filter(Invoice.invoice_status_code == search_filter.get('status'))
if search_filter.get('folioNumber', None):
query = query.filter(Invoice.folio_number == search_filter.get('folioNumber'))
if search_filter.get('businessIdentifier', None):
query = query.filter(Invoice.business_identifier == search_filter.get('businessIdentifier'))
if search_filter.get('createdBy', None): # pylint: disable=no-member
query = query.filter(
Invoice.created_name.ilike('%' + search_filter.get('createdBy') + '%')) # pylint: disable=no-member
# Find start and end dates
created_from: datetime = None
created_to: datetime = None
if get_str_by_path(search_filter, 'dateFilter/startDate'):
created_from = datetime.strptime(get_str_by_path(search_filter, 'dateFilter/startDate'), '%m/%d/%Y')
if get_str_by_path(search_filter, 'dateFilter/endDate'):
created_to = datetime.strptime(get_str_by_path(search_filter, 'dateFilter/endDate'), '%m/%d/%Y')
if get_str_by_path(search_filter, 'weekFilter/index'):
created_from, created_to = get_week_start_and_end_date(
int(get_str_by_path(search_filter, 'weekFilter/index')))
if get_str_by_path(search_filter, 'monthFilter/month') and get_str_by_path(search_filter, 'monthFilter/year'):
month = int(get_str_by_path(search_filter, 'monthFilter/month'))
year = int(get_str_by_path(search_filter, 'monthFilter/year'))
created_from, created_to = get_first_and_last_dates_of_month(month=month, year=year)
if created_from and created_to:
# Truncate time for from date and add max time for to date
tz_name = current_app.config['LEGISLATIVE_TIMEZONE']
tz_local = pytz.timezone(tz_name)
created_from = created_from.replace(hour=0, minute=0, second=0, microsecond=0).astimezone(tz_local)
created_to = created_to.replace(hour=23, minute=59, second=59, microsecond=999999).astimezone(tz_local)
query = query.filter(
func.timezone(tz_name, func.timezone('UTC', Invoice.created_on)).between(created_from, created_to))
# Add ordering
query = query.order_by(Invoice.created_on.desc())
if not return_all:
# Add pagination
pagination = query.paginate(per_page=limit, page=page)
result, count = pagination.items, pagination.total
# If maximum number of records is provided, return it as total
if max_no_records > 0:
count = max_no_records if max_no_records < count else count
else:
# If maximum number of records is provided, set the page with that number
if max_no_records > 0:
pagination = query.paginate(per_page=max_no_records, page=1)
result, count = pagination.items, max_no_records
else:
result = query.all()
count = len(result)
return result, count
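    # Illustrative call (a sketch; the account id and filter values are made
    # up - date strings must match the '%m/%d/%Y' format parsed above):
    #
    #   results, count = Payment.search_purchase_history(
    #       auth_account_id="1234",
    #       search_filter={"status": "PAID",
    #                      "folioNumber": "F-001",
    #                      "dateFilter": {"startDate": "01/01/2021",
    #                                     "endDate": "01/31/2021"}},
    #       page=1, limit=10, return_all=False)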
class PaymentSchema(BaseSchema): # pylint: disable=too-many-ancestors
"""Main schema used to serialize the Payment."""
class Meta: # pylint: disable=too-few-public-methods
"""Returns all the fields from the SQLAlchemy class."""
model = Payment
exclude = ['payment_system', 'payment_status', 'payment_account_id', 'cons_inv_number']
payment_system_code = fields.String(data_key='payment_system')
payment_method_code = fields.String(data_key='payment_method')
payment_status_code = fields.String(data_key='status_code')
invoice_amount = fields.Float(data_key='invoice_amount')
paid_amount = fields.Float(data_key='paid_amount')
| 51.351351 | 119 | 0.69307 |
f8a9eaafd6d169c227b680d9a44f503018ff7617 | 8,222 | py | Python | reference/train.py | BoChenYS/ROPE | 3e50f134259b555cf547e4a3ef8b14cf5cda4e00 | ["BSD-3-Clause"] | 6 | 2022-01-12T05:59:20.000Z | 2022-03-31T02:33:23.000Z | reference/train.py | BoChenYS/ROPE | 3e50f134259b555cf547e4a3ef8b14cf5cda4e00 | ["BSD-3-Clause"] | null | null | null | reference/train.py | BoChenYS/ROPE | 3e50f134259b555cf547e4a3ef8b14cf5cda4e00 | ["BSD-3-Clause"] | null | null | null |
r"""PyTorch Detection Training.
To run in a multi-gpu environment, use the distributed launcher::
python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \
train.py ... --world-size $NGPU
The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu.
--lr 0.02 --batch-size 2 --world-size 8
If you use different number of gpus, the learning rate should be changed to 0.02/8*$NGPU.
On top of that, for training Faster/Mask R-CNN, the default hyperparameters are
--epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3
Also, if you train Keypoint R-CNN, the default hyperparameters are
--epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3
Because the number of images is smaller in the person keypoint subset of COCO,
the number of epochs should be adapted so that we have the same number of iterations.
"""
import datetime
import os
import time
import torch
import torch.utils.data
from torch import nn
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
from coco_utils import get_coco, get_coco_kp
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
from engine import train_one_epoch, evaluate
from . import utils
from . import transforms as T
def get_dataset(name, image_set, transform, data_path):
paths = {
"coco": (data_path, get_coco, 91),
"coco_kp": (data_path, get_coco_kp, 2)
}
p, ds_fn, num_classes = paths[name]
ds = ds_fn(p, image_set=image_set, transforms=transform)
return ds, num_classes
def get_transform(train):
transforms = []
transforms.append(T.ToTensor())
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# Data loading code
print("Loading data")
dataset, num_classes = get_dataset(args.dataset, "train", get_transform(train=True), args.data_path)
dataset_test, _ = get_dataset(args.dataset, "val", get_transform(train=False), args.data_path)
print("Creating data loaders")
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
else:
train_sampler = torch.utils.data.RandomSampler(dataset)
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
if args.aspect_ratio_group_factor >= 0:
group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)
train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
else:
train_batch_sampler = torch.utils.data.BatchSampler(
train_sampler, args.batch_size, drop_last=True)
data_loader = torch.utils.data.DataLoader(
dataset, batch_sampler=train_batch_sampler, num_workers=args.workers,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1,
sampler=test_sampler, num_workers=args.workers,
collate_fn=utils.collate_fn)
print("Creating model")
model = torchvision.models.detection.__dict__[args.model](num_classes=num_classes,
pretrained=args.pretrained)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(
params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.test_only:
evaluate(model, data_loader_test, device=device)
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
lr_scheduler.step()
if args.output_dir:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'args': args,
'epoch': epoch},
os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
# evaluate after every epoch
evaluate(model, data_loader_test, device=device)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description=__doc__)
parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset')
parser.add_argument('--dataset', default='coco', help='dataset')
parser.add_argument('--model', default='maskrcnn_resnet50_fpn', help='model')
parser.add_argument('--device', default='cuda', help='device')
parser.add_argument('-b', '--batch-size', default=2, type=int,
help='images per gpu, the total batch size is $NGPU x batch_size')
parser.add_argument('--epochs', default=26, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--lr', default=0.02, type=float,
help='initial learning rate, 0.02 is the default value for training '
'on 8 gpus and 2 images_per_gpu')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--lr-step-size', default=8, type=int, help='decrease lr every step-size epochs')
parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int, help='decrease lr every step-size epochs')
parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
parser.add_argument('--output-dir', default='.', help='path where to save')
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)
parser.add_argument(
"--test-only",
dest="test_only",
help="Only test the model",
action="store_true",
)
parser.add_argument(
"--pretrained",
dest="pretrained",
help="Use pre-trained models from the modelzoo",
action="store_true",
)
# distributed training parameters
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
args = parser.parse_args()
if args.output_dir:
utils.mkdir(args.output_dir)
main(args)
| 40.70297 | 119 | 0.677816 |
8d161b72fa47a578395dc5e0b2dde178f56298ac | 2,107 | py | Python | lib/spack/spack/hooks/monitor.py | HigherOrderMethods/spack | 87ed3fcc59fc25ce250042338d082925e3a3610b | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | lib/spack/spack/hooks/monitor.py | HigherOrderMethods/spack | 87ed3fcc59fc25ce250042338d082925e3a3610b | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 15 | 2021-04-14T12:34:46.000Z | 2022-03-02T19:08:00.000Z | lib/spack/spack/hooks/monitor.py | Kerilk/spack | e027942b55407a4a5fe323b93d8e57200c873a43 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import spack.monitor
def on_install_start(spec):
"""On start of an install, we want to ping the server if it exists
"""
if not spack.monitor.cli:
return
tty.debug("Running on_install_start for %s" % spec)
build_id = spack.monitor.cli.new_build(spec)
tty.verbose("Build created with id %s" % build_id)
def on_install_success(spec):
"""On the success of an install (after everything is complete)
"""
if not spack.monitor.cli:
return
tty.debug("Running on_install_success for %s" % spec)
result = spack.monitor.cli.update_build(spec, status="SUCCESS")
tty.verbose(result.get('message'))
def on_install_failure(spec):
"""Triggered on failure of an install
"""
if not spack.monitor.cli:
return
tty.debug("Running on_install_failure for %s" % spec)
result = spack.monitor.cli.fail_task(spec)
tty.verbose(result.get('message'))
def on_phase_success(pkg, phase_name, log_file):
"""Triggered on a phase success
"""
if not spack.monitor.cli:
return
tty.debug("Running on_phase_success %s, phase %s" % (pkg.name, phase_name))
result = spack.monitor.cli.send_phase(pkg, phase_name, log_file, "SUCCESS")
tty.verbose(result.get('message'))
def on_phase_error(pkg, phase_name, log_file):
"""Triggered on a phase error
"""
if not spack.monitor.cli:
return
tty.debug("Running on_phase_error %s, phase %s" % (pkg.name, phase_name))
result = spack.monitor.cli.send_phase(pkg, phase_name, log_file, "ERROR")
tty.verbose(result.get('message'))
def on_analyzer_save(pkg, result):
"""given a package and a result, if we have a spack monitor, upload
the result to it.
"""
if not spack.monitor.cli:
return
# This hook runs after a save result
spack.monitor.cli.send_analyze_metadata(pkg, result)
| 28.472973 | 79 | 0.68486 |
5e2b2f0e5c9cdd14dd3e3052e5392fdfa1eb0061 | 1,115 | py | Python | talipp/indicators/CHOP.py | solocarrie/talipp | a35bbc33444c56683d4e26439f4878e92b937d7f | ["MIT"] | 54 | 2020-11-19T02:27:04.000Z | 2022-02-22T06:31:05.000Z | talipp/indicators/CHOP.py | justin-pierce/talipp | f5296381e3f4270b7743694e2ab5a0da301bdaf3 | ["MIT"] | 24 | 2020-11-01T17:56:28.000Z | 2021-09-15T18:40:04.000Z | talipp/indicators/CHOP.py | justin-pierce/talipp | f5296381e3f4270b7743694e2ab5a0da301bdaf3 | ["MIT"] | 14 | 2020-12-10T22:43:37.000Z | 2022-01-15T22:23:42.000Z |
from math import log10
from typing import List, Any
from talipp.indicators.Indicator import Indicator
from talipp.indicators.ATR import ATR
from talipp.ohlcv import OHLCV
class CHOP(Indicator):
"""
Choppiness Index
Output: a list of OHLCV objects
"""
def __init__(self, period: int, input_values: List[OHLCV] = None):
super().__init__()
self.period = period
self.atr = ATR(1)
self.add_sub_indicator(self.atr)
self.initialize(input_values)
def _calculate_new_value(self) -> Any:
if len(self.atr) < self.period or len(self.input_values) < self.period:
return None
max_high = max(self.input_values[-self.period:], key = lambda x: x.high).high
min_low = min(self.input_values[-self.period:], key = lambda x: x.low).low
if max_high != min_low:
return 100.0 * log10(sum(self.atr[-self.period:]) / (max_high - min_low) ) / log10(self.period)
else:
if len(self.output_values) > 0:
return self.output_values[-1]
else:
                return None
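# Illustrative usage (a sketch; the positional OHLCV(open, high, low, close,
# volume) constructor order is an assumption about talipp's OHLCV type):
if __name__ == "__main__":
    # 20 synthetic rising bars - enough to warm up a 14-period CHOP
    bars = [OHLCV(1.0 + 0.1 * i, 2.0 + 0.1 * i, 0.5 + 0.1 * i,
                  1.5 + 0.1 * i, 100.0) for i in range(20)]
    chop = CHOP(period=14, input_values=bars)
    print(chop[-1])  # earlier outputs are None until `period` bars exist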
| 28.589744 | 107 | 0.622422 |
f20e5ce021f8a4b59cc652737f5aac27463109fa | 11,212 | py | Python | mezzanine/generic/fields.py | shangzhikeji/mezzanine | 4dfd078d64236dfd4a9d67587ad20562801d0932 | ["BSD-2-Clause"] | 2 | 2019-01-19T09:00:20.000Z | 2019-03-13T01:09:36.000Z | mezzanine/generic/fields.py | shangzhikeji/mezzanine | 4dfd078d64236dfd4a9d67587ad20562801d0932 | ["BSD-2-Clause"] | null | null | null | mezzanine/generic/fields.py | shangzhikeji/mezzanine | 4dfd078d64236dfd4a9d67587ad20562801d0932 | ["BSD-2-Clause"] | null | null | null |
from __future__ import division, unicode_literals
from future.builtins import str
from copy import copy
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ImproperlyConfigured, AppRegistryNotReady
from django.db.models import IntegerField, CharField, FloatField
from django.db.models.signals import post_save, post_delete
from mezzanine.utils.deprecation import get_related_model
class BaseGenericRelation(GenericRelation):
"""
Extends ``GenericRelation`` to:
- Add a consistent default value for ``object_id_field`` and
check for a ``default_related_model`` attribute which can be
defined on subclasses as a default for the ``to`` argument.
- Add one or more custom fields to the model that the relation
field is applied to, and then call a ``related_items_changed``
method each time related items are saved or deleted, so that a
calculated value can be stored against the custom fields since
aggregates aren't available for GenericRelation instances.
"""
# Mapping of field names to model fields that will be added.
fields = {}
def __init__(self, *args, **kwargs):
"""
Set up some defaults and check for a ``default_related_model``
attribute for the ``to`` argument.
"""
kwargs.setdefault("object_id_field", "object_pk")
to = getattr(self, "default_related_model", None)
# Avoid having both a positional arg and a keyword arg for
# the parameter ``to``
if to and not args:
kwargs.setdefault("to", to)
try:
# Check if ``related_model`` has been modified by a subclass
self.related_model
except (AppRegistryNotReady, AttributeError):
# if not, all is good
super(BaseGenericRelation, self).__init__(*args, **kwargs)
else:
# otherwise, warn the user to stick to the new (as of 4.0)
# ``default_related_model`` attribute
raise ImproperlyConfigured("BaseGenericRelation changed the "
"way it handled a default ``related_model`` in mezzanine "
"4.0. Please override ``default_related_model`` instead "
"and do not tamper with django's ``related_model`` "
"property anymore.")
def contribute_to_class(self, cls, name):
"""
Add each of the names and fields in the ``fields`` attribute
to the model the relationship field is applied to, and set up
the related item save and delete signals for calling
``related_items_changed``.
"""
for field in cls._meta.many_to_many:
if isinstance(field, self.__class__):
e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % (
self.__class__.__name__, cls.__name__, cls.__name__,
name, field.name)
raise ImproperlyConfigured(e)
self.related_field_name = name
super(BaseGenericRelation, self).contribute_to_class(cls, name)
# Not applicable to abstract classes, and in fact will break.
if not cls._meta.abstract:
for (name_string, field) in self.fields.items():
if "%s" in name_string:
name_string = name_string % name
extant_fields = cls._meta._forward_fields_map
if name_string in extant_fields:
continue
if field.verbose_name is None:
field.verbose_name = self.verbose_name
cls.add_to_class(name_string, copy(field))
# Add a getter function to the model we can use to retrieve
# the field/manager by name.
getter_name = "get_%s_name" % self.__class__.__name__.lower()
cls.add_to_class(getter_name, lambda self: name)
sender = get_related_model(self)
post_save.connect(self._related_items_changed, sender=sender)
post_delete.connect(self._related_items_changed, sender=sender)
def _related_items_changed(self, **kwargs):
"""
Ensure that the given related item is actually for the model
this field applies to, and pass the instance to the real
``related_items_changed`` handler.
"""
for_model = kwargs["instance"].content_type.model_class()
if for_model and issubclass(for_model, self.model):
instance_id = kwargs["instance"].object_pk
try:
instance = for_model.objects.get(id=instance_id)
except self.model.DoesNotExist:
# Instance itself was deleted - signals are irrelevant.
return
if hasattr(instance, "get_content_model"):
instance = instance.get_content_model()
related_manager = getattr(instance, self.related_field_name)
self.related_items_changed(instance, related_manager)
def related_items_changed(self, instance, related_manager):
"""
Can be implemented by subclasses - called whenever the
state of related items change, eg they're saved or deleted.
The instance for this field and the related manager for the
field are passed as arguments.
"""
pass
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
See: https://code.djangoproject.com/ticket/22552
"""
return getattr(obj, self.attname).all()
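# Illustrative subclass (a sketch, not shipped with this module): a relation
# field that stores the id of the newest related item each time the related
# items change. The field name and default related model are assumptions.
class ExampleLatestItemField(BaseGenericRelation):
    default_related_model = "generic.AssignedKeyword"
    fields = {"%s_latest_id": IntegerField(default=0, editable=False)}
    def related_items_changed(self, instance, related_manager):
        latest = related_manager.order_by("-id").first()
        name = list(self.fields.keys())[0] % self.related_field_name
        setattr(instance, name, latest.id if latest else 0)
        instance.save()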
class CommentsField(BaseGenericRelation):
"""
Stores the number of comments against the
``COMMENTS_FIELD_NAME_count`` field when a comment is saved or
deleted.
"""
default_related_model = "generic.ThreadedComment"
fields = {"%s_count": IntegerField(editable=False, default=0)}
def related_items_changed(self, instance, related_manager):
"""
Stores the number of comments. A custom ``count_filter``
queryset gets checked for, allowing managers to implement
custom count logic.
"""
try:
count = related_manager.count_queryset()
except AttributeError:
count = related_manager.count()
count_field_name = list(self.fields.keys())[0] % \
self.related_field_name
setattr(instance, count_field_name, count)
instance.save()
class KeywordsField(BaseGenericRelation):
"""
Stores the keywords as a single string into the
``KEYWORDS_FIELD_NAME_string`` field for convenient access when
searching.
"""
default_related_model = "generic.AssignedKeyword"
fields = {"%s_string": CharField(editable=False, blank=True,
max_length=500)}
def __init__(self, *args, **kwargs):
"""
Mark the field as editable so that it can be specified in
admin class fieldsets and pass validation, and also so that
it shows up in the admin form.
"""
super(KeywordsField, self).__init__(*args, **kwargs)
self.editable = True
def formfield(self, **kwargs):
"""
Provide the custom form widget for the admin, since there
isn't a form field mapped to ``GenericRelation`` model fields.
"""
from mezzanine.generic.forms import KeywordsWidget
kwargs["widget"] = KeywordsWidget
return super(KeywordsField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
"""
The ``KeywordsWidget`` field will return data as a string of
comma separated IDs for the ``Keyword`` model - convert these
into actual ``AssignedKeyword`` instances. Also delete
``Keyword`` instances if their last related ``AssignedKeyword``
instance is being removed.
"""
from mezzanine.generic.models import Keyword
related_manager = getattr(instance, self.name)
# Get a list of Keyword IDs being removed.
old_ids = [str(a.keyword_id) for a in related_manager.all()]
new_ids = data.split(",")
removed_ids = set(old_ids) - set(new_ids)
# Remove current AssignedKeyword instances.
related_manager.all().delete()
# Convert the data into AssignedKeyword instances.
if data:
data = [related_manager.create(keyword_id=i) for i in new_ids]
# Remove keywords that are no longer assigned to anything.
Keyword.objects.delete_unused(removed_ids)
#super(KeywordsField, self).save_form_data(instance, data)
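        # Worked example (made-up ids): with existing AssignedKeywords for
        # keywords {3, 5} and data == "3,7", removed_ids == {"5"}; all current
        # assignments are deleted, new ones are created for 3 and 7, and
        # Keyword 5 itself is deleted if nothing else still uses it.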
def contribute_to_class(self, cls, name):
"""
Swap out any reference to ``KeywordsField`` with the
``KEYWORDS_FIELD_string`` field in ``search_fields``.
"""
super(KeywordsField, self).contribute_to_class(cls, name)
string_field_name = list(self.fields.keys())[0] % \
self.related_field_name
if hasattr(cls, "search_fields") and name in cls.search_fields:
try:
weight = cls.search_fields[name]
except TypeError:
# search_fields is a sequence.
index = cls.search_fields.index(name)
search_fields_type = type(cls.search_fields)
cls.search_fields = list(cls.search_fields)
cls.search_fields[index] = string_field_name
cls.search_fields = search_fields_type(cls.search_fields)
else:
del cls.search_fields[name]
cls.search_fields[string_field_name] = weight
def related_items_changed(self, instance, related_manager):
"""
Stores the keywords as a single string for searching.
"""
assigned = related_manager.select_related("keyword")
keywords = " ".join([str(a.keyword) for a in assigned])
string_field_name = list(self.fields.keys())[0] % \
self.related_field_name
if getattr(instance, string_field_name) != keywords:
setattr(instance, string_field_name, keywords)
instance.save()
class RatingField(BaseGenericRelation):
"""
Stores the rating count and average against the
``RATING_FIELD_NAME_count`` and ``RATING_FIELD_NAME_average``
fields when a rating is saved or deleted.
"""
default_related_model = "generic.Rating"
fields = {"%s_count": IntegerField(default=0, editable=False),
"%s_sum": IntegerField(default=0, editable=False),
"%s_average": FloatField(default=0, editable=False)}
def related_items_changed(self, instance, related_manager):
"""
Calculates and saves the average rating.
"""
ratings = [r.value for r in related_manager.all()]
count = len(ratings)
_sum = sum(ratings)
average = _sum / count if count > 0 else 0
setattr(instance, "%s_count" % self.related_field_name, count)
setattr(instance, "%s_sum" % self.related_field_name, _sum)
setattr(instance, "%s_average" % self.related_field_name, average)
instance.save()
| 41.992509 | 77 | 0.635926 |